//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
8
9#include "AArch64InstrInfo.h"
10#include "MCTargetDesc/AArch64AddressingModes.h"
11#include "MCTargetDesc/AArch64InstPrinter.h"
12#include "MCTargetDesc/AArch64MCAsmInfo.h"
13#include "MCTargetDesc/AArch64MCTargetDesc.h"
14#include "MCTargetDesc/AArch64TargetStreamer.h"
15#include "TargetInfo/AArch64TargetInfo.h"
16#include "Utils/AArch64BaseInfo.h"
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallSet.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/StringExtras.h"
24#include "llvm/ADT/StringMap.h"
25#include "llvm/ADT/StringRef.h"
26#include "llvm/ADT/StringSwitch.h"
27#include "llvm/ADT/Twine.h"
28#include "llvm/MC/MCAsmInfo.h"
29#include "llvm/MC/MCContext.h"
30#include "llvm/MC/MCExpr.h"
31#include "llvm/MC/MCInst.h"
32#include "llvm/MC/MCLinkerOptimizationHint.h"
33#include "llvm/MC/MCObjectFileInfo.h"
34#include "llvm/MC/MCParser/AsmLexer.h"
35#include "llvm/MC/MCParser/MCAsmParser.h"
36#include "llvm/MC/MCParser/MCAsmParserExtension.h"
37#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
38#include "llvm/MC/MCParser/MCTargetAsmParser.h"
39#include "llvm/MC/MCRegisterInfo.h"
40#include "llvm/MC/MCStreamer.h"
41#include "llvm/MC/MCSubtargetInfo.h"
42#include "llvm/MC/MCSymbol.h"
43#include "llvm/MC/MCTargetOptions.h"
44#include "llvm/MC/MCValue.h"
45#include "llvm/MC/TargetRegistry.h"
46#include "llvm/Support/AArch64BuildAttributes.h"
47#include "llvm/Support/Compiler.h"
48#include "llvm/Support/ErrorHandling.h"
49#include "llvm/Support/MathExtras.h"
50#include "llvm/Support/SMLoc.h"
51#include "llvm/Support/raw_ostream.h"
52#include "llvm/TargetParser/AArch64TargetParser.h"
53#include "llvm/TargetParser/SubtargetFeature.h"
54#include <cassert>
55#include <cctype>
56#include <cstdint>
57#include <cstdio>
58#include <optional>
59#include <string>
60#include <tuple>
61#include <utility>
62#include <vector>
63
64using namespace llvm;
65
66namespace {
67
/// The broad class of register a parsed register name belongs to. Used to
/// keep .req aliases and register parsing routines honest about which
/// register file they are matching against.
enum class RegKind {
  Scalar,                // General-purpose / scalar register.
  NeonVector,            // NEON vector register.
  SVEDataVector,         // SVE data vector register.
  SVEPredicateAsCounter, // SVE predicate register used as a counter.
  SVEPredicateVector,    // SVE predicate vector register.
  Matrix,                // Matrix (ZA) register.
  LookupTable            // Lookup-table register.
};
77
/// How a matrix register operand is being accessed: the whole array, a tile,
/// or a tile slice selected by row or column.
enum class MatrixKind { Array, Tile, Row, Col };
79
/// Describes how a parsed register may relate to the register class the
/// matcher expects: it must be the same register, or the corresponding
/// super- or sub-register (e.g. for GPR64as32 / GPR32as64 operands).
enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};
85
86class AArch64AsmParser : public MCTargetAsmParser {
87private:
88 StringRef Mnemonic; ///< Instruction mnemonic.
89
90 // Map of register aliases registers via the .req directive.
91 StringMap<std::pair<RegKind, MCRegister>> RegisterReqs;
92
93 class PrefixInfo {
94 public:
95 static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
96 PrefixInfo Prefix;
97 switch (Inst.getOpcode()) {
98 case AArch64::MOVPRFX_ZZ:
99 Prefix.Active = true;
100 Prefix.Dst = Inst.getOperand(i: 0).getReg();
101 break;
102 case AArch64::MOVPRFX_ZPmZ_B:
103 case AArch64::MOVPRFX_ZPmZ_H:
104 case AArch64::MOVPRFX_ZPmZ_S:
105 case AArch64::MOVPRFX_ZPmZ_D:
106 Prefix.Active = true;
107 Prefix.Predicated = true;
108 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
109 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
110 "No destructive element size set for movprfx");
111 Prefix.Dst = Inst.getOperand(i: 0).getReg();
112 Prefix.Pg = Inst.getOperand(i: 2).getReg();
113 break;
114 case AArch64::MOVPRFX_ZPzZ_B:
115 case AArch64::MOVPRFX_ZPzZ_H:
116 case AArch64::MOVPRFX_ZPzZ_S:
117 case AArch64::MOVPRFX_ZPzZ_D:
118 Prefix.Active = true;
119 Prefix.Predicated = true;
120 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
121 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
122 "No destructive element size set for movprfx");
123 Prefix.Dst = Inst.getOperand(i: 0).getReg();
124 Prefix.Pg = Inst.getOperand(i: 1).getReg();
125 break;
126 default:
127 break;
128 }
129
130 return Prefix;
131 }
132
133 PrefixInfo() = default;
134 bool isActive() const { return Active; }
135 bool isPredicated() const { return Predicated; }
136 unsigned getElementSize() const {
137 assert(Predicated);
138 return ElementSize;
139 }
140 MCRegister getDstReg() const { return Dst; }
141 MCRegister getPgReg() const {
142 assert(Predicated);
143 return Pg;
144 }
145
146 private:
147 bool Active = false;
148 bool Predicated = false;
149 unsigned ElementSize;
150 MCRegister Dst;
151 MCRegister Pg;
152 } NextPrefix;
153
154 AArch64TargetStreamer &getTargetStreamer() {
155 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
156 return static_cast<AArch64TargetStreamer &>(TS);
157 }
158
159 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
160
161 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
162 bool parseSyslAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
163 bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
164 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
165 AArch64CC::CondCode parseCondCodeString(StringRef Cond,
166 std::string &Suggestion);
167 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
168 MCRegister matchRegisterNameAlias(StringRef Name, RegKind Kind);
169 bool parseRegister(OperandVector &Operands);
170 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
171 bool parseNeonVectorList(OperandVector &Operands);
172 bool parseOptionalMulOperand(OperandVector &Operands);
173 bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
174 bool parseKeywordOperand(OperandVector &Operands);
175 bool parseOperand(OperandVector &Operands, bool isCondCode,
176 bool invertCondCode);
177 bool parseImmExpr(int64_t &Out);
178 bool parseComma();
179 bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
180 unsigned Last);
181
182 bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
183 OperandVector &Operands);
184
185 bool parseExprWithSpecifier(const MCExpr *&Res, SMLoc &E);
186 bool parseDataExpr(const MCExpr *&Res) override;
187 bool parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc);
188
189 bool parseDirectiveArch(SMLoc L);
190 bool parseDirectiveArchExtension(SMLoc L);
191 bool parseDirectiveCPU(SMLoc L);
192 bool parseDirectiveInst(SMLoc L);
193
194 bool parseDirectiveTLSDescCall(SMLoc L);
195
196 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
197 bool parseDirectiveLtorg(SMLoc L);
198
199 bool parseDirectiveReq(StringRef Name, SMLoc L);
200 bool parseDirectiveUnreq(SMLoc L);
201 bool parseDirectiveCFINegateRAState();
202 bool parseDirectiveCFINegateRAStateWithPC();
203 bool parseDirectiveCFIBKeyFrame();
204 bool parseDirectiveCFIMTETaggedFrame();
205
206 bool parseDirectiveVariantPCS(SMLoc L);
207
208 bool parseDirectiveSEHAllocStack(SMLoc L);
209 bool parseDirectiveSEHPrologEnd(SMLoc L);
210 bool parseDirectiveSEHSaveR19R20X(SMLoc L);
211 bool parseDirectiveSEHSaveFPLR(SMLoc L);
212 bool parseDirectiveSEHSaveFPLRX(SMLoc L);
213 bool parseDirectiveSEHSaveReg(SMLoc L);
214 bool parseDirectiveSEHSaveRegX(SMLoc L);
215 bool parseDirectiveSEHSaveRegP(SMLoc L);
216 bool parseDirectiveSEHSaveRegPX(SMLoc L);
217 bool parseDirectiveSEHSaveLRPair(SMLoc L);
218 bool parseDirectiveSEHSaveFReg(SMLoc L);
219 bool parseDirectiveSEHSaveFRegX(SMLoc L);
220 bool parseDirectiveSEHSaveFRegP(SMLoc L);
221 bool parseDirectiveSEHSaveFRegPX(SMLoc L);
222 bool parseDirectiveSEHSetFP(SMLoc L);
223 bool parseDirectiveSEHAddFP(SMLoc L);
224 bool parseDirectiveSEHNop(SMLoc L);
225 bool parseDirectiveSEHSaveNext(SMLoc L);
226 bool parseDirectiveSEHEpilogStart(SMLoc L);
227 bool parseDirectiveSEHEpilogEnd(SMLoc L);
228 bool parseDirectiveSEHTrapFrame(SMLoc L);
229 bool parseDirectiveSEHMachineFrame(SMLoc L);
230 bool parseDirectiveSEHContext(SMLoc L);
231 bool parseDirectiveSEHECContext(SMLoc L);
232 bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
233 bool parseDirectiveSEHPACSignLR(SMLoc L);
234 bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);
235 bool parseDirectiveSEHAllocZ(SMLoc L);
236 bool parseDirectiveSEHSaveZReg(SMLoc L);
237 bool parseDirectiveSEHSavePReg(SMLoc L);
238 bool parseDirectiveAeabiSubSectionHeader(SMLoc L);
239 bool parseDirectiveAeabiAArch64Attr(SMLoc L);
240
241 bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
242 SmallVectorImpl<SMLoc> &Loc);
243 unsigned getNumRegsForRegKind(RegKind K);
244 bool matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
245 OperandVector &Operands, MCStreamer &Out,
246 uint64_t &ErrorInfo,
247 bool MatchingInlineAsm) override;
248 /// @name Auto-generated Match Functions
249 /// {
250
251#define GET_ASSEMBLER_HEADER
252#include "AArch64GenAsmMatcher.inc"
253
254 /// }
255
256 ParseStatus tryParseScalarRegister(MCRegister &Reg);
257 ParseStatus tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
258 RegKind MatchKind);
259 ParseStatus tryParseMatrixRegister(OperandVector &Operands);
260 ParseStatus tryParseSVCR(OperandVector &Operands);
261 ParseStatus tryParseOptionalShiftExtend(OperandVector &Operands);
262 ParseStatus tryParseBarrierOperand(OperandVector &Operands);
263 ParseStatus tryParseBarriernXSOperand(OperandVector &Operands);
264 ParseStatus tryParseSysReg(OperandVector &Operands);
265 ParseStatus tryParseSysCROperand(OperandVector &Operands);
266 template <bool IsSVEPrefetch = false>
267 ParseStatus tryParsePrefetch(OperandVector &Operands);
268 ParseStatus tryParseRPRFMOperand(OperandVector &Operands);
269 ParseStatus tryParsePSBHint(OperandVector &Operands);
270 ParseStatus tryParseBTIHint(OperandVector &Operands);
271 ParseStatus tryParseCMHPriorityHint(OperandVector &Operands);
272 ParseStatus tryParseTIndexHint(OperandVector &Operands);
273 ParseStatus tryParseAdrpLabel(OperandVector &Operands);
274 ParseStatus tryParseAdrLabel(OperandVector &Operands);
275 template <bool AddFPZeroAsLiteral>
276 ParseStatus tryParseFPImm(OperandVector &Operands);
277 ParseStatus tryParseImmWithOptionalShift(OperandVector &Operands);
278 ParseStatus tryParseGPR64sp0Operand(OperandVector &Operands);
279 bool tryParseNeonVectorRegister(OperandVector &Operands);
280 ParseStatus tryParseVectorIndex(OperandVector &Operands);
281 ParseStatus tryParseGPRSeqPair(OperandVector &Operands);
282 ParseStatus tryParseSyspXzrPair(OperandVector &Operands);
283 template <bool ParseShiftExtend,
284 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
285 ParseStatus tryParseGPROperand(OperandVector &Operands);
286 ParseStatus tryParseZTOperand(OperandVector &Operands);
287 template <bool ParseShiftExtend, bool ParseSuffix>
288 ParseStatus tryParseSVEDataVector(OperandVector &Operands);
289 template <RegKind RK>
290 ParseStatus tryParseSVEPredicateVector(OperandVector &Operands);
291 ParseStatus
292 tryParseSVEPredicateOrPredicateAsCounterVector(OperandVector &Operands);
293 template <RegKind VectorKind>
294 ParseStatus tryParseVectorList(OperandVector &Operands,
295 bool ExpectMatch = false);
296 ParseStatus tryParseMatrixTileList(OperandVector &Operands);
297 ParseStatus tryParseSVEPattern(OperandVector &Operands);
298 ParseStatus tryParseSVEVecLenSpecifier(OperandVector &Operands);
299 ParseStatus tryParseGPR64x8(OperandVector &Operands);
300 ParseStatus tryParseImmRange(OperandVector &Operands);
301 template <int> ParseStatus tryParseAdjImm0_63(OperandVector &Operands);
302 ParseStatus tryParsePHintInstOperand(OperandVector &Operands);
303
304public:
305 enum AArch64MatchResultTy {
306 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
307#define GET_OPERAND_DIAGNOSTIC_TYPES
308#include "AArch64GenAsmMatcher.inc"
309 };
310 bool IsILP32;
311 bool IsWindowsArm64EC;
312
313 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
314 const MCInstrInfo &MII, const MCTargetOptions &Options)
315 : MCTargetAsmParser(Options, STI, MII) {
316 IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
317 IsWindowsArm64EC = STI.getTargetTriple().isWindowsArm64EC();
318 MCAsmParserExtension::Initialize(Parser);
319 MCStreamer &S = getParser().getStreamer();
320 if (S.getTargetStreamer() == nullptr)
321 new AArch64TargetStreamer(S);
322
323 // Alias .hword/.word/.[dx]word to the target-independent
324 // .2byte/.4byte/.8byte directives as they have the same form and
325 // semantics:
326 /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
327 Parser.addAliasForDirective(Directive: ".hword", Alias: ".2byte");
328 Parser.addAliasForDirective(Directive: ".word", Alias: ".4byte");
329 Parser.addAliasForDirective(Directive: ".dword", Alias: ".8byte");
330 Parser.addAliasForDirective(Directive: ".xword", Alias: ".8byte");
331
332 // Initialize the set of available features.
333 setAvailableFeatures(ComputeAvailableFeatures(FB: getSTI().getFeatureBits()));
334 }
335
336 bool areEqualRegs(const MCParsedAsmOperand &Op1,
337 const MCParsedAsmOperand &Op2) const override;
338 bool parseInstruction(ParseInstructionInfo &Info, StringRef Name,
339 SMLoc NameLoc, OperandVector &Operands) override;
340 bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
341 ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
342 SMLoc &EndLoc) override;
343 bool ParseDirective(AsmToken DirectiveID) override;
344 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
345 unsigned Kind) override;
346
347 static bool classifySymbolRef(const MCExpr *Expr, AArch64::Specifier &ELFSpec,
348 AArch64::Specifier &DarwinSpec,
349 int64_t &Addend);
350};
351
352/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
353/// instruction.
354class AArch64Operand : public MCParsedAsmOperand {
355private:
356 enum KindTy {
357 k_Immediate,
358 k_ShiftedImm,
359 k_ImmRange,
360 k_CondCode,
361 k_Register,
362 k_MatrixRegister,
363 k_MatrixTileList,
364 k_SVCR,
365 k_VectorList,
366 k_VectorIndex,
367 k_Token,
368 k_SysReg,
369 k_SysCR,
370 k_Prefetch,
371 k_ShiftExtend,
372 k_FPImm,
373 k_Barrier,
374 k_PSBHint,
375 k_PHint,
376 k_BTIHint,
377 k_CMHPriorityHint,
378 k_TIndexHint,
379 } Kind;
380
381 SMLoc StartLoc, EndLoc;
382
383 struct TokOp {
384 const char *Data;
385 unsigned Length;
386 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
387 };
388
389 // Separate shift/extend operand.
390 struct ShiftExtendOp {
391 AArch64_AM::ShiftExtendType Type;
392 unsigned Amount;
393 bool HasExplicitAmount;
394 };
395
396 struct RegOp {
397 MCRegister Reg;
398 RegKind Kind;
399 int ElementWidth;
400
401 // The register may be allowed as a different register class,
402 // e.g. for GPR64as32 or GPR32as64.
403 RegConstraintEqualityTy EqualityTy;
404
405 // In some cases the shift/extend needs to be explicitly parsed together
406 // with the register, rather than as a separate operand. This is needed
407 // for addressing modes where the instruction as a whole dictates the
408 // scaling/extend, rather than specific bits in the instruction.
409 // By parsing them as a single operand, we avoid the need to pass an
410 // extra operand in all CodeGen patterns (because all operands need to
411 // have an associated value), and we avoid the need to update TableGen to
412 // accept operands that have no associated bits in the instruction.
413 //
414 // An added benefit of parsing them together is that the assembler
415 // can give a sensible diagnostic if the scaling is not correct.
416 //
417 // The default is 'lsl #0' (HasExplicitAmount = false) if no
418 // ShiftExtend is specified.
419 ShiftExtendOp ShiftExtend;
420 };
421
422 struct MatrixRegOp {
423 MCRegister Reg;
424 unsigned ElementWidth;
425 MatrixKind Kind;
426 };
427
428 struct MatrixTileListOp {
429 unsigned RegMask = 0;
430 };
431
432 struct VectorListOp {
433 MCRegister Reg;
434 unsigned Count;
435 unsigned Stride;
436 unsigned NumElements;
437 unsigned ElementWidth;
438 RegKind RegisterKind;
439 };
440
441 struct VectorIndexOp {
442 int Val;
443 };
444
445 struct ImmOp {
446 const MCExpr *Val;
447 };
448
449 struct ShiftedImmOp {
450 const MCExpr *Val;
451 unsigned ShiftAmount;
452 };
453
454 struct ImmRangeOp {
455 unsigned First;
456 unsigned Last;
457 };
458
459 struct CondCodeOp {
460 AArch64CC::CondCode Code;
461 };
462
463 struct FPImmOp {
464 uint64_t Val; // APFloat value bitcasted to uint64_t.
465 bool IsExact; // describes whether parsed value was exact.
466 };
467
468 struct BarrierOp {
469 const char *Data;
470 unsigned Length;
471 unsigned Val; // Not the enum since not all values have names.
472 bool HasnXSModifier;
473 };
474
475 struct SysRegOp {
476 const char *Data;
477 unsigned Length;
478 uint32_t MRSReg;
479 uint32_t MSRReg;
480 uint32_t PStateField;
481 };
482
483 struct SysCRImmOp {
484 unsigned Val;
485 };
486
487 struct PrefetchOp {
488 const char *Data;
489 unsigned Length;
490 unsigned Val;
491 };
492
493 struct PSBHintOp {
494 const char *Data;
495 unsigned Length;
496 unsigned Val;
497 };
498 struct PHintOp {
499 const char *Data;
500 unsigned Length;
501 unsigned Val;
502 };
503 struct BTIHintOp {
504 const char *Data;
505 unsigned Length;
506 unsigned Val;
507 };
508 struct CMHPriorityHintOp {
509 const char *Data;
510 unsigned Length;
511 unsigned Val;
512 };
513 struct TIndexHintOp {
514 const char *Data;
515 unsigned Length;
516 unsigned Val;
517 };
518
519 struct SVCROp {
520 const char *Data;
521 unsigned Length;
522 unsigned PStateField;
523 };
524
525 union {
526 struct TokOp Tok;
527 struct RegOp Reg;
528 struct MatrixRegOp MatrixReg;
529 struct MatrixTileListOp MatrixTileList;
530 struct VectorListOp VectorList;
531 struct VectorIndexOp VectorIndex;
532 struct ImmOp Imm;
533 struct ShiftedImmOp ShiftedImm;
534 struct ImmRangeOp ImmRange;
535 struct CondCodeOp CondCode;
536 struct FPImmOp FPImm;
537 struct BarrierOp Barrier;
538 struct SysRegOp SysReg;
539 struct SysCRImmOp SysCRImm;
540 struct PrefetchOp Prefetch;
541 struct PSBHintOp PSBHint;
542 struct PHintOp PHint;
543 struct BTIHintOp BTIHint;
544 struct CMHPriorityHintOp CMHPriorityHint;
545 struct TIndexHintOp TIndexHint;
546 struct ShiftExtendOp ShiftExtend;
547 struct SVCROp SVCR;
548 };
549
550 // Keep the MCContext around as the MCExprs may need manipulated during
551 // the add<>Operands() calls.
552 MCContext &Ctx;
553
554public:
555 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
556
557 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
558 Kind = o.Kind;
559 StartLoc = o.StartLoc;
560 EndLoc = o.EndLoc;
561 switch (Kind) {
562 case k_Token:
563 Tok = o.Tok;
564 break;
565 case k_Immediate:
566 Imm = o.Imm;
567 break;
568 case k_ShiftedImm:
569 ShiftedImm = o.ShiftedImm;
570 break;
571 case k_ImmRange:
572 ImmRange = o.ImmRange;
573 break;
574 case k_CondCode:
575 CondCode = o.CondCode;
576 break;
577 case k_FPImm:
578 FPImm = o.FPImm;
579 break;
580 case k_Barrier:
581 Barrier = o.Barrier;
582 break;
583 case k_Register:
584 Reg = o.Reg;
585 break;
586 case k_MatrixRegister:
587 MatrixReg = o.MatrixReg;
588 break;
589 case k_MatrixTileList:
590 MatrixTileList = o.MatrixTileList;
591 break;
592 case k_VectorList:
593 VectorList = o.VectorList;
594 break;
595 case k_VectorIndex:
596 VectorIndex = o.VectorIndex;
597 break;
598 case k_SysReg:
599 SysReg = o.SysReg;
600 break;
601 case k_SysCR:
602 SysCRImm = o.SysCRImm;
603 break;
604 case k_Prefetch:
605 Prefetch = o.Prefetch;
606 break;
607 case k_PSBHint:
608 PSBHint = o.PSBHint;
609 break;
610 case k_PHint:
611 PHint = o.PHint;
612 break;
613 case k_BTIHint:
614 BTIHint = o.BTIHint;
615 break;
616 case k_CMHPriorityHint:
617 CMHPriorityHint = o.CMHPriorityHint;
618 break;
619 case k_TIndexHint:
620 TIndexHint = o.TIndexHint;
621 break;
622 case k_ShiftExtend:
623 ShiftExtend = o.ShiftExtend;
624 break;
625 case k_SVCR:
626 SVCR = o.SVCR;
627 break;
628 }
629 }
630
631 /// getStartLoc - Get the location of the first token of this operand.
632 SMLoc getStartLoc() const override { return StartLoc; }
633 /// getEndLoc - Get the location of the last token of this operand.
634 SMLoc getEndLoc() const override { return EndLoc; }
635
636 StringRef getToken() const {
637 assert(Kind == k_Token && "Invalid access!");
638 return StringRef(Tok.Data, Tok.Length);
639 }
640
641 bool isTokenSuffix() const {
642 assert(Kind == k_Token && "Invalid access!");
643 return Tok.IsSuffix;
644 }
645
646 const MCExpr *getImm() const {
647 assert(Kind == k_Immediate && "Invalid access!");
648 return Imm.Val;
649 }
650
651 const MCExpr *getShiftedImmVal() const {
652 assert(Kind == k_ShiftedImm && "Invalid access!");
653 return ShiftedImm.Val;
654 }
655
656 unsigned getShiftedImmShift() const {
657 assert(Kind == k_ShiftedImm && "Invalid access!");
658 return ShiftedImm.ShiftAmount;
659 }
660
661 unsigned getFirstImmVal() const {
662 assert(Kind == k_ImmRange && "Invalid access!");
663 return ImmRange.First;
664 }
665
666 unsigned getLastImmVal() const {
667 assert(Kind == k_ImmRange && "Invalid access!");
668 return ImmRange.Last;
669 }
670
671 AArch64CC::CondCode getCondCode() const {
672 assert(Kind == k_CondCode && "Invalid access!");
673 return CondCode.Code;
674 }
675
676 APFloat getFPImm() const {
677 assert (Kind == k_FPImm && "Invalid access!");
678 return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
679 }
680
681 bool getFPImmIsExact() const {
682 assert (Kind == k_FPImm && "Invalid access!");
683 return FPImm.IsExact;
684 }
685
686 unsigned getBarrier() const {
687 assert(Kind == k_Barrier && "Invalid access!");
688 return Barrier.Val;
689 }
690
691 StringRef getBarrierName() const {
692 assert(Kind == k_Barrier && "Invalid access!");
693 return StringRef(Barrier.Data, Barrier.Length);
694 }
695
696 bool getBarriernXSModifier() const {
697 assert(Kind == k_Barrier && "Invalid access!");
698 return Barrier.HasnXSModifier;
699 }
700
701 MCRegister getReg() const override {
702 assert(Kind == k_Register && "Invalid access!");
703 return Reg.Reg;
704 }
705
706 MCRegister getMatrixReg() const {
707 assert(Kind == k_MatrixRegister && "Invalid access!");
708 return MatrixReg.Reg;
709 }
710
711 unsigned getMatrixElementWidth() const {
712 assert(Kind == k_MatrixRegister && "Invalid access!");
713 return MatrixReg.ElementWidth;
714 }
715
716 MatrixKind getMatrixKind() const {
717 assert(Kind == k_MatrixRegister && "Invalid access!");
718 return MatrixReg.Kind;
719 }
720
721 unsigned getMatrixTileListRegMask() const {
722 assert(isMatrixTileList() && "Invalid access!");
723 return MatrixTileList.RegMask;
724 }
725
726 RegConstraintEqualityTy getRegEqualityTy() const {
727 assert(Kind == k_Register && "Invalid access!");
728 return Reg.EqualityTy;
729 }
730
731 MCRegister getVectorListStart() const {
732 assert(Kind == k_VectorList && "Invalid access!");
733 return VectorList.Reg;
734 }
735
736 unsigned getVectorListCount() const {
737 assert(Kind == k_VectorList && "Invalid access!");
738 return VectorList.Count;
739 }
740
741 unsigned getVectorListStride() const {
742 assert(Kind == k_VectorList && "Invalid access!");
743 return VectorList.Stride;
744 }
745
746 int getVectorIndex() const {
747 assert(Kind == k_VectorIndex && "Invalid access!");
748 return VectorIndex.Val;
749 }
750
751 StringRef getSysReg() const {
752 assert(Kind == k_SysReg && "Invalid access!");
753 return StringRef(SysReg.Data, SysReg.Length);
754 }
755
756 unsigned getSysCR() const {
757 assert(Kind == k_SysCR && "Invalid access!");
758 return SysCRImm.Val;
759 }
760
761 unsigned getPrefetch() const {
762 assert(Kind == k_Prefetch && "Invalid access!");
763 return Prefetch.Val;
764 }
765
766 unsigned getPSBHint() const {
767 assert(Kind == k_PSBHint && "Invalid access!");
768 return PSBHint.Val;
769 }
770
771 unsigned getPHint() const {
772 assert(Kind == k_PHint && "Invalid access!");
773 return PHint.Val;
774 }
775
776 StringRef getPSBHintName() const {
777 assert(Kind == k_PSBHint && "Invalid access!");
778 return StringRef(PSBHint.Data, PSBHint.Length);
779 }
780
781 StringRef getPHintName() const {
782 assert(Kind == k_PHint && "Invalid access!");
783 return StringRef(PHint.Data, PHint.Length);
784 }
785
786 unsigned getBTIHint() const {
787 assert(Kind == k_BTIHint && "Invalid access!");
788 return BTIHint.Val;
789 }
790
791 StringRef getBTIHintName() const {
792 assert(Kind == k_BTIHint && "Invalid access!");
793 return StringRef(BTIHint.Data, BTIHint.Length);
794 }
795
796 unsigned getCMHPriorityHint() const {
797 assert(Kind == k_CMHPriorityHint && "Invalid access!");
798 return CMHPriorityHint.Val;
799 }
800
801 StringRef getCMHPriorityHintName() const {
802 assert(Kind == k_CMHPriorityHint && "Invalid access!");
803 return StringRef(CMHPriorityHint.Data, CMHPriorityHint.Length);
804 }
805
806 unsigned getTIndexHint() const {
807 assert(Kind == k_TIndexHint && "Invalid access!");
808 return TIndexHint.Val;
809 }
810
811 StringRef getTIndexHintName() const {
812 assert(Kind == k_TIndexHint && "Invalid access!");
813 return StringRef(TIndexHint.Data, TIndexHint.Length);
814 }
815
816 StringRef getSVCR() const {
817 assert(Kind == k_SVCR && "Invalid access!");
818 return StringRef(SVCR.Data, SVCR.Length);
819 }
820
821 StringRef getPrefetchName() const {
822 assert(Kind == k_Prefetch && "Invalid access!");
823 return StringRef(Prefetch.Data, Prefetch.Length);
824 }
825
826 AArch64_AM::ShiftExtendType getShiftExtendType() const {
827 if (Kind == k_ShiftExtend)
828 return ShiftExtend.Type;
829 if (Kind == k_Register)
830 return Reg.ShiftExtend.Type;
831 llvm_unreachable("Invalid access!");
832 }
833
834 unsigned getShiftExtendAmount() const {
835 if (Kind == k_ShiftExtend)
836 return ShiftExtend.Amount;
837 if (Kind == k_Register)
838 return Reg.ShiftExtend.Amount;
839 llvm_unreachable("Invalid access!");
840 }
841
842 bool hasShiftExtendAmount() const {
843 if (Kind == k_ShiftExtend)
844 return ShiftExtend.HasExplicitAmount;
845 if (Kind == k_Register)
846 return Reg.ShiftExtend.HasExplicitAmount;
847 llvm_unreachable("Invalid access!");
848 }
849
850 bool isImm() const override { return Kind == k_Immediate; }
851 bool isMem() const override { return false; }
852
853 bool isUImm6() const {
854 if (!isImm())
855 return false;
856 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
857 if (!MCE)
858 return false;
859 int64_t Val = MCE->getValue();
860 return (Val >= 0 && Val < 64);
861 }
862
863 template <int Width> bool isSImm() const {
864 return bool(isSImmScaled<Width, 1>());
865 }
866
867 template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
868 return isImmScaled<Bits, Scale>(true);
869 }
870
871 template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
872 DiagnosticPredicate isUImmScaled() const {
873 if (IsRange && isImmRange() &&
874 (getLastImmVal() != getFirstImmVal() + Offset))
875 return DiagnosticPredicate::NoMatch;
876
877 return isImmScaled<Bits, Scale, IsRange>(false);
878 }
879
880 template <int Bits, int Scale, bool IsRange = false>
881 DiagnosticPredicate isImmScaled(bool Signed) const {
882 if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
883 (isImmRange() && !IsRange))
884 return DiagnosticPredicate::NoMatch;
885
886 int64_t Val;
887 if (isImmRange())
888 Val = getFirstImmVal();
889 else {
890 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
891 if (!MCE)
892 return DiagnosticPredicate::NoMatch;
893 Val = MCE->getValue();
894 }
895
896 int64_t MinVal, MaxVal;
897 if (Signed) {
898 int64_t Shift = Bits - 1;
899 MinVal = (int64_t(1) << Shift) * -Scale;
900 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
901 } else {
902 MinVal = 0;
903 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
904 }
905
906 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
907 return DiagnosticPredicate::Match;
908
909 return DiagnosticPredicate::NearMatch;
910 }
911
912 DiagnosticPredicate isSVEPattern() const {
913 if (!isImm())
914 return DiagnosticPredicate::NoMatch;
915 auto *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
916 if (!MCE)
917 return DiagnosticPredicate::NoMatch;
918 int64_t Val = MCE->getValue();
919 if (Val >= 0 && Val < 32)
920 return DiagnosticPredicate::Match;
921 return DiagnosticPredicate::NearMatch;
922 }
923
924 DiagnosticPredicate isSVEVecLenSpecifier() const {
925 if (!isImm())
926 return DiagnosticPredicate::NoMatch;
927 auto *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
928 if (!MCE)
929 return DiagnosticPredicate::NoMatch;
930 int64_t Val = MCE->getValue();
931 if (Val >= 0 && Val <= 1)
932 return DiagnosticPredicate::Match;
933 return DiagnosticPredicate::NearMatch;
934 }
935
936 bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
937 AArch64::Specifier ELFSpec;
938 AArch64::Specifier DarwinSpec;
939 int64_t Addend;
940 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFSpec, DarwinSpec,
941 Addend)) {
942 // If we don't understand the expression, assume the best and
943 // let the fixup and relocation code deal with it.
944 return true;
945 }
946
947 if (DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
948 llvm::is_contained(
949 Set: {AArch64::S_LO12, AArch64::S_GOT_LO12, AArch64::S_GOT_AUTH_LO12,
950 AArch64::S_DTPREL_LO12, AArch64::S_DTPREL_LO12_NC,
951 AArch64::S_TPREL_LO12, AArch64::S_TPREL_LO12_NC,
952 AArch64::S_GOTTPREL_LO12_NC, AArch64::S_TLSDESC_LO12,
953 AArch64::S_TLSDESC_AUTH_LO12, AArch64::S_SECREL_LO12,
954 AArch64::S_SECREL_HI12, AArch64::S_GOT_PAGE_LO15},
955 Element: ELFSpec)) {
956 // Note that we don't range-check the addend. It's adjusted modulo page
957 // size when converted, so there is no "out of range" condition when using
958 // @pageoff.
959 return true;
960 } else if (DarwinSpec == AArch64::S_MACHO_GOTPAGEOFF ||
961 DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF) {
962 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
963 return Addend == 0;
964 }
965
966 return false;
967 }
968
  // True if this operand is a valid unsigned 12-bit scaled load/store offset:
  // a non-negative constant multiple of Scale whose scaled value fits in 12
  // bits, or a symbolic :lo12:-style reference (checked separately).
  template <int Scale> bool isUImm12Offset() const {
    if (!isImm())
      return false;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
    if (!MCE)
      return isSymbolicUImm12Offset(Expr: getImm());

    int64_t Val = MCE->getValue();
    return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
  }

  // True if this operand is a constant immediate in the inclusive range [N, M].
  template <int N, int M>
  bool isImmInRange() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= N && Val <= M);
  }

  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
  // a logical immediate can always be represented when inverted.
  template <typename T>
  bool isLogicalImm() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
    if (!MCE)
      return false;

    int64_t Val = MCE->getValue();
    // Avoid left shift by 64 directly.
    uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
    // Allow all-0 or all-1 in top bits to permit bitwise NOT.
    if ((Val & Upper) && (Val & Upper) != Upper)
      return false;

    return AArch64_AM::isLogicalImmediate(imm: Val & ~Upper, regSize: sizeof(T) * 8);
  }

  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  bool isImmRange() const { return Kind == k_ImmRange; }

  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
  /// immediate that can be shifted by 'Shift'.
  template <unsigned Width>
  std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(Val: getShiftedImmVal()))
        return std::make_pair(x: CE->getValue(), y: Width);

    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(Val: getImm())) {
        int64_t Val = CE->getValue();
        // If the low Width bits are clear, report the value pre-shifted by
        // Width; otherwise report it unshifted (shift of 0).
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(x: Val >> Width, y: Width);
        else
          return std::make_pair(x&: Val, y: 0u);
      }

    return {};
  }
1036
  // True if this operand is valid as the immediate of an ADD/SUB (immediate)
  // instruction: a 12-bit unsigned value optionally shifted left by 12, or a
  // symbolic reference with a page-offset / lo12-style relocation specifier.
  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    AArch64::Specifier ELFSpec;
    AArch64::Specifier DarwinSpec;
    int64_t Addend;
    // Symbolic operands are accepted only with the relocation specifiers that
    // resolve to a 12-bit page offset.
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFSpec, DarwinSpec,
                                            Addend)) {
      return DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
             DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF ||
             (DarwinSpec == AArch64::S_MACHO_GOTPAGEOFF && Addend == 0) ||
             llvm::is_contained(
                 Set: {AArch64::S_LO12, AArch64::S_GOT_AUTH_LO12,
                  AArch64::S_DTPREL_HI12, AArch64::S_DTPREL_LO12,
                  AArch64::S_DTPREL_LO12_NC, AArch64::S_TPREL_HI12,
                  AArch64::S_TPREL_LO12, AArch64::S_TPREL_LO12_NC,
                  AArch64::S_TLSDESC_LO12, AArch64::S_TLSDESC_AUTH_LO12,
                  AArch64::S_SECREL_HI12, AArch64::S_SECREL_LO12},
                 Element: ELFSpec);
    }

    // If it's a constant, it should be a real immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

    // If it's an expression, we hope for the best and let the fixup/relocation
    // code deal with it.
    return true;
  }

  // True if this operand is a negative immediate usable by the SUB-for-ADD
  // (and vice versa) aliasing, i.e. -imm fits the 12-bit ADD/SUB encoding.
  bool isAddSubImmNeg() const {
    if (!isShiftedImm() && !isImm())
      return false;

    // Otherwise it should be a real negative immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;

    return false;
  }
1090
  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For element-width of 8 bits a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed/unsigned.
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(Val: getImm())))
      return DiagnosticPredicate::NoMatch;

    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    // Byte elements cannot use the 'lsl #8' form; reject a non-zero shift.
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))
        return DiagnosticPredicate::Match;

    return DiagnosticPredicate::NearMatch;
  }

  // Unsigned value in the range 0 to 255. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range 0 to 65280.
  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(Val: getImm())))
      return DiagnosticPredicate::NoMatch;

    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
                                        << ShiftedImm->second))
        return DiagnosticPredicate::Match;

    return DiagnosticPredicate::NearMatch;
  }

  // A logical immediate that is not also encodable as an SVE CPY/DUP
  // immediate (in which case the CPY/DUP form is preferred by the matcher).
  template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
    if (isLogicalImm<T>() && !isSVECpyImm<T>())
      return DiagnosticPredicate::Match;
    return DiagnosticPredicate::NoMatch;
  }
1135
  bool isCondCode() const { return Kind == k_CondCode; }

  // True if this operand is a constant encodable with the AdvSIMD modified
  // immediate "type 10" encoding (each byte all-ones or all-zeros).
  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isAdvSIMDModImmType10(Imm: MCE->getValue());
  }

  // True if this operand is usable as an N-bit branch displacement: either a
  // symbolic target (resolved later by a fixup) or a 4-byte-aligned constant
  // that fits in a signed N-bit field of words.
  template<int N>
  bool isBranchTarget() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0x3)
      return false;
    assert(N > 0 && "Branch target immediate cannot be 0 bits!");
    return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
  }
1160
  // True if this operand is a symbolic reference whose (ELF) relocation
  // specifier is one of AllowedModifiers; Darwin specifiers are rejected.
  // Used by the MOVZ/MOVK :abs_gN:/:prel_gN:-style operands below.
  bool isMovWSymbol(ArrayRef<AArch64::Specifier> AllowedModifiers) const {
    if (!isImm())
      return false;

    AArch64::Specifier ELFSpec;
    AArch64::Specifier DarwinSpec;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr: getImm(), ELFSpec, DarwinSpec,
                                             Addend)) {
      return false;
    }
    if (DarwinSpec != AArch64::S_None)
      return false;

    return llvm::is_contained(Range&: AllowedModifiers, Element: ELFSpec);
  }

  // Specifiers selecting bits [63:48] of the symbol value.
  bool isMovWSymbolG3() const {
    return isMovWSymbol(AllowedModifiers: {AArch64::S_ABS_G3, AArch64::S_PREL_G3});
  }

  // Specifiers selecting bits [47:32].
  bool isMovWSymbolG2() const {
    return isMovWSymbol(AllowedModifiers: {AArch64::S_ABS_G2, AArch64::S_ABS_G2_S,
                                          AArch64::S_ABS_G2_NC, AArch64::S_PREL_G2,
                                          AArch64::S_PREL_G2_NC, AArch64::S_TPREL_G2,
                                          AArch64::S_DTPREL_G2});
  }

  // Specifiers selecting bits [31:16].
  bool isMovWSymbolG1() const {
    return isMovWSymbol(AllowedModifiers: {AArch64::S_ABS_G1, AArch64::S_ABS_G1_S,
                                          AArch64::S_ABS_G1_NC, AArch64::S_PREL_G1,
                                          AArch64::S_PREL_G1_NC, AArch64::S_GOTTPREL_G1,
                                          AArch64::S_TPREL_G1, AArch64::S_TPREL_G1_NC,
                                          AArch64::S_DTPREL_G1, AArch64::S_DTPREL_G1_NC});
  }

  // Specifiers selecting bits [15:0].
  bool isMovWSymbolG0() const {
    return isMovWSymbol(AllowedModifiers: {AArch64::S_ABS_G0, AArch64::S_ABS_G0_S,
                                          AArch64::S_ABS_G0_NC, AArch64::S_PREL_G0,
                                          AArch64::S_PREL_G0_NC, AArch64::S_GOTTPREL_G0_NC,
                                          AArch64::S_TPREL_G0, AArch64::S_TPREL_G0_NC,
                                          AArch64::S_DTPREL_G0, AArch64::S_DTPREL_G0_NC});
  }
1204
  // True if a MOV-immediate alias can be encoded as MOVZ with the given
  // register width and hw shift. Non-constant expressions are accepted only
  // with Shift == 0 (the fixup machinery handles them later).
  template<int RegWidth, int Shift>
  bool isMOVZMovAlias() const {
    if (!isImm()) return false;

    const MCExpr *E = getImm();
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: E)) {
      uint64_t Value = CE->getValue();

      return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
    }
    // Only supports the case of Shift being 0 if an expression is used as an
    // operand
    return !Shift && E;
  }

  // True if a MOV-immediate alias can be encoded as MOVN (moved-inverted)
  // with the given register width and hw shift; constants only.
  template<int RegWidth, int Shift>
  bool isMOVNMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
  }

  // True if this is an FP immediate representable in the 8-bit FMOV encoding
  // (getFP64Imm returns -1 for unencodable values).
  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(Imm: getFPImm().bitcastToAPInt()) != -1;
  }
1235
  // Barrier operand without the nXS qualifier (plain DSB/DMB/ISB forms).
  bool isBarrier() const {
    return Kind == k_Barrier && !getBarriernXSModifier();
  }
  // Barrier operand carrying the nXS qualifier (FEAT_XS DSB forms).
  bool isBarriernXS() const {
    return Kind == k_Barrier && getBarriernXSModifier();
  }
  bool isSysReg() const { return Kind == k_SysReg; }

  // System register readable via MRS (-1U marks "not valid for MRS").
  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }

  // System register writable via MSR (-1U marks "not valid for MSR").
  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }

  // PSTATE field taking a 1-bit immediate (MSR pstatefield, #imm).
  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return AArch64PState::lookupPStateImm0_1ByEncoding(Encoding: SysReg.PStateField);
  }

  // PSTATE field taking a 4-bit immediate.
  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg())
      return false;
    return AArch64PState::lookupPStateImm0_15ByEncoding(Encoding: SysReg.PStateField);
  }

  // SME streaming-mode control register operand (SMSTART/SMSTOP).
  bool isSVCR() const {
    if (Kind != k_SVCR)
      return false;
    return SVCR.PStateField != -1U;
  }

  bool isReg() const override {
    return Kind == k_Register;
  }

  bool isVectorList() const { return Kind == k_VectorList; }

  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }

  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }

  // NEON vector register restricted to the lower half of the register file
  // (V0-V15), as required by some indexed-element instructions.
  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
                Reg: Reg.Reg) ||
            AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
                Reg: Reg.Reg));
  }

  // NEON vector register restricted to V0-V7.
  bool isNeonVectorReg0to7() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_0to7RegClassID].contains(
               Reg: Reg.Reg));
  }

  bool isMatrix() const { return Kind == k_MatrixRegister; }
  bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1302
  // True if this is an SVE predicate-as-counter register (PN form) belonging
  // to the given register class.
  template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
    case AArch64::PPR_p8to15RegClassID:
    case AArch64::PNRRegClassID:
    case AArch64::PNR_p8to15RegClassID:
    case AArch64::PPRorPNRRegClassID:
      RK = RegKind::SVEPredicateAsCounter;
      break;
    default:
      llvm_unreachable("Unsupported register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(Reg: getReg());
  }

  // True if this is an SVE data (Z) or predicate (P) vector register of the
  // given register class; the expected RegKind is derived from the class.
  template <unsigned Class> bool isSVEVectorReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::ZPRRegClassID:
    case AArch64::ZPR_3bRegClassID:
    case AArch64::ZPR_4bRegClassID:
    case AArch64::ZPRMul2_LoRegClassID:
    case AArch64::ZPRMul2_HiRegClassID:
    case AArch64::ZPR_KRegClassID:
      RK = RegKind::SVEDataVector;
      break;
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
    case AArch64::PPR_p8to15RegClassID:
    case AArch64::PNRRegClassID:
    case AArch64::PNR_p8to15RegClassID:
    case AArch64::PPRorPNRRegClassID:
      RK = RegKind::SVEPredicateVector;
      break;
    default:
      llvm_unreachable("Unsupported register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(Reg: getReg());
  }

  // Scalar FP register (B/H/S/D/Q) viewed as a Z register of the given class.
  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(Reg: getReg());
  }
1353
  // SVE predicate register of the given class whose parsed element-width
  // suffix matches ElementWidth; NearMatch drives better diagnostics.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
      return DiagnosticPredicate::NoMatch;

    if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicate::Match;

    return DiagnosticPredicate::NearMatch;
  }

  // Accepts either a predicate (P) or predicate-as-counter (PN) register of
  // the given class and element width.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateOrPredicateAsCounterRegOfWidth() const {
    if (Kind != k_Register || (Reg.Kind != RegKind::SVEPredicateAsCounter &&
                               Reg.Kind != RegKind::SVEPredicateVector))
      return DiagnosticPredicate::NoMatch;

    if ((isSVEPredicateAsCounterReg<Class>() ||
         isSVEPredicateVectorRegOfWidth<ElementWidth, Class>()) &&
        Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicate::Match;

    return DiagnosticPredicate::NearMatch;
  }

  // Predicate-as-counter (PN) register of the given class and element width.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
      return DiagnosticPredicate::NoMatch;

    if (isSVEPredicateAsCounterReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicate::Match;

    return DiagnosticPredicate::NearMatch;
  }

  // SVE data (Z) register of the given class and element width.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
      return DiagnosticPredicate::NoMatch;

    if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicate::Match;

    return DiagnosticPredicate::NearMatch;
  }

  // SVE data register carrying a shift/extend suffix (e.g. "z0.s, uxtw #2").
  // Checks both the extend kind and the shift amount against the template
  // parameters.
  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicate::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(Value: ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicate::NoMatch;

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicate::Match;

    return DiagnosticPredicate::NearMatch;
  }
1423
  // A 64-bit GPR written where a 32-bit register is expected (the matcher
  // rewrites Xn -> Wn); see addGPR32as64Operands.
  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg: Reg.Reg);
  }

  // A 32-bit GPR written where a 64-bit register is expected (Wn -> Xn).
  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg: Reg.Reg);
  }

  // Eight consecutive X registers (x0_x1_..._x7 tuples), used by e.g. SYSP.
  bool isGPR64x8() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
               Reg: Reg.Reg);
  }

  // Even/odd W register pair (CASP and friends).
  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg: Reg.Reg);
  }

  // Even/odd X register pair.
  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg: Reg.Reg);
  }

  // The XZR operand accepted by SYSP in place of a register pair.
  bool isSyspXzrPair() const {
    return isGPR64<AArch64::GPR64RegClassID>() && Reg.Reg == AArch64::XZR;
  }

  // Complex-arithmetic rotation immediate: a multiple of Angle offset by
  // Remainder, no greater than 270 degrees.
  template<int64_t Angle, int64_t Remainder>
  DiagnosticPredicate isComplexRotation() const {
    if (!isImm())
      return DiagnosticPredicate::NoMatch;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
    if (!CE)
      return DiagnosticPredicate::NoMatch;
    uint64_t Value = CE->getValue();

    if (Value % Angle == Remainder && Value <= 270)
      return DiagnosticPredicate::Match;
    return DiagnosticPredicate::NearMatch;
  }

  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(Reg: getReg());
  }
1475
  // 64-bit GPR carrying an 'lsl #n' suffix scaled for ExtWidth-bit accesses.
  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
      return DiagnosticPredicate::NoMatch;

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(Value: ExtWidth / 8))
      return DiagnosticPredicate::Match;
    return DiagnosticPredicate::NearMatch;
  }

  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <RegKind VectorKind, unsigned NumRegs, bool IsConsecutive = false>
  bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           VectorList.NumElements == 0 &&
           VectorList.RegisterKind == VectorKind &&
           (!IsConsecutive || (VectorList.Stride == 1));
  }

  // Vector list whose parsed kind, register count, element count/width and
  // stride all match the template parameters.
  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth, unsigned Stride = 1>
  bool isTypedVectorList() const {
    if (Kind != k_VectorList)
      return false;
    if (VectorList.Count != NumRegs)
      return false;
    if (VectorList.RegisterKind != VectorKind)
      return false;
    if (VectorList.ElementWidth != ElementWidth)
      return false;
    if (VectorList.Stride != Stride)
      return false;
    return VectorList.NumElements == NumElements;
  }

  // Typed vector list whose first register additionally belongs to RegClass
  // (used for multi-vector operands constrained to multiples of NumRegs).
  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth, unsigned RegClass>
  DiagnosticPredicate isTypedVectorListMultiple() const {
    bool Res =
        isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
    if (!Res)
      return DiagnosticPredicate::NoMatch;
    if (!AArch64MCRegisterClasses[RegClass].contains(Reg: VectorList.Reg))
      return DiagnosticPredicate::NearMatch;
    return DiagnosticPredicate::Match;
  }

  // Strided (SME2) vector list: the first register must lie in the leading
  // Stride-sized slot of either the Z0- or Z16-based half of the file.
  template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
            unsigned ElementWidth>
  DiagnosticPredicate isTypedVectorListStrided() const {
    bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
                                 ElementWidth, Stride>();
    if (!Res)
      return DiagnosticPredicate::NoMatch;
    if ((VectorList.Reg < (AArch64::Z0 + Stride)) ||
        ((VectorList.Reg >= AArch64::Z16) &&
         (VectorList.Reg < (AArch64::Z16 + Stride))))
      return DiagnosticPredicate::Match;
    return DiagnosticPredicate::NoMatch;
  }

  // Vector element index in the inclusive range [Min, Max].
  template <int Min, int Max>
  DiagnosticPredicate isVectorIndex() const {
    if (Kind != k_VectorIndex)
      return DiagnosticPredicate::NoMatch;
    if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
      return DiagnosticPredicate::Match;
    return DiagnosticPredicate::NearMatch;
  }
1547
  bool isToken() const override { return Kind == k_Token; }

  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isPHint() const { return Kind == k_PHint; }
  bool isBTIHint() const { return Kind == k_BTIHint; }
  bool isCMHPriorityHint() const { return Kind == k_CMHPriorityHint; }
  bool isTIndexHint() const { return Kind == k_TIndexHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  // A shift-only operand (no register extends); MSL counts as a shift here.
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }

  // FP immediate that exactly equals the fixed constant identified by
  // ImmEnum (e.g. #0.5 for FCVT* variants); compared bitwise.
  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicate::NoMatch;

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(Enum: ImmEnum);
      assert(Desc && "Unknown enum value");

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      auto StatusOrErr =
          RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
      if (errorToBool(Err: StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
        llvm_unreachable("FP immediate is not exact");

      if (getFPImm().bitwiseIsEqual(RHS: RealVal))
        return DiagnosticPredicate::Match;
    }

    return DiagnosticPredicate::NearMatch;
  }

  // Matches either of two exact FP immediates.
  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    DiagnosticPredicate Res = DiagnosticPredicate::NoMatch;
    if ((Res = isExactFPImm<ImmA>()))
      return DiagnosticPredicate::Match;
    if ((Res = isExactFPImm<ImmB>()))
      return DiagnosticPredicate::Match;
    return Res;
  }
1603
  // Register-extend operand (uxtb/sxtb/.../lsl) with amount 0-4.
  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  bool isExtend64() const {
    if (!isExtend())
      return false;
    // Make sure the extend expects a 32-bit source register.
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
           ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
           ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
  }

  // Extend taking a 64-bit source register: uxtx/sxtx, or lsl as an alias.
  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  // 'lsl #n' with n in [0, 7] (3-bit shift field).
  bool isLSLImm3Shift() const {
    if (!isShiftExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::LSL && getShiftExtendAmount() <= 7;
  }

  // X-register offset extend for a Width-bit memory access: lsl/sxtx with an
  // amount of either log2(bytes) or 0.
  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Value: Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  // W-register offset extend for a Width-bit memory access: uxtw/sxtw with an
  // amount of either log2(bytes) or 0.
  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Value: Width / 8) ||
            getShiftExtendAmount() == 0);
  }
1660
  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }

  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    // (The previous comment listed 32/48, which only apply to the 64-bit
    // form below.)
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }

  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }
1707
  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector half-word shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }

  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL (shift-ones) left shift by 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }

  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  template<int Width>
  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }
1747
  // ADRP target: a symbol, or a page-aligned constant within +/-2^20 pages.
  bool isAdrpLabel() const {
    // Validation was handled during parsing, so we just verify that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (4096 * (1LL << (21 - 1)));
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
      return (Val % 4096) == 0 && Val >= Min && Val <= Max;
    }

    return true;
  }

  // ADR target: a symbol, or a constant within the signed 21-bit byte range.
  bool isAdrLabel() const {
    // Validation was handled during parsing, so we just verify that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (1LL << (21 - 1));
      int64_t Max = ((1LL << (21 - 1)) - 1);
      return Val >= Min && Val <= Max;
    }

    return true;
  }

  // SME matrix (ZA tile) register of the given kind, element size, and
  // register class.
  template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
  DiagnosticPredicate isMatrixRegOperand() const {
    if (!isMatrix())
      return DiagnosticPredicate::NoMatch;
    if (getMatrixKind() != Kind ||
        !AArch64MCRegisterClasses[RegClass].contains(Reg: getMatrixReg()) ||
        EltSize != getMatrixElementWidth())
      return DiagnosticPredicate::NearMatch;
    return DiagnosticPredicate::Match;
  }

  bool isPAuthPCRelLabel16Operand() const {
    // PAuth PCRel16 operands are similar to regular branch targets, but only
    // negative values are allowed for concrete immediates as signing instr
    // should be in a lower address.
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0b11)
      return false;
    return (Val <= 0) && (Val > -(1 << 18));
  }
1805
1806 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1807 // Add as immediates when possible. Null MCExpr = 0.
1808 if (!Expr)
1809 Inst.addOperand(Op: MCOperand::createImm(Val: 0));
1810 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Expr))
1811 Inst.addOperand(Op: MCOperand::createImm(Val: CE->getValue()));
1812 else
1813 Inst.addOperand(Op: MCOperand::createExpr(Val: Expr));
1814 }
1815
  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createReg(Reg: getReg()));
  }

  void addMatrixOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createReg(Reg: getMatrixReg()));
  }

  // Emit the W register with the same encoding as the parsed X register.
  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    MCRegister Reg = RI->getRegClass(i: AArch64::GPR32RegClassID)
                         .getRegister(i: RI->getEncodingValue(Reg: getReg()));

    Inst.addOperand(Op: MCOperand::createReg(Reg));
  }

  // Emit the X register with the same encoding as the parsed W register.
  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    MCRegister Reg = RI->getRegClass(i: AArch64::GPR64RegClassID)
                         .getRegister(i: RI->getEncodingValue(Reg: getReg()));

    Inst.addOperand(Op: MCOperand::createReg(Reg));
  }

  // Emit the Z register aliasing the parsed scalar FP register of the given
  // bit width (Bn/Hn/Sn/Dn/Qn -> Zn).
  template <int Width>
  void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
    unsigned Base;
    switch (Width) {
    case 8: Base = AArch64::B0; break;
    case 16: Base = AArch64::H0; break;
    case 32: Base = AArch64::S0; break;
    case 64: Base = AArch64::D0; break;
    case 128: Base = AArch64::Q0; break;
    default:
      llvm_unreachable("Unsupported width");
    }
    Inst.addOperand(Op: MCOperand::createReg(Reg: AArch64::Z0 + getReg() - Base));
  }

  void addPPRorPNRRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    MCRegister Reg = getReg();
    // Normalise to PPR
    if (Reg >= AArch64::PN0 && Reg <= AArch64::PN15)
      Reg = Reg - AArch64::PN0 + AArch64::P0;
    Inst.addOperand(Op: MCOperand::createReg(Reg));
  }

  // Emit the P register aliasing the parsed PN register (PNn -> Pn).
  void addPNRasPPRRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(
        Op: MCOperand::createReg(Reg: (getReg() - AArch64::PN0) + AArch64::P0));
  }

  // Emit the D register aliasing the parsed Q register (Qn -> Dn).
  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(Op: MCOperand::createReg(Reg: AArch64::D0 + getReg() - AArch64::Q0));
  }

  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(Op: MCOperand::createReg(Reg: getReg()));
  }

  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createReg(Reg: getReg()));
  }

  void addVectorReg0to7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createReg(Reg: getReg()));
  }
1903
1904 enum VecListIndexType {
1905 VecListIdx_DReg = 0,
1906 VecListIdx_QReg = 1,
1907 VecListIdx_ZReg = 2,
1908 VecListIdx_PReg = 3,
1909 };
1910
  // Emit a vector-list operand as a single tuple register.
  // Column 0 of FirstRegs holds the bank's base register (the encoding of the
  // parsed start register); columns 1..NumRegs hold the first tuple register
  // for each list length. The emitted register is then
  //   FirstRegs[RegTy][NumRegs] + (start - FirstRegs[RegTy][0]).
  template <VecListIndexType RegTy, unsigned NumRegs,
            bool IsConsecutive = false>
  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert((!IsConsecutive || (getVectorListStride() == 1)) &&
           "Expected consecutive registers");
    static const unsigned FirstRegs[][5] = {
      /* DReg */ { AArch64::Q0,
                   AArch64::D0, AArch64::D0_D1,
                   AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
      /* QReg */ { AArch64::Q0,
                   AArch64::Q0, AArch64::Q0_Q1,
                   AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
      /* ZReg */ { AArch64::Z0,
                   AArch64::Z0, AArch64::Z0_Z1,
                   AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
      /* PReg */ { AArch64::P0,
                   AArch64::P0, AArch64::P0_P1 }
    };

    assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
           " NumRegs must be <= 4 for ZRegs");

    assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
           " NumRegs must be <= 2 for PRegs");

    unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
    Inst.addOperand(Op: MCOperand::createReg(Reg: FirstReg + getVectorListStart() -
                                         FirstRegs[(unsigned)RegTy][0]));
  }
1941
  // Emit an SME2 strided vector list as a single strided-tuple register.
  // For 2 registers the start must lie in Z0-Z7 or Z16-Z23 (stride 8); for 4
  // registers in Z0-Z3 or Z16-Z19 (stride 4). The tuple register is the
  // corresponding base tuple plus the start register's offset from its bank.
  template <unsigned NumRegs>
  void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4");

    switch (NumRegs) {
    case 2:
      if (getVectorListStart() < AArch64::Z16) {
        assert((getVectorListStart() < AArch64::Z8) &&
               (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
        Inst.addOperand(Op: MCOperand::createReg(
            Reg: AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
      } else {
        assert((getVectorListStart() < AArch64::Z24) &&
               (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
        Inst.addOperand(Op: MCOperand::createReg(
            Reg: AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
      }
      break;
    case 4:
      if (getVectorListStart() < AArch64::Z16) {
        assert((getVectorListStart() < AArch64::Z4) &&
               (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
        Inst.addOperand(Op: MCOperand::createReg(
            Reg: AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
      } else {
        assert((getVectorListStart() < AArch64::Z20) &&
               (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
        Inst.addOperand(Op: MCOperand::createReg(
            Reg: AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
      }
      break;
    default:
      llvm_unreachable("Unsupported number of registers for strided vec list");
    }
  }
1978
  // Emit an SME tile list as its 8-bit register mask immediate.
  void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned RegMask = getMatrixTileListRegMask();
    assert(RegMask <= 0xFF && "Invalid mask!");
    Inst.addOperand(Op: MCOperand::createImm(Val: RegMask));
  }
1985
  // Emit a vector lane index (e.g. v0.s[2]) as an immediate operand.
  void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createImm(Val: getVectorIndex()));
  }
1990
  // Emit a two-valued exact FP immediate: 0 if it matched ImmIs0's value,
  // 1 if it matched ImmIs1's.
  template <unsigned ImmIs0, unsigned ImmIs1>
  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
    Inst.addOperand(Op: MCOperand::createImm(Val: bool(isExactFPImm<ImmIs1>())));
  }
1997
  // Emit the immediate expression; addExpr handles any pageoff adjustment.
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    // as-is.
    addExpr(Inst, Expr: getImm());
  }
2005
  // Emit an (immediate, shift-amount) operand pair. Prefer the canonicalized
  // value/shift from getShiftedVal; otherwise emit the parsed shifted-imm
  // pieces, or the plain immediate with an implicit shift of 0.
  template <int Shift>
  void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(Op: MCOperand::createImm(Val: ShiftedVal->first));
      Inst.addOperand(Op: MCOperand::createImm(Val: ShiftedVal->second));
    } else if (isShiftedImm()) {
      addExpr(Inst, Expr: getShiftedImmVal());
      Inst.addOperand(Op: MCOperand::createImm(Val: getShiftedImmShift()));
    } else {
      addExpr(Inst, Expr: getImm());
      Inst.addOperand(Op: MCOperand::createImm(Val: 0));
    }
  }
2020
  // Emit the negated immediate plus shift amount; only valid when the value
  // canonicalizes via getShiftedVal (matcher guarantees this).
  template <int Shift>
  void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(Op: MCOperand::createImm(Val: -ShiftedVal->first));
      Inst.addOperand(Op: MCOperand::createImm(Val: ShiftedVal->second));
    } else
      llvm_unreachable("Not a shifted negative immediate");
  }
2030
  // Emit the condition code (EQ, NE, ...) as an immediate operand.
  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createImm(Val: getCondCode()));
  }
2035
2036 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
2037 assert(N == 1 && "Invalid number of operands!");
2038 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
2039 if (!MCE)
2040 addExpr(Inst, Expr: getImm());
2041 else
2042 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 12));
2043 }
2044
  // ADR targets are byte-granular, so they are emitted like plain immediates.
  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }
2048
2049 template<int Scale>
2050 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2051 assert(N == 1 && "Invalid number of operands!");
2052 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
2053
2054 if (!MCE) {
2055 Inst.addOperand(Op: MCOperand::createExpr(Val: getImm()));
2056 return;
2057 }
2058 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() / Scale));
2059 }
2060
  // Emit an unsigned 6-bit immediate; the matcher guarantees a constant here.
  void addUImm6Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
    Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue()));
  }
2066
  // Emit a constant immediate divided by Scale (multiple-of-Scale operands).
  template <int Scale>
  void addImmScaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
    Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() / Scale));
  }
2073
  // Emit the first value of an immediate range, scaled down by Scale.
  template <int Scale>
  void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createImm(Val: getFirstImmVal() / Scale));
  }
2079
  // Encode the constant as an AArch64 logical immediate for a register of
  // width sizeof(T)*8 bits; T's unsigned variant truncates the value first.
  template <typename T>
  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
    std::make_unsigned_t<T> Val = MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(imm: Val, regSize: sizeof(T) * 8);
    Inst.addOperand(Op: MCOperand::createImm(Val: encoding));
  }
2088
  // Same as addLogicalImmOperands, but encodes the bitwise complement of the
  // constant (used by inverted-immediate aliases such as BIC from AND).
  template <typename T>
  void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
    std::make_unsigned_t<T> Val = ~MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(imm: Val, regSize: sizeof(T) * 8);
    Inst.addOperand(Op: MCOperand::createImm(Val: encoding));
  }
2097
  // Encode a 64-bit AdvSIMD modified immediate (type 10, e.g. for MOVI.2d).
  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
    uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(Imm: MCE->getValue());
    Inst.addOperand(Op: MCOperand::createImm(Val: encoding));
  }
2104
2105 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
2106 // Branch operands don't encode the low bits, so shift them off
2107 // here. If it's a label, however, just put it on directly as there's
2108 // not enough information now to do anything.
2109 assert(N == 1 && "Invalid number of operands!");
2110 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
2111 if (!MCE) {
2112 addExpr(Inst, Expr: getImm());
2113 return;
2114 }
2115 assert(MCE && "Invalid constant immediate operand!");
2116 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 2));
2117 }
2118
  // 16-bit PC-relative PAuth label. The low two bits are not encoded, so a
  // constant is shifted right by 2; symbolic targets go out as expressions.
  void addPAuthPCRelLabel16Operands(MCInst &Inst, unsigned N) const {
    // PC-relative operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
    if (!MCE) {
      addExpr(Inst, Expr: getImm());
      return;
    }
    Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 2));
  }
2131
2132 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
2133 // Branch operands don't encode the low bits, so shift them off
2134 // here. If it's a label, however, just put it on directly as there's
2135 // not enough information now to do anything.
2136 assert(N == 1 && "Invalid number of operands!");
2137 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
2138 if (!MCE) {
2139 addExpr(Inst, Expr: getImm());
2140 return;
2141 }
2142 assert(MCE && "Invalid constant immediate operand!");
2143 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 2));
2144 }
2145
2146 void addPCRelLabel9Operands(MCInst &Inst, unsigned N) const {
2147 // Branch operands don't encode the low bits, so shift them off
2148 // here. If it's a label, however, just put it on directly as there's
2149 // not enough information now to do anything.
2150 assert(N == 1 && "Invalid number of operands!");
2151 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
2152 if (!MCE) {
2153 addExpr(Inst, Expr: getImm());
2154 return;
2155 }
2156 assert(MCE && "Invalid constant immediate operand!");
2157 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 2));
2158 }
2159
2160 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
2161 // Branch operands don't encode the low bits, so shift them off
2162 // here. If it's a label, however, just put it on directly as there's
2163 // not enough information now to do anything.
2164 assert(N == 1 && "Invalid number of operands!");
2165 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
2166 if (!MCE) {
2167 addExpr(Inst, Expr: getImm());
2168 return;
2169 }
2170 assert(MCE && "Invalid constant immediate operand!");
2171 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 2));
2172 }
2173
  // Emit the floating-point immediate in its 8-bit FMOV encoding.
  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createImm(
        Val: AArch64_AM::getFP64Imm(Imm: getFPImm().bitcastToAPInt())));
  }
2179
  // Emit a barrier option (DSB/DMB/ISB) as its immediate encoding.
  void addBarrierOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createImm(Val: getBarrier()));
  }
2184
  // Emit an nXS-qualified barrier option as its immediate encoding.
  void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createImm(Val: getBarrier()));
  }
2189
  // Emit the system register's MRS (read) encoding.
  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(Op: MCOperand::createImm(Val: SysReg.MRSReg));
  }
2195
  // Emit the system register's MSR (write) encoding.
  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(Op: MCOperand::createImm(Val: SysReg.MSRReg));
  }
2201
  // Emit a PSTATE field that accepts an immediate in [0,1] (MSR pstate, #imm).
  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(Op: MCOperand::createImm(Val: SysReg.PStateField));
  }
2207
  // Emit the SME SVCR (streaming vector control register) pstate encoding.
  void addSVCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(Op: MCOperand::createImm(Val: SVCR.PStateField));
  }
2213
  // Emit a PSTATE field that accepts an immediate in [0,15] (MSR pstate, #imm).
  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(Op: MCOperand::createImm(Val: SysReg.PStateField));
  }
2219
  // Emit a system control register field (the Cn/Cm of SYS/SYSL) immediate.
  void addSysCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createImm(Val: getSysCR()));
  }
2224
  // Emit a prefetch operation (prfop) immediate.
  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createImm(Val: getPrefetch()));
  }
2229
  // Emit a PSB hint immediate.
  void addPSBHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createImm(Val: getPSBHint()));
  }
2234
  // Emit a prediction-hint immediate.
  void addPHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createImm(Val: getPHint()));
  }
2239
  // Emit a BTI hint immediate (value already biased at creation time).
  void addBTIHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createImm(Val: getBTIHint()));
  }
2244
  // Emit a CMH priority hint immediate.
  void addCMHPriorityHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createImm(Val: getCMHPriorityHint()));
  }
2249
  // Emit a TIndex hint immediate.
  void addTIndexHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createImm(Val: getTIndexHint()));
  }
2254
  // Pack the shift type and amount into the single shifter immediate encoding.
  void addShifterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Imm =
        AArch64_AM::getShifterImm(ST: getShiftExtendType(), Imm: getShiftExtendAmount());
    Inst.addOperand(Op: MCOperand::createImm(Val: Imm));
  }
2261
  // Emit only the shift amount of a 3-bit LSL shifter (type is implicit).
  void addLSLImm3ShifterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Imm = getShiftExtendAmount();
    Inst.addOperand(Op: MCOperand::createImm(Val: Imm));
  }
2267
  // SYSP's register pair alias: verify the parsed scalar register is XZR
  // (after normalizing W/X spellings through the GPR64 class) and emit XZR.
  void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    if (!isScalarReg())
      return;

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    // Map the register through its encoding into the GPR64 class so any
    // equivalent spelling lands on the canonical X-form register.
    MCRegister Reg = RI->getRegClass(i: AArch64::GPR64RegClassID)
                         .getRegister(i: RI->getEncodingValue(Reg: getReg()));
    if (Reg != AArch64::XZR)
      llvm_unreachable("wrong register");

    Inst.addOperand(Op: MCOperand::createReg(Reg: AArch64::XZR));
  }
2282
  // Emit an arithmetic extend operand for 32-bit forms; a plain LSL is
  // canonicalized to UXTW before encoding.
  void addExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, Imm: getShiftExtendAmount());
    Inst.addOperand(Op: MCOperand::createImm(Val: Imm));
  }
2290
  // Emit an arithmetic extend operand for 64-bit forms; a plain LSL is
  // canonicalized to UXTX before encoding.
  void addExtend64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, Imm: getShiftExtendAmount());
    Inst.addOperand(Op: MCOperand::createImm(Val: Imm));
  }
2298
  // Emit the (sign-extend?, shifted?) flag pair for a register-offset memory
  // operand: first whether the extend is signed, then whether the shift
  // amount is non-zero.
  void addMemExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(Op: MCOperand::createImm(Val: IsSigned));
    Inst.addOperand(Op: MCOperand::createImm(Val: getShiftExtendAmount() != 0));
  }
2306
  // For 8-bit load/store instructions with a register offset, both the
  // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
  // they're disambiguated by whether the shift was explicit or implicit rather
  // than its size.
  void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(Op: MCOperand::createImm(Val: IsSigned));
    // Presence of an explicit "#0" selects the DoShift variant.
    Inst.addOperand(Op: MCOperand::createImm(Val: hasShiftExtendAmount()));
  }
2318
2319 template<int Shift>
2320 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
2321 assert(N == 1 && "Invalid number of operands!");
2322
2323 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
2324 if (CE) {
2325 uint64_t Value = CE->getValue();
2326 Inst.addOperand(Op: MCOperand::createImm(Val: (Value >> Shift) & 0xffff));
2327 } else {
2328 addExpr(Inst, Expr: getImm());
2329 }
2330 }
2331
  // MOV alias of MOVN: emit the 16-bit chunk of the complemented value at the
  // given shift position; the matcher guarantees a constant here.
  template<int Shift>
  void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
    uint64_t Value = CE->getValue();
    Inst.addOperand(Op: MCOperand::createImm(Val: (~Value >> Shift) & 0xffff));
  }
2340
  // Complex rotation in {0,90,180,270}: encode as the multiple of 90 degrees.
  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
    Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() / 90));
  }
2346
  // Complex rotation in {90,270}: encode 90 as 0 and 270 as 1.
  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
    Inst.addOperand(Op: MCOperand::createImm(Val: (MCE->getValue() - 90) / 180));
  }
2352
2353 void print(raw_ostream &OS, const MCAsmInfo &MAI) const override;
2354
  // Create a token operand (mnemonic piece or suffix). The token does not own
  // its text; Str must outlive the operand.
  static std::unique_ptr<AArch64Operand>
  CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
    auto Op = std::make_unique<AArch64Operand>(args: k_Token, args&: Ctx);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->Tok.IsSuffix = IsSuffix;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2365
  // Create a register operand, optionally carrying an equality constraint and
  // a shift/extend modifier. ElementWidth starts at 0 (scalar); vector
  // factories overwrite it.
  static std::unique_ptr<AArch64Operand>
  CreateReg(MCRegister Reg, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
            AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
            unsigned ShiftAmount = 0, unsigned HasExplicitAmount = false) {
    auto Op = std::make_unique<AArch64Operand>(args: k_Register, args&: Ctx);
    Op->Reg.Reg = Reg;
    Op->Reg.Kind = Kind;
    Op->Reg.ElementWidth = 0;
    Op->Reg.EqualityTy = EqTy;
    Op->Reg.ShiftExtend.Type = ExtTy;
    Op->Reg.ShiftExtend.Amount = ShiftAmount;
    Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2383
  // Create a vector register operand: a plain register operand plus the
  // element width implied by the parsed suffix (e.g. ".4s").
  static std::unique_ptr<AArch64Operand> CreateVectorReg(
      MCRegister Reg, RegKind Kind, unsigned ElementWidth, SMLoc S, SMLoc E,
      MCContext &Ctx, AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
      unsigned ShiftAmount = 0, unsigned HasExplicitAmount = false) {
    assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
            Kind == RegKind::SVEPredicateVector ||
            Kind == RegKind::SVEPredicateAsCounter) &&
           "Invalid vector kind");
    auto Op = CreateReg(Reg, Kind, S, E, Ctx, EqTy: EqualsReg, ExtTy, ShiftAmount,
                        HasExplicitAmount);
    Op->Reg.ElementWidth = ElementWidth;
    return Op;
  }
2397
  // Create a vector-list operand ({v0.4s - v3.4s} etc.): start register,
  // list length, register stride, and per-element geometry.
  static std::unique_ptr<AArch64Operand>
  CreateVectorList(MCRegister Reg, unsigned Count, unsigned Stride,
                   unsigned NumElements, unsigned ElementWidth,
                   RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(args: k_VectorList, args&: Ctx);
    Op->VectorList.Reg = Reg;
    Op->VectorList.Count = Count;
    Op->VectorList.Stride = Stride;
    Op->VectorList.NumElements = NumElements;
    Op->VectorList.ElementWidth = ElementWidth;
    Op->VectorList.RegisterKind = RegisterKind;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2413
  // Create a vector lane-index operand (the "[i]" part of v0.s[i]).
  static std::unique_ptr<AArch64Operand>
  CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(args: k_VectorIndex, args&: Ctx);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2422
  // Create an SME tile-list operand holding an 8-bit register mask.
  static std::unique_ptr<AArch64Operand>
  CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(args: k_MatrixTileList, args&: Ctx);
    Op->MatrixTileList.RegMask = RegMask;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2431
  // Expand an SME tile register (at a given element width) into the set of
  // 64-bit ZAD tiles it overlaps, accumulating into OutRegs. ZAD tiles map to
  // themselves; narrower tiles alias several ZAD tiles per the keyed table.
  static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
                                  const unsigned ElementWidth) {
    // Keyed by (element width, tile register); width 0 means the whole ZA
    // array (same expansion as an 8-bit tile).
    static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
        RegMap = {
            {{0, AArch64::ZAB0},
             {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
              AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
            {{8, AArch64::ZAB0},
             {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
              AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
            {{16, AArch64::ZAH0},
             {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
            {{16, AArch64::ZAH1},
             {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
            {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
            {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
            {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
            {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
        };

    if (ElementWidth == 64)
      OutRegs.insert(V: Reg);
    else {
      std::vector<unsigned> Regs = RegMap[std::make_pair(x: ElementWidth, y&: Reg)];
      assert(!Regs.empty() && "Invalid tile or element width!");
      OutRegs.insert_range(R&: Regs);
    }
  }
2460
  // Create an immediate operand wrapping an MCExpr.
  static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
                                                   SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(args: k_Immediate, args&: Ctx);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2469
2470 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2471 unsigned ShiftAmount,
2472 SMLoc S, SMLoc E,
2473 MCContext &Ctx) {
2474 auto Op = std::make_unique<AArch64Operand>(args: k_ShiftedImm, args&: Ctx);
2475 Op->ShiftedImm .Val = Val;
2476 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2477 Op->StartLoc = S;
2478 Op->EndLoc = E;
2479 return Op;
2480 }
2481
2482 static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
2483 unsigned Last, SMLoc S,
2484 SMLoc E,
2485 MCContext &Ctx) {
2486 auto Op = std::make_unique<AArch64Operand>(args: k_ImmRange, args&: Ctx);
2487 Op->ImmRange.First = First;
2488 Op->ImmRange.Last = Last;
2489 Op->EndLoc = E;
2490 return Op;
2491 }
2492
  // Create a condition-code operand (EQ, NE, ...).
  static std::unique_ptr<AArch64Operand>
  CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(args: k_CondCode, args&: Ctx);
    Op->CondCode.Code = Code;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2501
  // Create a floating-point immediate operand; the APFloat's bit pattern is
  // stored, together with whether the parsed literal was exactly
  // representable.
  static std::unique_ptr<AArch64Operand>
  CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(args: k_FPImm, args&: Ctx);
    Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
    Op->FPImm.IsExact = IsExact;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2511
  // Create a barrier-option operand; keeps the spelling (non-owning) for
  // printing and records whether the nXS qualifier was present.
  static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
                                                       StringRef Str,
                                                       SMLoc S,
                                                       MCContext &Ctx,
                                                       bool HasnXSModifier) {
    auto Op = std::make_unique<AArch64Operand>(args: k_Barrier, args&: Ctx);
    Op->Barrier.Val = Val;
    Op->Barrier.Data = Str.data();
    Op->Barrier.Length = Str.size();
    Op->Barrier.HasnXSModifier = HasnXSModifier;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2526
  // Create a system-register operand carrying all three possible encodings
  // (MRS read, MSR write, PSTATE field); the consumer picks the right one.
  static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
                                                      uint32_t MRSReg,
                                                      uint32_t MSRReg,
                                                      uint32_t PStateField,
                                                      MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(args: k_SysReg, args&: Ctx);
    Op->SysReg.Data = Str.data();
    Op->SysReg.Length = Str.size();
    Op->SysReg.MRSReg = MRSReg;
    Op->SysReg.MSRReg = MSRReg;
    Op->SysReg.PStateField = PStateField;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2542
  // Create a prediction-hint operand; keeps the spelling (non-owning) for
  // printing.
  static std::unique_ptr<AArch64Operand>
  CreatePHintInst(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(args: k_PHint, args&: Ctx);
    Op->PHint.Val = Val;
    Op->PHint.Data = Str.data();
    Op->PHint.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2553
  // Create a system control-register field operand (the Cn/Cm of SYS).
  static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
                                                     SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(args: k_SysCR, args&: Ctx);
    Op->SysCRImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2562
2563 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2564 StringRef Str,
2565 SMLoc S,
2566 MCContext &Ctx) {
2567 auto Op = std::make_unique<AArch64Operand>(args: k_Prefetch, args&: Ctx);
2568 Op->Prefetch.Val = Val;
2569 Op->Barrier.Data = Str.data();
2570 Op->Barrier.Length = Str.size();
2571 Op->StartLoc = S;
2572 Op->EndLoc = S;
2573 return Op;
2574 }
2575
  // Create a PSB hint operand; keeps the spelling (non-owning) for printing.
  static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
                                                       StringRef Str,
                                                       SMLoc S,
                                                       MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(args: k_PSBHint, args&: Ctx);
    Op->PSBHint.Val = Val;
    Op->PSBHint.Data = Str.data();
    Op->PSBHint.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2588
  // Create a BTI hint operand. Bit 5 is set on the stored value (BTI hints
  // occupy the 32+ portion of the hint immediate space -- confirm against the
  // hint encoding tables).
  static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
                                                       StringRef Str,
                                                       SMLoc S,
                                                       MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(args: k_BTIHint, args&: Ctx);
    Op->BTIHint.Val = Val | 32;
    Op->BTIHint.Data = Str.data();
    Op->BTIHint.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2601
  // Create a CMH priority hint operand; keeps the spelling (non-owning) for
  // printing.
  static std::unique_ptr<AArch64Operand>
  CreateCMHPriorityHint(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(args: k_CMHPriorityHint, args&: Ctx);
    Op->CMHPriorityHint.Val = Val;
    Op->CMHPriorityHint.Data = Str.data();
    Op->CMHPriorityHint.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2612
  // Create a TIndex hint operand; keeps the spelling (non-owning) for
  // printing.
  static std::unique_ptr<AArch64Operand>
  CreateTIndexHint(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(args: k_TIndexHint, args&: Ctx);
    Op->TIndexHint.Val = Val;
    Op->TIndexHint.Data = Str.data();
    Op->TIndexHint.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2623
  // Create an SME matrix (ZA tile) register operand with its element width
  // and tile kind.
  static std::unique_ptr<AArch64Operand>
  CreateMatrixRegister(MCRegister Reg, unsigned ElementWidth, MatrixKind Kind,
                       SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(args: k_MatrixRegister, args&: Ctx);
    Op->MatrixReg.Reg = Reg;
    Op->MatrixReg.ElementWidth = ElementWidth;
    Op->MatrixReg.Kind = Kind;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2635
  // Create an SME SVCR operand; keeps the spelling (non-owning) for printing.
  static std::unique_ptr<AArch64Operand>
  CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(args: k_SVCR, args&: Ctx);
    Op->SVCR.PStateField = PStateField;
    Op->SVCR.Data = Str.data();
    Op->SVCR.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2646
  // Create a standalone shift/extend modifier operand (e.g. "lsl #3",
  // "uxtw"); HasExplicitAmount records whether an amount was written.
  static std::unique_ptr<AArch64Operand>
  CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
                    bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(args: k_ShiftExtend, args&: Ctx);
    Op->ShiftExtend.Type = ShOp;
    Op->ShiftExtend.Amount = Val;
    Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2658};
2659
2660} // end anonymous namespace.
2661
// Debug-print the operand in an angle-bracketed, kind-specific form.
// Note: k_Register deliberately falls through into k_ShiftExtend when the
// register carries a shift/extend modifier.
void AArch64Operand::print(raw_ostream &OS, const MCAsmInfo &MAI) const {
  switch (Kind) {
  case k_FPImm:
    OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
    if (!getFPImmIsExact())
      OS << " (inexact)";
    OS << ">";
    break;
  case k_Barrier: {
    StringRef Name = getBarrierName();
    if (!Name.empty())
      OS << "<barrier " << Name << ">";
    else
      OS << "<barrier invalid #" << getBarrier() << ">";
    break;
  }
  case k_Immediate:
    MAI.printExpr(OS, *getImm());
    break;
  case k_ShiftedImm: {
    unsigned Shift = getShiftedImmShift();
    OS << "<shiftedimm ";
    MAI.printExpr(OS, *getShiftedImmVal());
    OS << ", lsl #" << AArch64_AM::getShiftValue(Imm: Shift) << ">";
    break;
  }
  case k_ImmRange: {
    OS << "<immrange ";
    OS << getFirstImmVal();
    OS << ":" << getLastImmVal() << ">";
    break;
  }
  case k_CondCode:
    OS << "<condcode " << getCondCode() << ">";
    break;
  case k_VectorList: {
    OS << "<vectorlist ";
    MCRegister Reg = getVectorListStart();
    // Print each register number in the list, honoring the stride.
    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
      OS << Reg.id() + i * getVectorListStride() << " ";
    OS << ">";
    break;
  }
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  case k_SysReg:
    OS << "<sysreg: " << getSysReg() << '>';
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_SysCR:
    OS << "c" << getSysCR();
    break;
  case k_Prefetch: {
    StringRef Name = getPrefetchName();
    if (!Name.empty())
      OS << "<prfop " << Name << ">";
    else
      OS << "<prfop invalid #" << getPrefetch() << ">";
    break;
  }
  case k_PSBHint:
    OS << getPSBHintName();
    break;
  case k_PHint:
    OS << getPHintName();
    break;
  case k_BTIHint:
    OS << getBTIHintName();
    break;
  case k_CMHPriorityHint:
    OS << getCMHPriorityHintName();
    break;
  case k_TIndexHint:
    OS << getTIndexHintName();
    break;
  case k_MatrixRegister:
    OS << "<matrix " << getMatrixReg().id() << ">";
    break;
  case k_MatrixTileList: {
    OS << "<matrixlist ";
    unsigned RegMask = getMatrixTileListRegMask();
    unsigned MaxBits = 8;
    // Print the 8-bit tile mask as binary, most-significant bit first.
    for (unsigned I = MaxBits; I > 0; --I)
      OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
    OS << '>';
    break;
  }
  case k_SVCR: {
    OS << getSVCR();
    break;
  }
  case k_Register:
    OS << "<register " << getReg().id() << ">";
    if (!getShiftExtendAmount() && !hasShiftExtendAmount())
      break;
    [[fallthrough]];
  case k_ShiftExtend:
    OS << "<" << AArch64_AM::getShiftExtendName(ST: getShiftExtendType()) << " #"
       << getShiftExtendAmount();
    if (!hasShiftExtendAmount())
      OS << "<imp>";
    OS << '>';
    break;
  }
}
2770
2771/// @name Auto-generated Match Functions
2772/// {
2773
2774static MCRegister MatchRegisterName(StringRef Name);
2775
2776/// }
2777
2778static unsigned MatchNeonVectorRegName(StringRef Name) {
2779 return StringSwitch<unsigned>(Name.lower())
2780 .Case(S: "v0", Value: AArch64::Q0)
2781 .Case(S: "v1", Value: AArch64::Q1)
2782 .Case(S: "v2", Value: AArch64::Q2)
2783 .Case(S: "v3", Value: AArch64::Q3)
2784 .Case(S: "v4", Value: AArch64::Q4)
2785 .Case(S: "v5", Value: AArch64::Q5)
2786 .Case(S: "v6", Value: AArch64::Q6)
2787 .Case(S: "v7", Value: AArch64::Q7)
2788 .Case(S: "v8", Value: AArch64::Q8)
2789 .Case(S: "v9", Value: AArch64::Q9)
2790 .Case(S: "v10", Value: AArch64::Q10)
2791 .Case(S: "v11", Value: AArch64::Q11)
2792 .Case(S: "v12", Value: AArch64::Q12)
2793 .Case(S: "v13", Value: AArch64::Q13)
2794 .Case(S: "v14", Value: AArch64::Q14)
2795 .Case(S: "v15", Value: AArch64::Q15)
2796 .Case(S: "v16", Value: AArch64::Q16)
2797 .Case(S: "v17", Value: AArch64::Q17)
2798 .Case(S: "v18", Value: AArch64::Q18)
2799 .Case(S: "v19", Value: AArch64::Q19)
2800 .Case(S: "v20", Value: AArch64::Q20)
2801 .Case(S: "v21", Value: AArch64::Q21)
2802 .Case(S: "v22", Value: AArch64::Q22)
2803 .Case(S: "v23", Value: AArch64::Q23)
2804 .Case(S: "v24", Value: AArch64::Q24)
2805 .Case(S: "v25", Value: AArch64::Q25)
2806 .Case(S: "v26", Value: AArch64::Q26)
2807 .Case(S: "v27", Value: AArch64::Q27)
2808 .Case(S: "v28", Value: AArch64::Q28)
2809 .Case(S: "v29", Value: AArch64::Q29)
2810 .Case(S: "v30", Value: AArch64::Q30)
2811 .Case(S: "v31", Value: AArch64::Q31)
2812 .Default(Value: 0);
2813}
2814
2815/// Returns an optional pair of (#elements, element-width) if Suffix
2816/// is a valid vector kind. Where the number of elements in a vector
2817/// or the vector width is implicit or explicitly unknown (but still a
2818/// valid suffix kind), 0 is used.
2819static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2820 RegKind VectorKind) {
2821 std::pair<int, int> Res = {-1, -1};
2822
2823 switch (VectorKind) {
2824 case RegKind::NeonVector:
2825 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2826 .Case(S: "", Value: {0, 0})
2827 .Case(S: ".1d", Value: {1, 64})
2828 .Case(S: ".1q", Value: {1, 128})
2829 // '.2h' needed for fp16 scalar pairwise reductions
2830 .Case(S: ".2h", Value: {2, 16})
2831 .Case(S: ".2b", Value: {2, 8})
2832 .Case(S: ".2s", Value: {2, 32})
2833 .Case(S: ".2d", Value: {2, 64})
2834 // '.4b' is another special case for the ARMv8.2a dot product
2835 // operand
2836 .Case(S: ".4b", Value: {4, 8})
2837 .Case(S: ".4h", Value: {4, 16})
2838 .Case(S: ".4s", Value: {4, 32})
2839 .Case(S: ".8b", Value: {8, 8})
2840 .Case(S: ".8h", Value: {8, 16})
2841 .Case(S: ".16b", Value: {16, 8})
2842 // Accept the width neutral ones, too, for verbose syntax. If
2843 // those aren't used in the right places, the token operand won't
2844 // match so all will work out.
2845 .Case(S: ".b", Value: {0, 8})
2846 .Case(S: ".h", Value: {0, 16})
2847 .Case(S: ".s", Value: {0, 32})
2848 .Case(S: ".d", Value: {0, 64})
2849 .Default(Value: {-1, -1});
2850 break;
2851 case RegKind::SVEPredicateAsCounter:
2852 case RegKind::SVEPredicateVector:
2853 case RegKind::SVEDataVector:
2854 case RegKind::Matrix:
2855 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2856 .Case(S: "", Value: {0, 0})
2857 .Case(S: ".b", Value: {0, 8})
2858 .Case(S: ".h", Value: {0, 16})
2859 .Case(S: ".s", Value: {0, 32})
2860 .Case(S: ".d", Value: {0, 64})
2861 .Case(S: ".q", Value: {0, 128})
2862 .Default(Value: {-1, -1});
2863 break;
2864 default:
2865 llvm_unreachable("Unsupported RegKind");
2866 }
2867
2868 if (Res == std::make_pair(x: -1, y: -1))
2869 return std::nullopt;
2870
2871 return std::optional<std::pair<int, int>>(Res);
2872}
2873
2874static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2875 return parseVectorKind(Suffix, VectorKind).has_value();
2876}
2877
2878static unsigned matchSVEDataVectorRegName(StringRef Name) {
2879 return StringSwitch<unsigned>(Name.lower())
2880 .Case(S: "z0", Value: AArch64::Z0)
2881 .Case(S: "z1", Value: AArch64::Z1)
2882 .Case(S: "z2", Value: AArch64::Z2)
2883 .Case(S: "z3", Value: AArch64::Z3)
2884 .Case(S: "z4", Value: AArch64::Z4)
2885 .Case(S: "z5", Value: AArch64::Z5)
2886 .Case(S: "z6", Value: AArch64::Z6)
2887 .Case(S: "z7", Value: AArch64::Z7)
2888 .Case(S: "z8", Value: AArch64::Z8)
2889 .Case(S: "z9", Value: AArch64::Z9)
2890 .Case(S: "z10", Value: AArch64::Z10)
2891 .Case(S: "z11", Value: AArch64::Z11)
2892 .Case(S: "z12", Value: AArch64::Z12)
2893 .Case(S: "z13", Value: AArch64::Z13)
2894 .Case(S: "z14", Value: AArch64::Z14)
2895 .Case(S: "z15", Value: AArch64::Z15)
2896 .Case(S: "z16", Value: AArch64::Z16)
2897 .Case(S: "z17", Value: AArch64::Z17)
2898 .Case(S: "z18", Value: AArch64::Z18)
2899 .Case(S: "z19", Value: AArch64::Z19)
2900 .Case(S: "z20", Value: AArch64::Z20)
2901 .Case(S: "z21", Value: AArch64::Z21)
2902 .Case(S: "z22", Value: AArch64::Z22)
2903 .Case(S: "z23", Value: AArch64::Z23)
2904 .Case(S: "z24", Value: AArch64::Z24)
2905 .Case(S: "z25", Value: AArch64::Z25)
2906 .Case(S: "z26", Value: AArch64::Z26)
2907 .Case(S: "z27", Value: AArch64::Z27)
2908 .Case(S: "z28", Value: AArch64::Z28)
2909 .Case(S: "z29", Value: AArch64::Z29)
2910 .Case(S: "z30", Value: AArch64::Z30)
2911 .Case(S: "z31", Value: AArch64::Z31)
2912 .Default(Value: 0);
2913}
2914
2915static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2916 return StringSwitch<unsigned>(Name.lower())
2917 .Case(S: "p0", Value: AArch64::P0)
2918 .Case(S: "p1", Value: AArch64::P1)
2919 .Case(S: "p2", Value: AArch64::P2)
2920 .Case(S: "p3", Value: AArch64::P3)
2921 .Case(S: "p4", Value: AArch64::P4)
2922 .Case(S: "p5", Value: AArch64::P5)
2923 .Case(S: "p6", Value: AArch64::P6)
2924 .Case(S: "p7", Value: AArch64::P7)
2925 .Case(S: "p8", Value: AArch64::P8)
2926 .Case(S: "p9", Value: AArch64::P9)
2927 .Case(S: "p10", Value: AArch64::P10)
2928 .Case(S: "p11", Value: AArch64::P11)
2929 .Case(S: "p12", Value: AArch64::P12)
2930 .Case(S: "p13", Value: AArch64::P13)
2931 .Case(S: "p14", Value: AArch64::P14)
2932 .Case(S: "p15", Value: AArch64::P15)
2933 .Default(Value: 0);
2934}
2935
2936static unsigned matchSVEPredicateAsCounterRegName(StringRef Name) {
2937 return StringSwitch<unsigned>(Name.lower())
2938 .Case(S: "pn0", Value: AArch64::PN0)
2939 .Case(S: "pn1", Value: AArch64::PN1)
2940 .Case(S: "pn2", Value: AArch64::PN2)
2941 .Case(S: "pn3", Value: AArch64::PN3)
2942 .Case(S: "pn4", Value: AArch64::PN4)
2943 .Case(S: "pn5", Value: AArch64::PN5)
2944 .Case(S: "pn6", Value: AArch64::PN6)
2945 .Case(S: "pn7", Value: AArch64::PN7)
2946 .Case(S: "pn8", Value: AArch64::PN8)
2947 .Case(S: "pn9", Value: AArch64::PN9)
2948 .Case(S: "pn10", Value: AArch64::PN10)
2949 .Case(S: "pn11", Value: AArch64::PN11)
2950 .Case(S: "pn12", Value: AArch64::PN12)
2951 .Case(S: "pn13", Value: AArch64::PN13)
2952 .Case(S: "pn14", Value: AArch64::PN14)
2953 .Case(S: "pn15", Value: AArch64::PN15)
2954 .Default(Value: 0);
2955}
2956
2957static unsigned matchMatrixTileListRegName(StringRef Name) {
2958 return StringSwitch<unsigned>(Name.lower())
2959 .Case(S: "za0.d", Value: AArch64::ZAD0)
2960 .Case(S: "za1.d", Value: AArch64::ZAD1)
2961 .Case(S: "za2.d", Value: AArch64::ZAD2)
2962 .Case(S: "za3.d", Value: AArch64::ZAD3)
2963 .Case(S: "za4.d", Value: AArch64::ZAD4)
2964 .Case(S: "za5.d", Value: AArch64::ZAD5)
2965 .Case(S: "za6.d", Value: AArch64::ZAD6)
2966 .Case(S: "za7.d", Value: AArch64::ZAD7)
2967 .Case(S: "za0.s", Value: AArch64::ZAS0)
2968 .Case(S: "za1.s", Value: AArch64::ZAS1)
2969 .Case(S: "za2.s", Value: AArch64::ZAS2)
2970 .Case(S: "za3.s", Value: AArch64::ZAS3)
2971 .Case(S: "za0.h", Value: AArch64::ZAH0)
2972 .Case(S: "za1.h", Value: AArch64::ZAH1)
2973 .Case(S: "za0.b", Value: AArch64::ZAB0)
2974 .Default(Value: 0);
2975}
2976
/// Map an SME matrix register name (case-insensitive) to its register, or 0
/// on no match. Recognized spellings are: the whole array "za"; whole tiles
/// "za<n>.<size>"; and the slice forms "za<n>h.<size>" / "za<n>v.<size>".
/// Note the h/v slice spellings map to the same tile register as the plain
/// form — the horizontal/vertical distinction is not encoded in the register.
static unsigned matchMatrixRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      // The full ZA array.
      .Case(S: "za", Value: AArch64::ZA)
      // Whole tiles: za<n>.<size>. Larger element sizes allow more tiles
      // (16 q-tiles, 8 d-tiles, 4 s-tiles, 2 h-tiles, 1 b-tile).
      .Case(S: "za0.q", Value: AArch64::ZAQ0)
      .Case(S: "za1.q", Value: AArch64::ZAQ1)
      .Case(S: "za2.q", Value: AArch64::ZAQ2)
      .Case(S: "za3.q", Value: AArch64::ZAQ3)
      .Case(S: "za4.q", Value: AArch64::ZAQ4)
      .Case(S: "za5.q", Value: AArch64::ZAQ5)
      .Case(S: "za6.q", Value: AArch64::ZAQ6)
      .Case(S: "za7.q", Value: AArch64::ZAQ7)
      .Case(S: "za8.q", Value: AArch64::ZAQ8)
      .Case(S: "za9.q", Value: AArch64::ZAQ9)
      .Case(S: "za10.q", Value: AArch64::ZAQ10)
      .Case(S: "za11.q", Value: AArch64::ZAQ11)
      .Case(S: "za12.q", Value: AArch64::ZAQ12)
      .Case(S: "za13.q", Value: AArch64::ZAQ13)
      .Case(S: "za14.q", Value: AArch64::ZAQ14)
      .Case(S: "za15.q", Value: AArch64::ZAQ15)
      .Case(S: "za0.d", Value: AArch64::ZAD0)
      .Case(S: "za1.d", Value: AArch64::ZAD1)
      .Case(S: "za2.d", Value: AArch64::ZAD2)
      .Case(S: "za3.d", Value: AArch64::ZAD3)
      .Case(S: "za4.d", Value: AArch64::ZAD4)
      .Case(S: "za5.d", Value: AArch64::ZAD5)
      .Case(S: "za6.d", Value: AArch64::ZAD6)
      .Case(S: "za7.d", Value: AArch64::ZAD7)
      .Case(S: "za0.s", Value: AArch64::ZAS0)
      .Case(S: "za1.s", Value: AArch64::ZAS1)
      .Case(S: "za2.s", Value: AArch64::ZAS2)
      .Case(S: "za3.s", Value: AArch64::ZAS3)
      .Case(S: "za0.h", Value: AArch64::ZAH0)
      .Case(S: "za1.h", Value: AArch64::ZAH1)
      .Case(S: "za0.b", Value: AArch64::ZAB0)
      // Horizontal slices: za<n>h.<size> — same registers as the plain tiles.
      .Case(S: "za0h.q", Value: AArch64::ZAQ0)
      .Case(S: "za1h.q", Value: AArch64::ZAQ1)
      .Case(S: "za2h.q", Value: AArch64::ZAQ2)
      .Case(S: "za3h.q", Value: AArch64::ZAQ3)
      .Case(S: "za4h.q", Value: AArch64::ZAQ4)
      .Case(S: "za5h.q", Value: AArch64::ZAQ5)
      .Case(S: "za6h.q", Value: AArch64::ZAQ6)
      .Case(S: "za7h.q", Value: AArch64::ZAQ7)
      .Case(S: "za8h.q", Value: AArch64::ZAQ8)
      .Case(S: "za9h.q", Value: AArch64::ZAQ9)
      .Case(S: "za10h.q", Value: AArch64::ZAQ10)
      .Case(S: "za11h.q", Value: AArch64::ZAQ11)
      .Case(S: "za12h.q", Value: AArch64::ZAQ12)
      .Case(S: "za13h.q", Value: AArch64::ZAQ13)
      .Case(S: "za14h.q", Value: AArch64::ZAQ14)
      .Case(S: "za15h.q", Value: AArch64::ZAQ15)
      .Case(S: "za0h.d", Value: AArch64::ZAD0)
      .Case(S: "za1h.d", Value: AArch64::ZAD1)
      .Case(S: "za2h.d", Value: AArch64::ZAD2)
      .Case(S: "za3h.d", Value: AArch64::ZAD3)
      .Case(S: "za4h.d", Value: AArch64::ZAD4)
      .Case(S: "za5h.d", Value: AArch64::ZAD5)
      .Case(S: "za6h.d", Value: AArch64::ZAD6)
      .Case(S: "za7h.d", Value: AArch64::ZAD7)
      .Case(S: "za0h.s", Value: AArch64::ZAS0)
      .Case(S: "za1h.s", Value: AArch64::ZAS1)
      .Case(S: "za2h.s", Value: AArch64::ZAS2)
      .Case(S: "za3h.s", Value: AArch64::ZAS3)
      .Case(S: "za0h.h", Value: AArch64::ZAH0)
      .Case(S: "za1h.h", Value: AArch64::ZAH1)
      .Case(S: "za0h.b", Value: AArch64::ZAB0)
      // Vertical slices: za<n>v.<size> — same registers as the plain tiles.
      .Case(S: "za0v.q", Value: AArch64::ZAQ0)
      .Case(S: "za1v.q", Value: AArch64::ZAQ1)
      .Case(S: "za2v.q", Value: AArch64::ZAQ2)
      .Case(S: "za3v.q", Value: AArch64::ZAQ3)
      .Case(S: "za4v.q", Value: AArch64::ZAQ4)
      .Case(S: "za5v.q", Value: AArch64::ZAQ5)
      .Case(S: "za6v.q", Value: AArch64::ZAQ6)
      .Case(S: "za7v.q", Value: AArch64::ZAQ7)
      .Case(S: "za8v.q", Value: AArch64::ZAQ8)
      .Case(S: "za9v.q", Value: AArch64::ZAQ9)
      .Case(S: "za10v.q", Value: AArch64::ZAQ10)
      .Case(S: "za11v.q", Value: AArch64::ZAQ11)
      .Case(S: "za12v.q", Value: AArch64::ZAQ12)
      .Case(S: "za13v.q", Value: AArch64::ZAQ13)
      .Case(S: "za14v.q", Value: AArch64::ZAQ14)
      .Case(S: "za15v.q", Value: AArch64::ZAQ15)
      .Case(S: "za0v.d", Value: AArch64::ZAD0)
      .Case(S: "za1v.d", Value: AArch64::ZAD1)
      .Case(S: "za2v.d", Value: AArch64::ZAD2)
      .Case(S: "za3v.d", Value: AArch64::ZAD3)
      .Case(S: "za4v.d", Value: AArch64::ZAD4)
      .Case(S: "za5v.d", Value: AArch64::ZAD5)
      .Case(S: "za6v.d", Value: AArch64::ZAD6)
      .Case(S: "za7v.d", Value: AArch64::ZAD7)
      .Case(S: "za0v.s", Value: AArch64::ZAS0)
      .Case(S: "za1v.s", Value: AArch64::ZAS1)
      .Case(S: "za2v.s", Value: AArch64::ZAS2)
      .Case(S: "za3v.s", Value: AArch64::ZAS3)
      .Case(S: "za0v.h", Value: AArch64::ZAH0)
      .Case(S: "za1v.h", Value: AArch64::ZAH1)
      .Case(S: "za0v.b", Value: AArch64::ZAB0)
      .Default(Value: 0);
}
3075
3076bool AArch64AsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
3077 SMLoc &EndLoc) {
3078 return !tryParseRegister(Reg, StartLoc, EndLoc).isSuccess();
3079}
3080
3081ParseStatus AArch64AsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
3082 SMLoc &EndLoc) {
3083 StartLoc = getLoc();
3084 ParseStatus Res = tryParseScalarRegister(Reg);
3085 EndLoc = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
3086 return Res;
3087}
3088
3089// Matches a register name or register alias previously defined by '.req'
3090MCRegister AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
3091 RegKind Kind) {
3092 MCRegister Reg = MCRegister();
3093 if ((Reg = matchSVEDataVectorRegName(Name)))
3094 return Kind == RegKind::SVEDataVector ? Reg : MCRegister();
3095
3096 if ((Reg = matchSVEPredicateVectorRegName(Name)))
3097 return Kind == RegKind::SVEPredicateVector ? Reg : MCRegister();
3098
3099 if ((Reg = matchSVEPredicateAsCounterRegName(Name)))
3100 return Kind == RegKind::SVEPredicateAsCounter ? Reg : MCRegister();
3101
3102 if ((Reg = MatchNeonVectorRegName(Name)))
3103 return Kind == RegKind::NeonVector ? Reg : MCRegister();
3104
3105 if ((Reg = matchMatrixRegName(Name)))
3106 return Kind == RegKind::Matrix ? Reg : MCRegister();
3107
3108 if (Name.equals_insensitive(RHS: "zt0"))
3109 return Kind == RegKind::LookupTable ? unsigned(AArch64::ZT0) : 0;
3110
3111 // The parsed register must be of RegKind Scalar
3112 if ((Reg = MatchRegisterName(Name)))
3113 return (Kind == RegKind::Scalar) ? Reg : MCRegister();
3114
3115 if (!Reg) {
3116 // Handle a few common aliases of registers.
3117 if (MCRegister Reg = StringSwitch<unsigned>(Name.lower())
3118 .Case(S: "fp", Value: AArch64::FP)
3119 .Case(S: "lr", Value: AArch64::LR)
3120 .Case(S: "x31", Value: AArch64::XZR)
3121 .Case(S: "w31", Value: AArch64::WZR)
3122 .Default(Value: 0))
3123 return Kind == RegKind::Scalar ? Reg : MCRegister();
3124
3125 // Check for aliases registered via .req. Canonicalize to lower case.
3126 // That's more consistent since register names are case insensitive, and
3127 // it's how the original entry was passed in from MC/MCParser/AsmParser.
3128 auto Entry = RegisterReqs.find(Key: Name.lower());
3129 if (Entry == RegisterReqs.end())
3130 return MCRegister();
3131
3132 // set Reg if the match is the right kind of register
3133 if (Kind == Entry->getValue().first)
3134 Reg = Entry->getValue().second;
3135 }
3136 return Reg;
3137}
3138
3139unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
3140 switch (K) {
3141 case RegKind::Scalar:
3142 case RegKind::NeonVector:
3143 case RegKind::SVEDataVector:
3144 return 32;
3145 case RegKind::Matrix:
3146 case RegKind::SVEPredicateVector:
3147 case RegKind::SVEPredicateAsCounter:
3148 return 16;
3149 case RegKind::LookupTable:
3150 return 1;
3151 }
3152 llvm_unreachable("Unsupported RegKind");
3153}
3154
3155/// tryParseScalarRegister - Try to parse a register name. The token must be an
3156/// Identifier when called, and if it is a register name the token is eaten and
3157/// the register is added to the operand list.
3158ParseStatus AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
3159 const AsmToken &Tok = getTok();
3160 if (Tok.isNot(K: AsmToken::Identifier))
3161 return ParseStatus::NoMatch;
3162
3163 std::string lowerCase = Tok.getString().lower();
3164 MCRegister Reg = matchRegisterNameAlias(Name: lowerCase, Kind: RegKind::Scalar);
3165 if (!Reg)
3166 return ParseStatus::NoMatch;
3167
3168 RegNum = Reg;
3169 Lex(); // Eat identifier token.
3170 return ParseStatus::Success;
3171}
3172
3173/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
3174ParseStatus AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
3175 SMLoc S = getLoc();
3176
3177 if (getTok().isNot(K: AsmToken::Identifier))
3178 return Error(L: S, Msg: "Expected cN operand where 0 <= N <= 15");
3179
3180 StringRef Tok = getTok().getIdentifier();
3181 if (Tok[0] != 'c' && Tok[0] != 'C')
3182 return Error(L: S, Msg: "Expected cN operand where 0 <= N <= 15");
3183
3184 uint32_t CRNum;
3185 bool BadNum = Tok.drop_front().getAsInteger(Radix: 10, Result&: CRNum);
3186 if (BadNum || CRNum > 15)
3187 return Error(L: S, Msg: "Expected cN operand where 0 <= N <= 15");
3188
3189 Lex(); // Eat identifier token.
3190 Operands.push_back(
3191 Elt: AArch64Operand::CreateSysCR(Val: CRNum, S, E: getLoc(), Ctx&: getContext()));
3192 return ParseStatus::Success;
3193}
3194
// Either an identifier for named values or a 6-bit immediate.
/// Parse an RPRFM prefetch operand: a named hint from the generated RPRFM
/// table, or an immediate in [0, 63] optionally preceded by '#'.
ParseStatus AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
  SMLoc S = getLoc();
  const AsmToken &Tok = getTok();

  // RPRFM encodings occupy 6 bits.
  unsigned MaxVal = 63;

  // Immediate case, with optional leading hash:
  // NOTE: parseOptionalToken consumes the '#' as a side effect, and Tok is a
  // live reference into the lexer, so the Tok.is(Integer) alternative below
  // inspects whichever token is current after that.
  if (parseOptionalToken(T: AsmToken::Hash) ||
      Tok.is(K: AsmToken::Integer)) {
    const MCExpr *ImmVal;
    if (getParser().parseExpression(Res&: ImmVal))
      return ParseStatus::Failure;

    // Only a plain constant is allowed here, not a symbolic expression.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
    if (!MCE)
      return TokError(Msg: "immediate value expected for prefetch operand");
    unsigned prfop = MCE->getValue();
    if (prfop > MaxVal)
      return TokError(Msg: "prefetch operand out of range, [0," + utostr(X: MaxVal) +
                      "] expected");

    // Attach the canonical name when the raw encoding has one, so the
    // operand can be printed symbolically.
    auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(Encoding: MCE->getValue());
    Operands.push_back(Elt: AArch64Operand::CreatePrefetch(
        Val: prfop, Str: RPRFM ? RPRFM->Name : "", S, Ctx&: getContext()));
    return ParseStatus::Success;
  }

  // Named-hint case.
  if (Tok.isNot(K: AsmToken::Identifier))
    return TokError(Msg: "prefetch hint expected");

  auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Name: Tok.getString());
  if (!RPRFM)
    return TokError(Msg: "prefetch hint expected");

  Operands.push_back(Elt: AArch64Operand::CreatePrefetch(
      Val: RPRFM->Encoding, Str: Tok.getString(), S, Ctx&: getContext()));
  Lex(); // Eat identifier token.
  return ParseStatus::Success;
}
3235
/// tryParsePrefetch - Try to parse a prefetch operand.
/// Accepts either a named hint or an immediate in [0, MaxVal], optionally
/// preceded by '#'. The template parameter selects between the SVE prefetch
/// table (encodings 0-15) and the base PRFM table (encodings 0-31).
template <bool IsSVEPrefetch>
ParseStatus AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
  SMLoc S = getLoc();
  const AsmToken &Tok = getTok();

  // Name -> encoding, in the table selected by IsSVEPrefetch.
  auto LookupByName = [](StringRef N) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(Name: N))
        return std::optional<unsigned>(Res->Encoding);
    } else if (auto Res = AArch64PRFM::lookupPRFMByName(Name: N))
      return std::optional<unsigned>(Res->Encoding);
    return std::optional<unsigned>();
  };

  // Encoding -> canonical name, used so an operand written as a raw
  // immediate can still be printed symbolically.
  auto LookupByEncoding = [](unsigned E) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(Encoding: E))
        return std::optional<StringRef>(Res->Name);
    } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(Encoding: E))
      return std::optional<StringRef>(Res->Name);
    return std::optional<StringRef>();
  };
  unsigned MaxVal = IsSVEPrefetch ? 15 : 31;

  // Either an identifier for named values or a 5-bit immediate.
  // Eat optional hash.
  // NOTE: parseOptionalToken consumes the '#' as a side effect, and Tok is a
  // live reference into the lexer, so the Tok.is(Integer) alternative below
  // inspects whichever token is current after that.
  if (parseOptionalToken(T: AsmToken::Hash) ||
      Tok.is(K: AsmToken::Integer)) {
    const MCExpr *ImmVal;
    if (getParser().parseExpression(Res&: ImmVal))
      return ParseStatus::Failure;

    // Only a plain constant is allowed here, not a symbolic expression.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
    if (!MCE)
      return TokError(Msg: "immediate value expected for prefetch operand");
    unsigned prfop = MCE->getValue();
    if (prfop > MaxVal)
      return TokError(Msg: "prefetch operand out of range, [0," + utostr(X: MaxVal) +
                      "] expected");

    auto PRFM = LookupByEncoding(MCE->getValue());
    Operands.push_back(AArch64Operand::CreatePrefetch(Val: prfop, Str: PRFM.value_or(""),
                                                      S, Ctx&: getContext()));
    return ParseStatus::Success;
  }

  // Named-hint case.
  if (Tok.isNot(K: AsmToken::Identifier))
    return TokError(Msg: "prefetch hint expected");

  auto PRFM = LookupByName(Tok.getString());
  if (!PRFM)
    return TokError(Msg: "prefetch hint expected");

  Operands.push_back(AArch64Operand::CreatePrefetch(
      Val: *PRFM, Str: Tok.getString(), S, Ctx&: getContext()));
  Lex(); // Eat identifier token.
  return ParseStatus::Success;
}
3295
3296/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
3297ParseStatus AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
3298 SMLoc S = getLoc();
3299 const AsmToken &Tok = getTok();
3300 if (Tok.isNot(K: AsmToken::Identifier))
3301 return TokError(Msg: "invalid operand for instruction");
3302
3303 auto PSB = AArch64PSBHint::lookupPSBByName(Name: Tok.getString());
3304 if (!PSB)
3305 return TokError(Msg: "invalid operand for instruction");
3306
3307 Operands.push_back(Elt: AArch64Operand::CreatePSBHint(
3308 Val: PSB->Encoding, Str: Tok.getString(), S, Ctx&: getContext()));
3309 Lex(); // Eat identifier token.
3310 return ParseStatus::Success;
3311}
3312
/// Parse the explicit "xzr, xzr" register pair used by SYSP-style syntax.
/// Returns NoMatch (restoring the lexer) when the first register is not xzr,
/// so other operand interpretations can be tried.
ParseStatus AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  MCRegister RegNum;

  // The case where xzr, xzr is not present is handled by an InstAlias.

  auto RegTok = getTok(); // in case we need to backtrack
  if (!tryParseScalarRegister(RegNum).isSuccess())
    return ParseStatus::NoMatch;

  // A scalar register other than xzr: push the token back so another parser
  // can claim it, and report no match.
  if (RegNum != AArch64::XZR) {
    getLexer().UnLex(Token: RegTok);
    return ParseStatus::NoMatch;
  }

  if (parseComma())
    return ParseStatus::Failure;

  // From here on the operand is committed, so failures are hard errors.
  if (!tryParseScalarRegister(RegNum).isSuccess())
    return TokError(Msg: "expected register operand");

  if (RegNum != AArch64::XZR)
    return TokError(Msg: "xzr must be followed by xzr");

  // We need to push something, since we claim this is an operand in .td.
  // See also AArch64AsmParser::parseKeywordOperand.
  Operands.push_back(Elt: AArch64Operand::CreateReg(
      Reg: RegNum, Kind: RegKind::Scalar, S: StartLoc, E: getLoc(), Ctx&: getContext()));

  return ParseStatus::Success;
}
3345
3346/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
3347ParseStatus AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
3348 SMLoc S = getLoc();
3349 const AsmToken &Tok = getTok();
3350 if (Tok.isNot(K: AsmToken::Identifier))
3351 return TokError(Msg: "invalid operand for instruction");
3352
3353 auto BTI = AArch64BTIHint::lookupBTIByName(Name: Tok.getString());
3354 if (!BTI)
3355 return TokError(Msg: "invalid operand for instruction");
3356
3357 Operands.push_back(Elt: AArch64Operand::CreateBTIHint(
3358 Val: BTI->Encoding, Str: Tok.getString(), S, Ctx&: getContext()));
3359 Lex(); // Eat identifier token.
3360 return ParseStatus::Success;
3361}
3362
3363/// tryParseCMHPriorityHint - Try to parse a CMHPriority operand
3364ParseStatus AArch64AsmParser::tryParseCMHPriorityHint(OperandVector &Operands) {
3365 SMLoc S = getLoc();
3366 const AsmToken &Tok = getTok();
3367 if (Tok.isNot(K: AsmToken::Identifier))
3368 return TokError(Msg: "invalid operand for instruction");
3369
3370 auto CMHPriority =
3371 AArch64CMHPriorityHint::lookupCMHPriorityHintByName(Name: Tok.getString());
3372 if (!CMHPriority)
3373 return TokError(Msg: "invalid operand for instruction");
3374
3375 Operands.push_back(Elt: AArch64Operand::CreateCMHPriorityHint(
3376 Val: CMHPriority->Encoding, Str: Tok.getString(), S, Ctx&: getContext()));
3377 Lex(); // Eat identifier token.
3378 return ParseStatus::Success;
3379}
3380
3381/// tryParseTIndexHint - Try to parse a TIndex operand
3382ParseStatus AArch64AsmParser::tryParseTIndexHint(OperandVector &Operands) {
3383 SMLoc S = getLoc();
3384 const AsmToken &Tok = getTok();
3385 if (Tok.isNot(K: AsmToken::Identifier))
3386 return TokError(Msg: "invalid operand for instruction");
3387
3388 auto TIndex = AArch64TIndexHint::lookupTIndexByName(Name: Tok.getString());
3389 if (!TIndex)
3390 return TokError(Msg: "invalid operand for instruction");
3391
3392 Operands.push_back(Elt: AArch64Operand::CreateTIndexHint(
3393 Val: TIndex->Encoding, Str: Tok.getString(), S, Ctx&: getContext()));
3394 Lex(); // Eat identifier token.
3395 return ParseStatus::Success;
3396}
3397
/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
/// instruction. Accepts an optional leading '#', then a symbolic expression
/// whose relocation specifier (if any) must be one of the page-granular
/// kinds listed below.
ParseStatus AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
  SMLoc S = getLoc();
  const MCExpr *Expr = nullptr;

  if (getTok().is(K: AsmToken::Hash)) {
    Lex(); // Eat hash token.
  }

  // Parse the expression, including any :specifier: prefix.
  if (parseSymbolicImmVal(ImmVal&: Expr))
    return ParseStatus::Failure;

  AArch64::Specifier ELFSpec;
  AArch64::Specifier DarwinSpec;
  int64_t Addend;
  // classifySymbolRef succeeds only for symbol references; other expressions
  // (e.g. plain constants) skip the specifier validation below entirely.
  if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
    if (DarwinSpec == AArch64::S_None && ELFSpec == AArch64::S_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADRP relocation (unfortunately).
      Expr =
          MCSpecifierExpr::create(Expr, S: AArch64::S_ABS_PAGE, Ctx&: getContext(), Loc: S);
    } else if ((DarwinSpec == AArch64::S_MACHO_GOTPAGE ||
                DarwinSpec == AArch64::S_MACHO_TLVPPAGE) &&
               Addend != 0) {
      // Mach-O gotpage/tlvppage references reject any nonzero addend.
      return Error(L: S, Msg: "gotpage label reference not allowed an addend");
    } else if (DarwinSpec != AArch64::S_MACHO_PAGE &&
               DarwinSpec != AArch64::S_MACHO_GOTPAGE &&
               DarwinSpec != AArch64::S_MACHO_TLVPPAGE &&
               ELFSpec != AArch64::S_ABS_PAGE_NC &&
               ELFSpec != AArch64::S_GOT_PAGE &&
               ELFSpec != AArch64::S_GOT_AUTH_PAGE &&
               ELFSpec != AArch64::S_GOT_PAGE_LO15 &&
               ELFSpec != AArch64::S_GOTTPREL_PAGE &&
               ELFSpec != AArch64::S_TLSDESC_PAGE &&
               ELFSpec != AArch64::S_TLSDESC_AUTH_PAGE) {
      // The operand must be an @page or @gotpage qualified symbolref.
      return Error(L: S, Msg: "page or gotpage label reference expected");
    }
  }

  // We have either a label reference possibly with addend or an immediate. The
  // addend is a raw value here. The linker will adjust it to only reference the
  // page.
  SMLoc E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
  Operands.push_back(Elt: AArch64Operand::CreateImm(Val: Expr, S, E, Ctx&: getContext()));

  return ParseStatus::Success;
}
3447
/// tryParseAdrLabel - Parse and validate a source label for the ADR
/// instruction. Accepts an optional leading '#', then a symbolic expression
/// with either no specifier (ELF basic ADR relocation) or :got_auth:.
ParseStatus AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
  SMLoc S = getLoc();
  const MCExpr *Expr = nullptr;

  // Leave anything with a bracket to the default for SVE
  if (getTok().is(K: AsmToken::LBrac))
    return ParseStatus::NoMatch;

  if (getTok().is(K: AsmToken::Hash))
    Lex(); // Eat hash token.

  if (parseSymbolicImmVal(ImmVal&: Expr))
    return ParseStatus::Failure;

  AArch64::Specifier ELFSpec;
  AArch64::Specifier DarwinSpec;
  int64_t Addend;
  // classifySymbolRef succeeds only for symbol references; other expressions
  // (e.g. plain constants) skip the specifier validation below entirely.
  if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
    if (DarwinSpec == AArch64::S_None && ELFSpec == AArch64::S_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADR relocation (unfortunately).
      Expr = MCSpecifierExpr::create(Expr, S: AArch64::S_ABS, Ctx&: getContext(), Loc: S);
    } else if (ELFSpec != AArch64::S_GOT_AUTH_PAGE) {
      // For tiny code model, we use :got_auth: operator to fill 21-bit imm of
      // adr. It's not actually GOT entry page address but the GOT address
      // itself - we just share the same variant kind with :got_auth: operator
      // applied for adrp.
      // TODO: can we somehow get current TargetMachine object to call
      // getCodeModel() on it to ensure we are using tiny code model?
      return Error(L: S, Msg: "unexpected adr label");
    }
  }

  SMLoc E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
  Operands.push_back(Elt: AArch64Operand::CreateImm(Val: Expr, S, E, Ctx&: getContext()));
  return ParseStatus::Success;
}
3487
/// tryParseFPImm - A floating point immediate expression operand.
/// Accepts "#<fp>" / "<fp>", a hex-encoded 8-bit FPImm ("#0x..", <= 255),
/// and an optional leading minus. With AddFPZeroAsLiteral, +0.0 is pushed as
/// the two tokens "#0" ".0" instead of an FP immediate operand.
template <bool AddFPZeroAsLiteral>
ParseStatus AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  SMLoc S = getLoc();

  // Both of these consume their token when present.
  bool Hash = parseOptionalToken(T: AsmToken::Hash);

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = parseOptionalToken(T: AsmToken::Minus);

  const AsmToken &Tok = getTok();
  if (!Tok.is(K: AsmToken::Real) && !Tok.is(K: AsmToken::Integer)) {
    // Without a leading '#' this may simply be some other operand kind, so
    // only report an error when the '#' committed us to an immediate.
    if (!Hash)
      return ParseStatus::NoMatch;
    return TokError(Msg: "invalid floating point immediate");
  }

  // Parse hexadecimal representation.
  if (Tok.is(K: AsmToken::Integer) && Tok.getString().starts_with(Prefix: "0x")) {
    // The hex form is the pre-encoded 8-bit FPImm field; negation makes no
    // sense for it since the sign is already part of the encoding.
    if (Tok.getIntVal() > 255 || isNegative)
      return TokError(Msg: "encoded floating point value out of range");

    // Decode the 8-bit encoding back into the value it represents.
    APFloat F((double)AArch64_AM::getFPImmFloat(Imm: Tok.getIntVal()));
    Operands.push_back(
        Elt: AArch64Operand::CreateFPImm(Val: F, IsExact: true, S, Ctx&: getContext()));
  } else {
    // Parse FP representation.
    APFloat RealVal(APFloat::IEEEdouble());
    auto StatusOrErr =
        RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
    if (errorToBool(Err: StatusOrErr.takeError()))
      return TokError(Msg: "invalid floating point representation");

    if (isNegative)
      RealVal.changeSign();

    if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
      // Represent +0.0 as the literal token pair "#0" ".0".
      Operands.push_back(Elt: AArch64Operand::CreateToken(Str: "#0", S, Ctx&: getContext()));
      Operands.push_back(Elt: AArch64Operand::CreateToken(Str: ".0", S, Ctx&: getContext()));
    } else
      // IsExact records whether the decimal string converted without
      // rounding (opOK).
      Operands.push_back(Elt: AArch64Operand::CreateFPImm(
          Val: RealVal, IsExact: *StatusOrErr == APFloat::opOK, S, Ctx&: getContext()));
  }

  Lex(); // Eat the token.

  return ParseStatus::Success;
}
3536
/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
/// a shift suffix, for example '#1, lsl #12'. Also dispatches to the
/// immediate-range form ('#lo:hi') and the vector-group suffix ('#N, vgx2').
ParseStatus
AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
  SMLoc S = getLoc();

  if (getTok().is(K: AsmToken::Hash))
    Lex(); // Eat '#'
  else if (getTok().isNot(K: AsmToken::Integer))
    // Operand should start from # or should be integer, emit error otherwise.
    return ParseStatus::NoMatch;

  // "<int>:<...>" is an immediate range, handled by its own parser.
  if (getTok().is(K: AsmToken::Integer) &&
      getLexer().peekTok().is(K: AsmToken::Colon))
    return tryParseImmRange(Operands);

  const MCExpr *Imm = nullptr;
  if (parseSymbolicImmVal(ImmVal&: Imm))
    return ParseStatus::Failure;
  else if (getTok().isNot(K: AsmToken::Comma)) {
    // No suffix follows: a plain immediate.
    Operands.push_back(
        Elt: AArch64Operand::CreateImm(Val: Imm, S, E: getLoc(), Ctx&: getContext()));
    return ParseStatus::Success;
  }

  // Eat ','
  Lex();
  StringRef VecGroup;
  // A false return means a vector-group specifier (stored in VecGroup) was
  // consumed; emit the immediate plus the specifier as a token operand.
  if (!parseOptionalVGOperand(Operands, VecGroup)) {
    Operands.push_back(
        Elt: AArch64Operand::CreateImm(Val: Imm, S, E: getLoc(), Ctx&: getContext()));
    Operands.push_back(
        Elt: AArch64Operand::CreateToken(Str: VecGroup, S: getLoc(), Ctx&: getContext()));
    return ParseStatus::Success;
  }

  // The optional operand must be "lsl #N" where N is non-negative.
  if (!getTok().is(K: AsmToken::Identifier) ||
      !getTok().getIdentifier().equals_insensitive(RHS: "lsl"))
    return Error(L: getLoc(), Msg: "only 'lsl #+N' valid after immediate");

  // Eat 'lsl'
  Lex();

  // The '#' before the shift amount is optional.
  parseOptionalToken(T: AsmToken::Hash);

  if (getTok().isNot(K: AsmToken::Integer))
    return Error(L: getLoc(), Msg: "only 'lsl #+N' valid after immediate");

  int64_t ShiftAmount = getTok().getIntVal();

  if (ShiftAmount < 0)
    return Error(L: getLoc(), Msg: "positive shift amount required");
  Lex(); // Eat the number

  // Just in case the optional lsl #0 is used for immediates other than zero.
  if (ShiftAmount == 0 && Imm != nullptr) {
    Operands.push_back(
        Elt: AArch64Operand::CreateImm(Val: Imm, S, E: getLoc(), Ctx&: getContext()));
    return ParseStatus::Success;
  }

  Operands.push_back(Elt: AArch64Operand::CreateShiftedImm(Val: Imm, ShiftAmount, S,
                                                          E: getLoc(), Ctx&: getContext()));
  return ParseStatus::Success;
}
3603
3604/// parseCondCodeString - Parse a Condition Code string, optionally returning a
3605/// suggestion to help common typos.
3606AArch64CC::CondCode
3607AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
3608 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3609 .Case(S: "eq", Value: AArch64CC::EQ)
3610 .Case(S: "ne", Value: AArch64CC::NE)
3611 .Case(S: "cs", Value: AArch64CC::HS)
3612 .Case(S: "hs", Value: AArch64CC::HS)
3613 .Case(S: "cc", Value: AArch64CC::LO)
3614 .Case(S: "lo", Value: AArch64CC::LO)
3615 .Case(S: "mi", Value: AArch64CC::MI)
3616 .Case(S: "pl", Value: AArch64CC::PL)
3617 .Case(S: "vs", Value: AArch64CC::VS)
3618 .Case(S: "vc", Value: AArch64CC::VC)
3619 .Case(S: "hi", Value: AArch64CC::HI)
3620 .Case(S: "ls", Value: AArch64CC::LS)
3621 .Case(S: "ge", Value: AArch64CC::GE)
3622 .Case(S: "lt", Value: AArch64CC::LT)
3623 .Case(S: "gt", Value: AArch64CC::GT)
3624 .Case(S: "le", Value: AArch64CC::LE)
3625 .Case(S: "al", Value: AArch64CC::AL)
3626 .Case(S: "nv", Value: AArch64CC::NV)
3627 // SVE condition code aliases:
3628 .Case(S: "none", Value: AArch64CC::EQ)
3629 .Case(S: "any", Value: AArch64CC::NE)
3630 .Case(S: "nlast", Value: AArch64CC::HS)
3631 .Case(S: "last", Value: AArch64CC::LO)
3632 .Case(S: "first", Value: AArch64CC::MI)
3633 .Case(S: "nfrst", Value: AArch64CC::PL)
3634 .Case(S: "pmore", Value: AArch64CC::HI)
3635 .Case(S: "plast", Value: AArch64CC::LS)
3636 .Case(S: "tcont", Value: AArch64CC::GE)
3637 .Case(S: "tstop", Value: AArch64CC::LT)
3638 .Default(Value: AArch64CC::Invalid);
3639
3640 if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
3641 Suggestion = "nfrst";
3642
3643 return CC;
3644}
3645
3646/// parseCondCode - Parse a Condition Code operand.
3647bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3648 bool invertCondCode) {
3649 SMLoc S = getLoc();
3650 const AsmToken &Tok = getTok();
3651 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3652
3653 StringRef Cond = Tok.getString();
3654 std::string Suggestion;
3655 AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
3656 if (CC == AArch64CC::Invalid) {
3657 std::string Msg = "invalid condition code";
3658 if (!Suggestion.empty())
3659 Msg += ", did you mean " + Suggestion + "?";
3660 return TokError(Msg);
3661 }
3662 Lex(); // Eat identifier token.
3663
3664 if (invertCondCode) {
3665 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3666 return TokError(Msg: "condition codes AL and NV are invalid for this instruction");
3667 CC = AArch64CC::getInvertedCondCode(Code: AArch64CC::CondCode(CC));
3668 }
3669
3670 Operands.push_back(
3671 Elt: AArch64Operand::CreateCondCode(Code: CC, S, E: getLoc(), Ctx&: getContext()));
3672 return false;
3673}
3674
/// tryParseSVCR - Parse an SME streaming-vector-control-register (SVCR) name
/// operand (e.g. for MSR SVCRSM / SMSTART-style instructions).
ParseStatus AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  SMLoc S = getLoc();

  if (Tok.isNot(K: AsmToken::Identifier))
    return TokError(Msg: "invalid operand for instruction");

  // -1 (wraps to UINT_MAX in an unsigned) marks "name recognised but required
  // target features missing"; the operand is still created so the matcher can
  // emit a better diagnostic later.
  unsigned PStateImm = -1;
  const auto *SVCR = AArch64SVCR::lookupSVCRByName(Name: Tok.getString());
  if (!SVCR)
    return ParseStatus::NoMatch;
  if (SVCR->haveFeatures(ActiveFeatures: getSTI().getFeatureBits()))
    PStateImm = SVCR->Encoding;

  Operands.push_back(
      Elt: AArch64Operand::CreateSVCR(PStateField: PStateImm, Str: Tok.getString(), S, Ctx&: getContext()));
  Lex(); // Eat identifier token.
  return ParseStatus::Success;
}
3694
/// tryParseMatrixRegister - Parse an SME matrix register operand: either the
/// whole ZA array ("za", optionally with an element-width suffix such as
/// "za.b"), or a tile/row/column register (e.g. "za0h.s", "za1v.d").
ParseStatus AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  SMLoc S = getLoc();

  StringRef Name = Tok.getString();

  // Whole-array form: "za" or "za.<suffix>".
  if (Name.equals_insensitive(RHS: "za") || Name.starts_with_insensitive(Prefix: "za.")) {
    Lex(); // eat "za[.(b|h|s|d)]"
    unsigned ElementWidth = 0;
    auto DotPosition = Name.find(C: '.');
    if (DotPosition != StringRef::npos) {
      const auto &KindRes =
          parseVectorKind(Suffix: Name.drop_front(N: DotPosition), VectorKind: RegKind::Matrix);
      if (!KindRes)
        return TokError(
            Msg: "Expected the register to be followed by element width suffix");
      ElementWidth = KindRes->second;
    }
    Operands.push_back(Elt: AArch64Operand::CreateMatrixRegister(
        Reg: AArch64::ZA, ElementWidth, Kind: MatrixKind::Array, S, E: getLoc(),
        Ctx&: getContext()));
    if (getLexer().is(K: AsmToken::LBrac)) {
      // There's no comma after matrix operand, so we can parse the next operand
      // immediately.
      if (parseOperand(Operands, isCondCode: false, invertCondCode: false))
        return ParseStatus::NoMatch;
    }
    return ParseStatus::Success;
  }

  // Try to parse matrix register.
  MCRegister Reg = matchRegisterNameAlias(Name, Kind: RegKind::Matrix);
  if (!Reg)
    return ParseStatus::NoMatch;

  // Split "za<N><h|v>.<suffix>" into head ("za<N><h|v>") and tail
  // (".<suffix>"); every matrix register alias contains a dot.
  size_t DotPosition = Name.find(C: '.');
  assert(DotPosition != StringRef::npos && "Unexpected register");

  StringRef Head = Name.take_front(N: DotPosition);
  StringRef Tail = Name.drop_front(N: DotPosition);
  StringRef RowOrColumn = Head.take_back();

  // The last character of the head selects row ('h') vs column ('v');
  // anything else is a plain tile register.
  MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
                        .Case(S: "h", Value: MatrixKind::Row)
                        .Case(S: "v", Value: MatrixKind::Col)
                        .Default(Value: MatrixKind::Tile);

  // Next up, parsing the suffix
  const auto &KindRes = parseVectorKind(Suffix: Tail, VectorKind: RegKind::Matrix);
  if (!KindRes)
    return TokError(
        Msg: "Expected the register to be followed by element width suffix");
  unsigned ElementWidth = KindRes->second;

  Lex();

  Operands.push_back(Elt: AArch64Operand::CreateMatrixRegister(
      Reg, ElementWidth, Kind, S, E: getLoc(), Ctx&: getContext()));

  if (getLexer().is(K: AsmToken::LBrac)) {
    // There's no comma after matrix operand, so we can parse the next operand
    // immediately.
    if (parseOperand(Operands, isCondCode: false, invertCondCode: false))
      return ParseStatus::NoMatch;
  }
  return ParseStatus::Success;
}
3762
/// tryParseOptionalShiftExtend - Some operands take an optional shift/extend
/// argument. Parse them if present.
ParseStatus
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  std::string LowerID = Tok.getString().lower();
  AArch64_AM::ShiftExtendType ShOp =
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case(S: "lsl", Value: AArch64_AM::LSL)
          .Case(S: "lsr", Value: AArch64_AM::LSR)
          .Case(S: "asr", Value: AArch64_AM::ASR)
          .Case(S: "ror", Value: AArch64_AM::ROR)
          .Case(S: "msl", Value: AArch64_AM::MSL)
          .Case(S: "uxtb", Value: AArch64_AM::UXTB)
          .Case(S: "uxth", Value: AArch64_AM::UXTH)
          .Case(S: "uxtw", Value: AArch64_AM::UXTW)
          .Case(S: "uxtx", Value: AArch64_AM::UXTX)
          .Case(S: "sxtb", Value: AArch64_AM::SXTB)
          .Case(S: "sxth", Value: AArch64_AM::SXTH)
          .Case(S: "sxtw", Value: AArch64_AM::SXTW)
          .Case(S: "sxtx", Value: AArch64_AM::SXTX)
          .Default(Value: AArch64_AM::InvalidShiftExtend);

  if (ShOp == AArch64_AM::InvalidShiftExtend)
    return ParseStatus::NoMatch;

  SMLoc S = Tok.getLoc();
  Lex();

  bool Hash = parseOptionalToken(T: AsmToken::Hash);

  if (!Hash && getLexer().isNot(K: AsmToken::Integer)) {
    // No '#' and no bare integer follows.
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      return TokError(Msg: "expected #imm after shift specifier");
    }

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
    Operands.push_back(
        Elt: AArch64Operand::CreateShiftExtend(ShOp, Val: 0, HasExplicitAmount: false, S, E, Ctx&: getContext()));
    return ParseStatus::Success;
  }

  // Make sure we do actually have a number, identifier or a parenthesized
  // expression.
  SMLoc E = getLoc();
  if (!getTok().is(K: AsmToken::Integer) && !getTok().is(K: AsmToken::LParen) &&
      !getTok().is(K: AsmToken::Identifier))
    return Error(L: E, Msg: "expected integer shift amount");

  const MCExpr *ImmVal;
  if (getParser().parseExpression(Res&: ImmVal))
    return ParseStatus::Failure;

  // The amount must fold to a constant at parse time.
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
  if (!MCE)
    return Error(L: E, Msg: "expected constant '#imm' after shift specifier");

  E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
  Operands.push_back(Elt: AArch64Operand::CreateShiftExtend(
      ShOp, Val: MCE->getValue(), HasExplicitAmount: true, S, E, Ctx&: getContext()));
  return ParseStatus::Success;
}
3829
/// Table mapping architecture-extension names (as accepted by, e.g.,
/// ".arch_extension" directives) to the subtarget features they enable.
/// Some names map to multiple features (e.g. legacy "sve2-aes" spellings).
static const struct Extension {
  const char *Name;
  const FeatureBitset Features;
} ExtensionMap[] = {
    {.Name: "crc", .Features: {AArch64::FeatureCRC}},
    {.Name: "sm4", .Features: {AArch64::FeatureSM4}},
    {.Name: "sha3", .Features: {AArch64::FeatureSHA3}},
    {.Name: "sha2", .Features: {AArch64::FeatureSHA2}},
    {.Name: "aes", .Features: {AArch64::FeatureAES}},
    {.Name: "crypto", .Features: {AArch64::FeatureCrypto}},
    {.Name: "fp", .Features: {AArch64::FeatureFPARMv8}},
    {.Name: "simd", .Features: {AArch64::FeatureNEON}},
    {.Name: "ras", .Features: {AArch64::FeatureRAS}},
    {.Name: "rasv2", .Features: {AArch64::FeatureRASv2}},
    {.Name: "lse", .Features: {AArch64::FeatureLSE}},
    {.Name: "predres", .Features: {AArch64::FeaturePredRes}},
    {.Name: "predres2", .Features: {AArch64::FeatureSPECRES2}},
    {.Name: "ccdp", .Features: {AArch64::FeatureCacheDeepPersist}},
    {.Name: "mte", .Features: {AArch64::FeatureMTE}},
    {.Name: "memtag", .Features: {AArch64::FeatureMTE}},
    {.Name: "tlb-rmi", .Features: {AArch64::FeatureTLB_RMI}},
    {.Name: "pan", .Features: {AArch64::FeaturePAN}},
    {.Name: "pan-rwv", .Features: {AArch64::FeaturePAN_RWV}},
    {.Name: "ccpp", .Features: {AArch64::FeatureCCPP}},
    {.Name: "rcpc", .Features: {AArch64::FeatureRCPC}},
    {.Name: "rng", .Features: {AArch64::FeatureRandGen}},
    {.Name: "sve", .Features: {AArch64::FeatureSVE}},
    {.Name: "sve-b16b16", .Features: {AArch64::FeatureSVEB16B16}},
    {.Name: "sve2", .Features: {AArch64::FeatureSVE2}},
    {.Name: "sve-aes", .Features: {AArch64::FeatureSVEAES}},
    {.Name: "sve2-aes", .Features: {AArch64::FeatureAliasSVE2AES, AArch64::FeatureSVEAES}},
    {.Name: "sve-sm4", .Features: {AArch64::FeatureSVESM4}},
    {.Name: "sve2-sm4", .Features: {AArch64::FeatureAliasSVE2SM4, AArch64::FeatureSVESM4}},
    {.Name: "sve-sha3", .Features: {AArch64::FeatureSVESHA3}},
    {.Name: "sve2-sha3", .Features: {AArch64::FeatureAliasSVE2SHA3, AArch64::FeatureSVESHA3}},
    {.Name: "sve-bitperm", .Features: {AArch64::FeatureSVEBitPerm}},
    {.Name: "sve2-bitperm",
     .Features: {AArch64::FeatureAliasSVE2BitPerm, AArch64::FeatureSVEBitPerm,
      AArch64::FeatureSVE2}},
    {.Name: "sve2p1", .Features: {AArch64::FeatureSVE2p1}},
    {.Name: "ls64", .Features: {AArch64::FeatureLS64}},
    {.Name: "xs", .Features: {AArch64::FeatureXS}},
    {.Name: "pauth", .Features: {AArch64::FeaturePAuth}},
    {.Name: "flagm", .Features: {AArch64::FeatureFlagM}},
    {.Name: "rme", .Features: {AArch64::FeatureRME}},
    {.Name: "sme", .Features: {AArch64::FeatureSME}},
    {.Name: "sme-f64f64", .Features: {AArch64::FeatureSMEF64F64}},
    {.Name: "sme-f16f16", .Features: {AArch64::FeatureSMEF16F16}},
    {.Name: "sme-i16i64", .Features: {AArch64::FeatureSMEI16I64}},
    {.Name: "sme2", .Features: {AArch64::FeatureSME2}},
    {.Name: "sme2p1", .Features: {AArch64::FeatureSME2p1}},
    {.Name: "sme-b16b16", .Features: {AArch64::FeatureSMEB16B16}},
    {.Name: "hbc", .Features: {AArch64::FeatureHBC}},
    {.Name: "mops", .Features: {AArch64::FeatureMOPS}},
    {.Name: "mec", .Features: {AArch64::FeatureMEC}},
    {.Name: "the", .Features: {AArch64::FeatureTHE}},
    {.Name: "d128", .Features: {AArch64::FeatureD128}},
    {.Name: "lse128", .Features: {AArch64::FeatureLSE128}},
    {.Name: "ite", .Features: {AArch64::FeatureITE}},
    {.Name: "cssc", .Features: {AArch64::FeatureCSSC}},
    {.Name: "rcpc3", .Features: {AArch64::FeatureRCPC3}},
    {.Name: "gcs", .Features: {AArch64::FeatureGCS}},
    {.Name: "bf16", .Features: {AArch64::FeatureBF16}},
    {.Name: "compnum", .Features: {AArch64::FeatureComplxNum}},
    {.Name: "dotprod", .Features: {AArch64::FeatureDotProd}},
    {.Name: "f32mm", .Features: {AArch64::FeatureMatMulFP32}},
    {.Name: "f64mm", .Features: {AArch64::FeatureMatMulFP64}},
    {.Name: "fp16", .Features: {AArch64::FeatureFullFP16}},
    {.Name: "fp16fml", .Features: {AArch64::FeatureFP16FML}},
    {.Name: "i8mm", .Features: {AArch64::FeatureMatMulInt8}},
    {.Name: "lor", .Features: {AArch64::FeatureLOR}},
    {.Name: "profile", .Features: {AArch64::FeatureSPE}},
    // "rdma" is the name documented by binutils for the feature, but
    // binutils also accepts incomplete prefixes of features, so "rdm"
    // works too. Support both spellings here.
    {.Name: "rdm", .Features: {AArch64::FeatureRDM}},
    {.Name: "rdma", .Features: {AArch64::FeatureRDM}},
    {.Name: "sb", .Features: {AArch64::FeatureSB}},
    {.Name: "ssbs", .Features: {AArch64::FeatureSSBS}},
    {.Name: "fp8", .Features: {AArch64::FeatureFP8}},
    {.Name: "faminmax", .Features: {AArch64::FeatureFAMINMAX}},
    {.Name: "fp8fma", .Features: {AArch64::FeatureFP8FMA}},
    {.Name: "ssve-fp8fma", .Features: {AArch64::FeatureSSVE_FP8FMA}},
    {.Name: "fp8dot2", .Features: {AArch64::FeatureFP8DOT2}},
    {.Name: "ssve-fp8dot2", .Features: {AArch64::FeatureSSVE_FP8DOT2}},
    {.Name: "fp8dot4", .Features: {AArch64::FeatureFP8DOT4}},
    {.Name: "ssve-fp8dot4", .Features: {AArch64::FeatureSSVE_FP8DOT4}},
    {.Name: "lut", .Features: {AArch64::FeatureLUT}},
    {.Name: "sme-lutv2", .Features: {AArch64::FeatureSME_LUTv2}},
    {.Name: "sme-f8f16", .Features: {AArch64::FeatureSMEF8F16}},
    {.Name: "sme-f8f32", .Features: {AArch64::FeatureSMEF8F32}},
    {.Name: "sme-fa64", .Features: {AArch64::FeatureSMEFA64}},
    {.Name: "cpa", .Features: {AArch64::FeatureCPA}},
    {.Name: "tlbiw", .Features: {AArch64::FeatureTLBIW}},
    {.Name: "pops", .Features: {AArch64::FeaturePoPS}},
    {.Name: "cmpbr", .Features: {AArch64::FeatureCMPBR}},
    {.Name: "f8f32mm", .Features: {AArch64::FeatureF8F32MM}},
    {.Name: "f8f16mm", .Features: {AArch64::FeatureF8F16MM}},
    {.Name: "fprcvt", .Features: {AArch64::FeatureFPRCVT}},
    {.Name: "lsfe", .Features: {AArch64::FeatureLSFE}},
    {.Name: "sme2p2", .Features: {AArch64::FeatureSME2p2}},
    {.Name: "ssve-aes", .Features: {AArch64::FeatureSSVE_AES}},
    {.Name: "sve2p2", .Features: {AArch64::FeatureSVE2p2}},
    {.Name: "sve-aes2", .Features: {AArch64::FeatureSVEAES2}},
    {.Name: "sve-bfscale", .Features: {AArch64::FeatureSVEBFSCALE}},
    {.Name: "sve-f16f32mm", .Features: {AArch64::FeatureSVE_F16F32MM}},
    {.Name: "lsui", .Features: {AArch64::FeatureLSUI}},
    {.Name: "occmo", .Features: {AArch64::FeatureOCCMO}},
    {.Name: "ssve-bitperm", .Features: {AArch64::FeatureSSVE_BitPerm}},
    {.Name: "sme-mop4", .Features: {AArch64::FeatureSME_MOP4}},
    {.Name: "sme-tmop", .Features: {AArch64::FeatureSME_TMOP}},
    {.Name: "lscp", .Features: {AArch64::FeatureLSCP}},
    {.Name: "tlbid", .Features: {AArch64::FeatureTLBID}},
    {.Name: "mpamv2", .Features: {AArch64::FeatureMPAMv2}},
    {.Name: "mtetc", .Features: {AArch64::FeatureMTETC}},
    {.Name: "gcie", .Features: {AArch64::FeatureGCIE}},
    {.Name: "sme2p3", .Features: {AArch64::FeatureSME2p3}},
    {.Name: "sve2p3", .Features: {AArch64::FeatureSVE2p3}},
    {.Name: "sve-b16mm", .Features: {AArch64::FeatureSVE_B16MM}},
    {.Name: "f16mm", .Features: {AArch64::FeatureF16MM}},
    {.Name: "f16f32dot", .Features: {AArch64::FeatureF16F32DOT}},
    {.Name: "f16f32mm", .Features: {AArch64::FeatureF16F32MM}},
    {.Name: "mops-go", .Features: {AArch64::FeatureMOPS_GO}},
    {.Name: "poe2", .Features: {AArch64::FeatureS1POE2}},
    {.Name: "tev", .Features: {AArch64::FeatureTEV}},
    {.Name: "btie", .Features: {AArch64::FeatureBTIE}},
    {.Name: "dit", .Features: {AArch64::FeatureDIT}},
    {.Name: "brbe", .Features: {AArch64::FeatureBRBE}},
    {.Name: "bti", .Features: {AArch64::FeatureBranchTargetId}},
    {.Name: "fcma", .Features: {AArch64::FeatureComplxNum}},
    {.Name: "jscvt", .Features: {AArch64::FeatureJS}},
    {.Name: "pauth-lr", .Features: {AArch64::FeaturePAuthLR}},
    {.Name: "ssve-fexpa", .Features: {AArch64::FeatureSSVE_FEXPA}},
    {.Name: "wfxt", .Features: {AArch64::FeatureWFxT}},
};
3965
/// Append a human-readable description of the features in \p FBS to \p Str,
/// for use in "instruction requires: ..." style diagnostics. Architecture
/// versions take precedence; otherwise matching extension names from
/// ExtensionMap are listed, or "(unknown)" if none match.
static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
  // NOTE(review): the v8.0a check is a plain `if`, not part of the else-if
  // chain below, so a bitset with both v8.0a and a later version appends both
  // strings with no separator — presumably never the case in practice; verify.
  if (FBS[AArch64::HasV8_0aOps])
    Str += "ARMv8a";
  if (FBS[AArch64::HasV8_1aOps])
    Str += "ARMv8.1a";
  else if (FBS[AArch64::HasV8_2aOps])
    Str += "ARMv8.2a";
  else if (FBS[AArch64::HasV8_3aOps])
    Str += "ARMv8.3a";
  else if (FBS[AArch64::HasV8_4aOps])
    Str += "ARMv8.4a";
  else if (FBS[AArch64::HasV8_5aOps])
    Str += "ARMv8.5a";
  else if (FBS[AArch64::HasV8_6aOps])
    Str += "ARMv8.6a";
  else if (FBS[AArch64::HasV8_7aOps])
    Str += "ARMv8.7a";
  else if (FBS[AArch64::HasV8_8aOps])
    Str += "ARMv8.8a";
  else if (FBS[AArch64::HasV8_9aOps])
    Str += "ARMv8.9a";
  else if (FBS[AArch64::HasV9_0aOps])
    Str += "ARMv9-a";
  else if (FBS[AArch64::HasV9_1aOps])
    Str += "ARMv9.1a";
  else if (FBS[AArch64::HasV9_2aOps])
    Str += "ARMv9.2a";
  else if (FBS[AArch64::HasV9_3aOps])
    Str += "ARMv9.3a";
  else if (FBS[AArch64::HasV9_4aOps])
    Str += "ARMv9.4a";
  else if (FBS[AArch64::HasV9_5aOps])
    Str += "ARMv9.5a";
  else if (FBS[AArch64::HasV9_6aOps])
    Str += "ARMv9.6a";
  else if (FBS[AArch64::HasV9_7aOps])
    Str += "ARMv9.7a";
  else if (FBS[AArch64::HasV8_0rOps])
    Str += "ARMv8r";
  else {
    // No architecture version matched: list every extension whose feature set
    // overlaps the requested bits.
    SmallVector<std::string, 2> ExtMatches;
    for (const auto& Ext : ExtensionMap) {
      // Use & in case multiple features are enabled
      if ((FBS & Ext.Features) != FeatureBitset())
        ExtMatches.push_back(Elt: Ext.Name);
    }
    Str += !ExtMatches.empty() ? llvm::join(R&: ExtMatches, Separator: ", ") : "(unknown)";
  }
}
4015
4016void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
4017 SMLoc S) {
4018 const uint16_t Op2 = Encoding & 7;
4019 const uint16_t Cm = (Encoding & 0x78) >> 3;
4020 const uint16_t Cn = (Encoding & 0x780) >> 7;
4021 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
4022
4023 const MCExpr *Expr = MCConstantExpr::create(Value: Op1, Ctx&: getContext());
4024
4025 Operands.push_back(
4026 Elt: AArch64Operand::CreateImm(Val: Expr, S, E: getLoc(), Ctx&: getContext()));
4027 Operands.push_back(
4028 Elt: AArch64Operand::CreateSysCR(Val: Cn, S, E: getLoc(), Ctx&: getContext()));
4029 Operands.push_back(
4030 Elt: AArch64Operand::CreateSysCR(Val: Cm, S, E: getLoc(), Ctx&: getContext()));
4031 Expr = MCConstantExpr::create(Value: Op2, Ctx&: getContext());
4032 Operands.push_back(
4033 Elt: AArch64Operand::CreateImm(Val: Expr, S, E: getLoc(), Ctx&: getContext()));
4034}
4035
4036/// parseSysAlias - The IC, DC, AT, TLBI, MLBI and GIC{R} and GSB instructions
4037/// are simple aliases for the SYS instruction. Parse them specially so that
4038/// we create a SYS MCInst.
4039bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
4040 OperandVector &Operands) {
4041 if (Name.contains(C: '.'))
4042 return TokError(Msg: "invalid operand");
4043
4044 Mnemonic = Name;
4045 Operands.push_back(Elt: AArch64Operand::CreateToken(Str: "sys", S: NameLoc, Ctx&: getContext()));
4046
4047 const AsmToken &Tok = getTok();
4048 StringRef Op = Tok.getString();
4049 SMLoc S = Tok.getLoc();
4050 bool ExpectRegister = true;
4051 bool OptionalRegister = false;
4052 bool hasAll = getSTI().hasFeature(Feature: AArch64::FeatureAll);
4053 bool hasTLBID = getSTI().hasFeature(Feature: AArch64::FeatureTLBID);
4054
4055 if (Mnemonic == "ic") {
4056 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Name: Op);
4057 if (!IC)
4058 return TokError(Msg: "invalid operand for IC instruction");
4059 else if (!IC->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4060 std::string Str("IC " + std::string(IC->Name) + " requires: ");
4061 setRequiredFeatureString(FBS: IC->getRequiredFeatures(), Str);
4062 return TokError(Msg: Str);
4063 }
4064 ExpectRegister = IC->NeedsReg;
4065 createSysAlias(Encoding: IC->Encoding, Operands, S);
4066 } else if (Mnemonic == "dc") {
4067 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Name: Op);
4068 if (!DC)
4069 return TokError(Msg: "invalid operand for DC instruction");
4070 else if (!DC->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4071 std::string Str("DC " + std::string(DC->Name) + " requires: ");
4072 setRequiredFeatureString(FBS: DC->getRequiredFeatures(), Str);
4073 return TokError(Msg: Str);
4074 }
4075 createSysAlias(Encoding: DC->Encoding, Operands, S);
4076 } else if (Mnemonic == "at") {
4077 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Name: Op);
4078 if (!AT)
4079 return TokError(Msg: "invalid operand for AT instruction");
4080 else if (!AT->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4081 std::string Str("AT " + std::string(AT->Name) + " requires: ");
4082 setRequiredFeatureString(FBS: AT->getRequiredFeatures(), Str);
4083 return TokError(Msg: Str);
4084 }
4085 createSysAlias(Encoding: AT->Encoding, Operands, S);
4086 } else if (Mnemonic == "tlbi") {
4087 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Name: Op);
4088 if (!TLBI)
4089 return TokError(Msg: "invalid operand for TLBI instruction");
4090 else if (!TLBI->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4091 std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
4092 setRequiredFeatureString(FBS: TLBI->getRequiredFeatures(), Str);
4093 return TokError(Msg: Str);
4094 }
4095 ExpectRegister = TLBI->NeedsReg;
4096 bool hasTLBID = getSTI().hasFeature(Feature: AArch64::FeatureTLBID);
4097 if (hasAll || hasTLBID) {
4098 OptionalRegister = TLBI->OptionalReg;
4099 }
4100 createSysAlias(Encoding: TLBI->Encoding, Operands, S);
4101 } else if (Mnemonic == "mlbi") {
4102 const AArch64MLBI::MLBI *MLBI = AArch64MLBI::lookupMLBIByName(Name: Op);
4103 if (!MLBI)
4104 return TokError(Msg: "invalid operand for MLBI instruction");
4105 else if (!MLBI->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4106 std::string Str("MLBI " + std::string(MLBI->Name) + " requires: ");
4107 setRequiredFeatureString(FBS: MLBI->getRequiredFeatures(), Str);
4108 return TokError(Msg: Str);
4109 }
4110 ExpectRegister = MLBI->NeedsReg;
4111 createSysAlias(Encoding: MLBI->Encoding, Operands, S);
4112 } else if (Mnemonic == "gic") {
4113 const AArch64GIC::GIC *GIC = AArch64GIC::lookupGICByName(Name: Op);
4114 if (!GIC)
4115 return TokError(Msg: "invalid operand for GIC instruction");
4116 else if (!GIC->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4117 std::string Str("GIC " + std::string(GIC->Name) + " requires: ");
4118 setRequiredFeatureString(FBS: GIC->getRequiredFeatures(), Str);
4119 return TokError(Msg: Str);
4120 }
4121 ExpectRegister = GIC->NeedsReg;
4122 createSysAlias(Encoding: GIC->Encoding, Operands, S);
4123 } else if (Mnemonic == "gsb") {
4124 const AArch64GSB::GSB *GSB = AArch64GSB::lookupGSBByName(Name: Op);
4125 if (!GSB)
4126 return TokError(Msg: "invalid operand for GSB instruction");
4127 else if (!GSB->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4128 std::string Str("GSB " + std::string(GSB->Name) + " requires: ");
4129 setRequiredFeatureString(FBS: GSB->getRequiredFeatures(), Str);
4130 return TokError(Msg: Str);
4131 }
4132 ExpectRegister = false;
4133 createSysAlias(Encoding: GSB->Encoding, Operands, S);
4134 } else if (Mnemonic == "plbi") {
4135 const AArch64PLBI::PLBI *PLBI = AArch64PLBI::lookupPLBIByName(Name: Op);
4136 if (!PLBI)
4137 return TokError(Msg: "invalid operand for PLBI instruction");
4138 else if (!PLBI->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4139 std::string Str("PLBI " + std::string(PLBI->Name) + " requires: ");
4140 setRequiredFeatureString(FBS: PLBI->getRequiredFeatures(), Str);
4141 return TokError(Msg: Str);
4142 }
4143 ExpectRegister = PLBI->NeedsReg;
4144 if (hasAll || hasTLBID) {
4145 OptionalRegister = PLBI->OptionalReg;
4146 }
4147 createSysAlias(Encoding: PLBI->Encoding, Operands, S);
4148 } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" ||
4149 Mnemonic == "cosp") {
4150
4151 if (Op.lower() != "rctx")
4152 return TokError(Msg: "invalid operand for prediction restriction instruction");
4153
4154 bool hasPredres = hasAll || getSTI().hasFeature(Feature: AArch64::FeaturePredRes);
4155 bool hasSpecres2 = hasAll || getSTI().hasFeature(Feature: AArch64::FeatureSPECRES2);
4156
4157 if (Mnemonic == "cosp" && !hasSpecres2)
4158 return TokError(Msg: "COSP requires: predres2");
4159 if (!hasPredres)
4160 return TokError(Msg: Mnemonic.upper() + "RCTX requires: predres");
4161
4162 uint16_t PRCTX_Op2 = Mnemonic == "cfp" ? 0b100
4163 : Mnemonic == "dvp" ? 0b101
4164 : Mnemonic == "cosp" ? 0b110
4165 : Mnemonic == "cpp" ? 0b111
4166 : 0;
4167 assert(PRCTX_Op2 &&
4168 "Invalid mnemonic for prediction restriction instruction");
4169 const auto SYS_3_7_3 = 0b01101110011; // op=3, CRn=7, CRm=3
4170 const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;
4171
4172 createSysAlias(Encoding, Operands, S);
4173 }
4174
4175 Lex(); // Eat operand.
4176
4177 bool HasRegister = false;
4178
4179 // Check for the optional register operand.
4180 if (parseOptionalToken(T: AsmToken::Comma)) {
4181 if (Tok.isNot(K: AsmToken::Identifier) || parseRegister(Operands))
4182 return TokError(Msg: "expected register operand");
4183 HasRegister = true;
4184 }
4185
4186 if (!OptionalRegister) {
4187 if (ExpectRegister && !HasRegister)
4188 return TokError(Msg: "specified " + Mnemonic + " op requires a register");
4189 else if (!ExpectRegister && HasRegister)
4190 return TokError(Msg: "specified " + Mnemonic + " op does not use a register");
4191 }
4192
4193 if (parseToken(T: AsmToken::EndOfStatement, Msg: "unexpected token in argument list"))
4194 return true;
4195
4196 return false;
4197}
4198
/// parseSyslAlias - The GICR instructions are simple aliases for
/// the SYSL instruction. Parse them specially so that we create a
/// SYSL MCInst.
///
/// Expected syntax: "<mnemonic> Xt, <op-name>" — note the destination
/// register comes first, before the named operand.
bool AArch64AsmParser::parseSyslAlias(StringRef Name, SMLoc NameLoc,
                                      OperandVector &Operands) {

  Mnemonic = Name;
  Operands.push_back(
      Elt: AArch64Operand::CreateToken(Str: "sysl", S: NameLoc, Ctx&: getContext()));

  // Now expect two operands (identifier + register)
  SMLoc startLoc = getLoc();
  const AsmToken &regTok = getTok();
  StringRef reg = regTok.getString();
  MCRegister Reg = matchRegisterNameAlias(Name: reg.lower(), Kind: RegKind::Scalar);
  if (!Reg)
    return TokError(Msg: "expected register operand");

  Operands.push_back(Elt: AArch64Operand::CreateReg(
      Reg, Kind: RegKind::Scalar, S: startLoc, E: getLoc(), Ctx&: getContext(), EqTy: EqualsReg));

  Lex(); // Eat token
  if (parseToken(T: AsmToken::Comma))
    return true;

  // Check for identifier
  const AsmToken &operandTok = getTok();
  StringRef Op = operandTok.getString();
  SMLoc S2 = operandTok.getLoc();
  Lex(); // Eat token

  if (Mnemonic == "gicr") {
    const AArch64GICR::GICR *GICR = AArch64GICR::lookupGICRByName(Name: Op);
    if (!GICR)
      return Error(L: S2, Msg: "invalid operand for GICR instruction");
    else if (!GICR->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
      std::string Str("GICR " + std::string(GICR->Name) + " requires: ");
      setRequiredFeatureString(FBS: GICR->getRequiredFeatures(), Str);
      return Error(L: S2, Msg: Str);
    }
    createSysAlias(Encoding: GICR->Encoding, Operands, S: S2);
  }

  if (parseToken(T: AsmToken::EndOfStatement, Msg: "unexpected token in argument list"))
    return true;

  return false;
}
4247
/// parseSyspAlias - The TLBIP instructions are simple aliases for
/// the SYSP instruction. Parse them specially so that we create a SYSP MCInst.
///
/// Expected syntax: "tlbip <op-name>, <Xt, Xt+1 register pair>".
bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
                                      OperandVector &Operands) {
  if (Name.contains(C: '.'))
    return TokError(Msg: "invalid operand");

  Mnemonic = Name;
  Operands.push_back(
      Elt: AArch64Operand::CreateToken(Str: "sysp", S: NameLoc, Ctx&: getContext()));

  const AsmToken &Tok = getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  if (Mnemonic == "tlbip") {
    const AArch64TLBIP::TLBIP *TLBIP = AArch64TLBIP::lookupTLBIPByName(Name: Op);
    if (!TLBIP)
      return TokError(Msg: "invalid operand for TLBIP instruction");
    // All TLBIP forms require FEAT_D128 (unless every feature is enabled).
    if (!getSTI().hasFeature(Feature: AArch64::FeatureD128) &&
        !getSTI().hasFeature(Feature: AArch64::FeatureAll))
      return TokError(Msg: "instruction requires: d128");
    if (!TLBIP->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
      std::string Str("instruction requires: ");
      setRequiredFeatureString(FBS: TLBIP->getRequiredFeatures(), Str);
      return TokError(Msg: Str);
    }
    createSysAlias(Encoding: TLBIP->Encoding, Operands, S);
  }

  Lex(); // Eat operand.

  if (parseComma())
    return true;

  if (Tok.isNot(K: AsmToken::Identifier))
    return TokError(Msg: "expected register identifier");
  // Accept either the XZR pseudo-pair or a general GPR sequential pair.
  auto Result = tryParseSyspXzrPair(Operands);
  if (Result.isNoMatch())
    Result = tryParseGPRSeqPair(Operands);
  if (!Result.isSuccess())
    return TokError(Msg: "specified " + Mnemonic +
                    " op requires a pair of registers");

  if (parseToken(T: AsmToken::EndOfStatement, Msg: "unexpected token in argument list"))
    return true;

  return false;
}
4297
/// tryParseBarrierOperand - Parse the operand of a barrier instruction
/// (DSB/DMB/ISB/TSB): either an immediate in [0,15] or a named barrier option.
/// For "dsb" an out-of-range immediate or unknown name yields NoMatch so the
/// nXS variant parser can have a go at it.
ParseStatus AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = getTok();

  if (Mnemonic == "tsb" && Tok.isNot(K: AsmToken::Identifier))
    return TokError(Msg: "'csync' operand expected");
  if (parseOptionalToken(T: AsmToken::Hash) || Tok.is(K: AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    // Keep a copy of the integer token so it can be pushed back if this turns
    // out to be a dsb-nXS candidate.
    AsmToken IntTok = Tok;
    if (getParser().parseExpression(Res&: ImmVal))
      return ParseStatus::Failure;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
    if (!MCE)
      return Error(L: ExprLoc, Msg: "immediate value expected for barrier operand");
    int64_t Value = MCE->getValue();
    if (Mnemonic == "dsb" && Value > 15) {
      // This case is a no match here, but it might be matched by the nXS
      // variant. Deliberately not unlex the optional '#' as it is not necessary
      // to characterize an integer immediate.
      Parser.getLexer().UnLex(Token: IntTok);
      return ParseStatus::NoMatch;
    }
    if (Value < 0 || Value > 15)
      return Error(L: ExprLoc, Msg: "barrier operand out of range");
    // Attach the canonical name for the encoding if one exists.
    auto DB = AArch64DB::lookupDBByEncoding(Encoding: Value);
    Operands.push_back(Elt: AArch64Operand::CreateBarrier(Val: Value, Str: DB ? DB->Name : "",
                                                      S: ExprLoc, Ctx&: getContext(),
                                                      HasnXSModifier: false /*hasnXSModifier*/));
    return ParseStatus::Success;
  }

  if (Tok.isNot(K: AsmToken::Identifier))
    return TokError(Msg: "invalid operand for instruction");

  StringRef Operand = Tok.getString();
  auto TSB = AArch64TSB::lookupTSBByName(Name: Operand);
  auto DB = AArch64DB::lookupDBByName(Name: Operand);
  // The only valid named option for ISB is 'sy'
  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy))
    return TokError(Msg: "'sy' or #imm operand expected");
  // The only valid named option for TSB is 'csync'
  if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync))
    return TokError(Msg: "'csync' operand expected");
  if (!DB && !TSB) {
    if (Mnemonic == "dsb") {
      // This case is a no match here, but it might be matched by the nXS
      // variant.
      return ParseStatus::NoMatch;
    }
    return TokError(Msg: "invalid barrier option name");
  }

  Operands.push_back(Elt: AArch64Operand::CreateBarrier(
      Val: DB ? DB->Encoding : TSB->Encoding, Str: Tok.getString(), S: getLoc(),
      Ctx&: getContext(), HasnXSModifier: false /*hasnXSModifier*/));
  Lex(); // Consume the option

  return ParseStatus::Success;
}
4359
/// tryParseBarriernXSOperand - Parse the operand of a "dsb ... nXS" variant
/// (Armv8.7-A). Accepts either one of the immediates 16/20/24/28
/// (optionally '#'-prefixed) or a named nXS barrier option; anything else
/// is an error.
ParseStatus
AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
  const AsmToken &Tok = getTok();

  // Only "dsb" is expected to reach this parser.
  assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
  if (Mnemonic != "dsb")
    return ParseStatus::Failure;

  if (parseOptionalToken(T: AsmToken::Hash) || Tok.is(K: AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    if (getParser().parseExpression(Res&: ImmVal))
      return ParseStatus::Failure;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
    if (!MCE)
      return Error(L: ExprLoc, Msg: "immediate value expected for barrier operand");
    int64_t Value = MCE->getValue();
    // v8.7-A DSB in the nXS variant accepts only the following immediate
    // values: 16, 20, 24, 28.
    if (Value != 16 && Value != 20 && Value != 24 && Value != 28)
      return Error(L: ExprLoc, Msg: "barrier operand out of range");
    // The range check above guarantees the table lookup succeeds.
    auto DB = AArch64DBnXS::lookupDBnXSByImmValue(ImmValue: Value);
    Operands.push_back(Elt: AArch64Operand::CreateBarrier(Val: DB->Encoding, Str: DB->Name,
                                                      S: ExprLoc, Ctx&: getContext(),
                                                      HasnXSModifier: true /*hasnXSModifier*/));
    return ParseStatus::Success;
  }

  if (Tok.isNot(K: AsmToken::Identifier))
    return TokError(Msg: "invalid operand for instruction");

  // Named nXS option (e.g. one of the entries in the DBnXS table).
  StringRef Operand = Tok.getString();
  auto DB = AArch64DBnXS::lookupDBnXSByName(Name: Operand);

  if (!DB)
    return TokError(Msg: "invalid barrier option name");

  Operands.push_back(
      Elt: AArch64Operand::CreateBarrier(Val: DB->Encoding, Str: Tok.getString(), S: getLoc(),
                                     Ctx&: getContext(), HasnXSModifier: true /*hasnXSModifier*/));
  Lex(); // Consume the option

  return ParseStatus::Success;
}
4405
/// tryParseSysReg - Parse a system-register operand (MRS/MSR). Accepts a
/// named system register gated on the active subtarget features, or falls
/// back to the generic encoded spelling. Also records a PSTATE field
/// encoding when the name matches one of the PState tables.
ParseStatus AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
  const AsmToken &Tok = getTok();

  if (Tok.isNot(K: AsmToken::Identifier))
    return ParseStatus::NoMatch;

  // SVCR names are handled by a different operand parser; don't claim them.
  if (AArch64SVCR::lookupSVCRByName(Name: Tok.getString()))
    return ParseStatus::NoMatch;

  // -1 marks the register as not readable (MRS) / not writeable (MSR).
  int MRSReg, MSRReg;
  auto SysReg = AArch64SysReg::lookupSysRegByName(Name: Tok.getString());
  if (SysReg && SysReg->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
    MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
    MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
  } else
    // Not a known (or available) named register: try the generic encoded
    // form; both directions share whatever encoding (or -1) this yields.
    MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Name: Tok.getString());

  // PSTATE fields: check the imm0_15 table first, then the imm0_1 table.
  unsigned PStateImm = -1;
  auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Name: Tok.getString());
  if (PState15 && PState15->haveFeatures(ActiveFeatures: getSTI().getFeatureBits()))
    PStateImm = PState15->Encoding;
  if (!PState15) {
    auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Name: Tok.getString());
    if (PState1 && PState1->haveFeatures(ActiveFeatures: getSTI().getFeatureBits()))
      PStateImm = PState1->Encoding;
  }

  Operands.push_back(
      Elt: AArch64Operand::CreateSysReg(Str: Tok.getString(), S: getLoc(), MRSReg, MSRReg,
                                    PStateField: PStateImm, Ctx&: getContext()));
  Lex(); // Eat identifier

  return ParseStatus::Success;
}
4440
4441ParseStatus
4442AArch64AsmParser::tryParsePHintInstOperand(OperandVector &Operands) {
4443 SMLoc S = getLoc();
4444 const AsmToken &Tok = getTok();
4445 if (Tok.isNot(K: AsmToken::Identifier))
4446 return TokError(Msg: "invalid operand for instruction");
4447
4448 auto PH = AArch64PHint::lookupPHintByName(Tok.getString());
4449 if (!PH)
4450 return TokError(Msg: "invalid operand for instruction");
4451
4452 Operands.push_back(Elt: AArch64Operand::CreatePHintInst(
4453 Val: PH->Encoding, Str: Tok.getString(), S, Ctx&: getContext()));
4454 Lex(); // Eat identifier token.
4455 return ParseStatus::Success;
4456}
4457
/// tryParseNeonVectorRegister - Parse a vector register operand.
/// Returns true on failure. On success pushes the register operand, a
/// literal token for any explicit ".<kind>" qualifier, and an optional
/// trailing vector index.
bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
  if (getTok().isNot(K: AsmToken::Identifier))
    return true;

  SMLoc S = getLoc();
  // Check for a vector register specifier first.
  StringRef Kind;
  MCRegister Reg;
  ParseStatus Res = tryParseVectorRegister(Reg, Kind, MatchKind: RegKind::NeonVector);
  if (!Res.isSuccess())
    return true;

  // Translate the (possibly empty) suffix into an element width.
  const auto &KindRes = parseVectorKind(Suffix: Kind, VectorKind: RegKind::NeonVector);
  if (!KindRes)
    return true;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(
      Elt: AArch64Operand::CreateVectorReg(Reg, Kind: RegKind::NeonVector, ElementWidth,
                                      S, E: getLoc(), Ctx&: getContext()));

  // If there was an explicit qualifier, that goes on as a literal text
  // operand.
  if (!Kind.empty())
    Operands.push_back(Elt: AArch64Operand::CreateToken(Str: Kind, S, Ctx&: getContext()));

  // A "[n]" index is optional; only a hard parse failure is an error here.
  return tryParseVectorIndex(Operands).isFailure();
}
4487
/// tryParseVectorIndex - Parse an optional "[<imm>]" vector index.
/// Returns NoMatch when no '[' is present (nothing is consumed).
ParseStatus AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
  SMLoc SIdx = getLoc();
  if (parseOptionalToken(T: AsmToken::LBrac)) {
    const MCExpr *ImmVal;
    if (getParser().parseExpression(Res&: ImmVal))
      return ParseStatus::NoMatch;
    // The index must fold to a compile-time constant.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
    if (!MCE)
      return TokError(Msg: "immediate value expected for vector index");

    SMLoc E = getLoc();

    if (parseToken(T: AsmToken::RBrac, Msg: "']' expected"))
      return ParseStatus::Failure;

    Operands.push_back(Elt: AArch64Operand::CreateVectorIndex(Idx: MCE->getValue(), S: SIdx,
                                                         E, Ctx&: getContext()));
    return ParseStatus::Success;
  }

  return ParseStatus::NoMatch;
}
4510
// tryParseVectorRegister - Try to parse a vector register name with
// optional kind specifier. If it is a register specifier, eat the token
// and return it.
ParseStatus AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg,
                                                     StringRef &Kind,
                                                     RegKind MatchKind) {
  const AsmToken &Tok = getTok();

  if (Tok.isNot(K: AsmToken::Identifier))
    return ParseStatus::NoMatch;

  StringRef Name = Tok.getString();
  // If there is a kind specifier, it's separated from the register name by
  // a '.'.
  size_t Start = 0, Next = Name.find(C: '.');
  StringRef Head = Name.slice(Start, End: Next);
  MCRegister RegNum = matchRegisterNameAlias(Name: Head, Kind: MatchKind);

  if (RegNum) {
    if (Next != StringRef::npos) {
      // The suffix (including the leading '.') is returned via Kind after
      // being validated for the requested register kind.
      Kind = Name.substr(Start: Next);
      if (!isValidVectorKind(Suffix: Kind, VectorKind: MatchKind))
        return TokError(Msg: "invalid vector kind qualifier");
    }
    Lex(); // Eat the register token.

    Reg = RegNum;
    return ParseStatus::Success;
  }

  // Not a register of the requested kind; nothing was consumed.
  return ParseStatus::NoMatch;
}
4543
4544ParseStatus AArch64AsmParser::tryParseSVEPredicateOrPredicateAsCounterVector(
4545 OperandVector &Operands) {
4546 ParseStatus Status =
4547 tryParseSVEPredicateVector<RegKind::SVEPredicateAsCounter>(Operands);
4548 if (!Status.isSuccess())
4549 Status = tryParseSVEPredicateVector<RegKind::SVEPredicateVector>(Operands);
4550 return Status;
4551}
4552
/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
/// Handles an optional element-size suffix, an optional vector index (or,
/// for regular predicate vectors, an immediately following operand after
/// '['), and an optional "/z" or "/m" predication qualifier. A suffix and
/// a predication qualifier are mutually exclusive.
template <RegKind RK>
ParseStatus
AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
  // Check for a SVE predicate register specifier first.
  const SMLoc S = getLoc();
  StringRef Kind;
  MCRegister RegNum;
  auto Res = tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RK);
  if (!Res.isSuccess())
    return Res;

  // Translate the (possibly empty) suffix into an element width.
  const auto &KindRes = parseVectorKind(Suffix: Kind, VectorKind: RK);
  if (!KindRes)
    return ParseStatus::NoMatch;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(Elt: AArch64Operand::CreateVectorReg(
      Reg: RegNum, Kind: RK, ElementWidth, S,
      E: getLoc(), Ctx&: getContext()));

  if (getLexer().is(K: AsmToken::LBrac)) {
    if (RK == RegKind::SVEPredicateAsCounter) {
      ParseStatus ResIndex = tryParseVectorIndex(Operands);
      if (ResIndex.isSuccess())
        return ParseStatus::Success;
    } else {
      // Indexed predicate, there's no comma so try parse the next operand
      // immediately.
      if (parseOperand(Operands, isCondCode: false, invertCondCode: false))
        return ParseStatus::NoMatch;
    }
  }

  // Not all predicates are followed by a '/m' or '/z'.
  if (getTok().isNot(K: AsmToken::Slash))
    return ParseStatus::Success;

  // But when they do they shouldn't have an element type suffix.
  if (!Kind.empty())
    return Error(L: S, Msg: "not expecting size suffix");

  // Add a literal slash as operand
  Operands.push_back(Elt: AArch64Operand::CreateToken(Str: "/", S: getLoc(), Ctx&: getContext()));

  Lex(); // Eat the slash.

  // Zeroing or merging?
  auto Pred = getTok().getString().lower();
  if (RK == RegKind::SVEPredicateAsCounter && Pred != "z")
    return Error(L: getLoc(), Msg: "expecting 'z' predication");

  if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m")
    return Error(L: getLoc(), Msg: "expecting 'm' or 'z' predication");

  // Add zero/merge token.
  const char *ZM = Pred == "z" ? "z" : "m";
  Operands.push_back(Elt: AArch64Operand::CreateToken(Str: ZM, S: getLoc(), Ctx&: getContext()));

  Lex(); // Eat zero/merge token.
  return ParseStatus::Success;
}
4615
4616/// parseRegister - Parse a register operand.
4617bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
4618 // Try for a Neon vector register.
4619 if (!tryParseNeonVectorRegister(Operands))
4620 return false;
4621
4622 if (tryParseZTOperand(Operands).isSuccess())
4623 return false;
4624
4625 // Otherwise try for a scalar register.
4626 if (tryParseGPROperand<false>(Operands).isSuccess())
4627 return false;
4628
4629 return true;
4630}
4631
/// parseSymbolicImmVal - Parse an immediate expression with an optional
/// leading relocation specifier of the form ":<specifier>:" (e.g.
/// ":lo12:sym"); when one is present the resulting expression is wrapped
/// in an MCSpecifierExpr. On targets with subsections-via-symbols an
/// '@'-specifier and a single optional trailing '+'/'-' term are also
/// parsed. Returns true on error.
bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  bool HasELFModifier = false;
  AArch64::Specifier RefKind;
  SMLoc Loc = getLexer().getLoc();
  if (parseOptionalToken(T: AsmToken::Colon)) {
    HasELFModifier = true;

    if (getTok().isNot(K: AsmToken::Identifier))
      return TokError(Msg: "expect relocation specifier in operand after ':'");

    // Map the lower-cased specifier name onto the AArch64 specifier kind.
    std::string LowerCase = getTok().getIdentifier().lower();
    RefKind = StringSwitch<AArch64::Specifier>(LowerCase)
                  .Case(S: "lo12", Value: AArch64::S_LO12)
                  .Case(S: "abs_g3", Value: AArch64::S_ABS_G3)
                  .Case(S: "abs_g2", Value: AArch64::S_ABS_G2)
                  .Case(S: "abs_g2_s", Value: AArch64::S_ABS_G2_S)
                  .Case(S: "abs_g2_nc", Value: AArch64::S_ABS_G2_NC)
                  .Case(S: "abs_g1", Value: AArch64::S_ABS_G1)
                  .Case(S: "abs_g1_s", Value: AArch64::S_ABS_G1_S)
                  .Case(S: "abs_g1_nc", Value: AArch64::S_ABS_G1_NC)
                  .Case(S: "abs_g0", Value: AArch64::S_ABS_G0)
                  .Case(S: "abs_g0_s", Value: AArch64::S_ABS_G0_S)
                  .Case(S: "abs_g0_nc", Value: AArch64::S_ABS_G0_NC)
                  .Case(S: "prel_g3", Value: AArch64::S_PREL_G3)
                  .Case(S: "prel_g2", Value: AArch64::S_PREL_G2)
                  .Case(S: "prel_g2_nc", Value: AArch64::S_PREL_G2_NC)
                  .Case(S: "prel_g1", Value: AArch64::S_PREL_G1)
                  .Case(S: "prel_g1_nc", Value: AArch64::S_PREL_G1_NC)
                  .Case(S: "prel_g0", Value: AArch64::S_PREL_G0)
                  .Case(S: "prel_g0_nc", Value: AArch64::S_PREL_G0_NC)
                  .Case(S: "dtprel_g2", Value: AArch64::S_DTPREL_G2)
                  .Case(S: "dtprel_g1", Value: AArch64::S_DTPREL_G1)
                  .Case(S: "dtprel_g1_nc", Value: AArch64::S_DTPREL_G1_NC)
                  .Case(S: "dtprel_g0", Value: AArch64::S_DTPREL_G0)
                  .Case(S: "dtprel_g0_nc", Value: AArch64::S_DTPREL_G0_NC)
                  .Case(S: "dtprel_hi12", Value: AArch64::S_DTPREL_HI12)
                  .Case(S: "dtprel_lo12", Value: AArch64::S_DTPREL_LO12)
                  .Case(S: "dtprel_lo12_nc", Value: AArch64::S_DTPREL_LO12_NC)
                  .Case(S: "pg_hi21_nc", Value: AArch64::S_ABS_PAGE_NC)
                  .Case(S: "tprel_g2", Value: AArch64::S_TPREL_G2)
                  .Case(S: "tprel_g1", Value: AArch64::S_TPREL_G1)
                  .Case(S: "tprel_g1_nc", Value: AArch64::S_TPREL_G1_NC)
                  .Case(S: "tprel_g0", Value: AArch64::S_TPREL_G0)
                  .Case(S: "tprel_g0_nc", Value: AArch64::S_TPREL_G0_NC)
                  .Case(S: "tprel_hi12", Value: AArch64::S_TPREL_HI12)
                  .Case(S: "tprel_lo12", Value: AArch64::S_TPREL_LO12)
                  .Case(S: "tprel_lo12_nc", Value: AArch64::S_TPREL_LO12_NC)
                  .Case(S: "tlsdesc_lo12", Value: AArch64::S_TLSDESC_LO12)
                  .Case(S: "tlsdesc_auth_lo12", Value: AArch64::S_TLSDESC_AUTH_LO12)
                  .Case(S: "got", Value: AArch64::S_GOT_PAGE)
                  .Case(S: "gotpage_lo15", Value: AArch64::S_GOT_PAGE_LO15)
                  .Case(S: "got_lo12", Value: AArch64::S_GOT_LO12)
                  .Case(S: "got_auth", Value: AArch64::S_GOT_AUTH_PAGE)
                  .Case(S: "got_auth_lo12", Value: AArch64::S_GOT_AUTH_LO12)
                  .Case(S: "gottprel", Value: AArch64::S_GOTTPREL_PAGE)
                  .Case(S: "gottprel_lo12", Value: AArch64::S_GOTTPREL_LO12_NC)
                  .Case(S: "gottprel_g1", Value: AArch64::S_GOTTPREL_G1)
                  .Case(S: "gottprel_g0_nc", Value: AArch64::S_GOTTPREL_G0_NC)
                  .Case(S: "tlsdesc", Value: AArch64::S_TLSDESC_PAGE)
                  .Case(S: "tlsdesc_auth", Value: AArch64::S_TLSDESC_AUTH_PAGE)
                  .Case(S: "secrel_lo12", Value: AArch64::S_SECREL_LO12)
                  .Case(S: "secrel_hi12", Value: AArch64::S_SECREL_HI12)
                  .Default(Value: AArch64::S_INVALID);

    if (RefKind == AArch64::S_INVALID)
      return TokError(Msg: "expect relocation specifier in operand after ':'");

    Lex(); // Eat identifier

    if (parseToken(T: AsmToken::Colon, Msg: "expect ':' after relocation specifier"))
      return true;
  }

  if (getParser().parseExpression(Res&: ImmVal))
    return true;

  if (HasELFModifier)
    ImmVal = MCSpecifierExpr::create(Expr: ImmVal, S: RefKind, Ctx&: getContext(), Loc);

  // Targets with subsections-via-symbols: also accept an '@'-style
  // specifier and at most one trailing '+'/'-' addend term.
  SMLoc EndLoc;
  if (getContext().getAsmInfo()->hasSubsectionsViaSymbols()) {
    if (getParser().parseAtSpecifier(Res&: ImmVal, EndLoc))
      return true;
    const MCExpr *Term;
    MCBinaryExpr::Opcode Opcode;
    if (parseOptionalToken(T: AsmToken::Plus))
      Opcode = MCBinaryExpr::Add;
    else if (parseOptionalToken(T: AsmToken::Minus))
      Opcode = MCBinaryExpr::Sub;
    else
      return false;
    if (getParser().parsePrimaryExpr(Res&: Term, EndLoc))
      return true;
    ImmVal = MCBinaryExpr::create(Op: Opcode, LHS: ImmVal, RHS: Term, Ctx&: getContext());
  }

  return false;
}
4730
/// tryParseMatrixTileList - Parse an SME matrix tile list operand of the
/// form "{}", "{za}", or "{<tile>.<size>[, <tile>.<size>...]}". All tiles
/// must share one element size; the list is folded into a ZAD register
/// mask. Duplicates and non-ascending order only produce warnings.
ParseStatus AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
  if (getTok().isNot(K: AsmToken::LCurly))
    return ParseStatus::NoMatch;

  // Parse a single "<tile>.<size>" element; consumes the token on success.
  auto ParseMatrixTile = [this](unsigned &Reg,
                                unsigned &ElementWidth) -> ParseStatus {
    StringRef Name = getTok().getString();
    size_t DotPosition = Name.find(C: '.');
    if (DotPosition == StringRef::npos)
      return ParseStatus::NoMatch;

    unsigned RegNum = matchMatrixTileListRegName(Name);
    if (!RegNum)
      return ParseStatus::NoMatch;

    StringRef Tail = Name.drop_front(N: DotPosition);
    const std::optional<std::pair<int, int>> &KindRes =
        parseVectorKind(Suffix: Tail, VectorKind: RegKind::Matrix);
    if (!KindRes)
      return TokError(
          Msg: "Expected the register to be followed by element width suffix");
    ElementWidth = KindRes->second;
    Reg = RegNum;
    Lex(); // Eat the register.
    return ParseStatus::Success;
  };

  SMLoc S = getLoc();
  // Keep the '{' so it can be pushed back if no tile matches.
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  // Empty matrix list
  if (parseOptionalToken(T: AsmToken::RCurly)) {
    Operands.push_back(Elt: AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0, S, E: getLoc(), Ctx&: getContext()));
    return ParseStatus::Success;
  }

  // Try parse {za} alias early
  if (getTok().getString().equals_insensitive(RHS: "za")) {
    Lex(); // Eat 'za'

    if (parseToken(T: AsmToken::RCurly, Msg: "'}' expected"))
      return ParseStatus::Failure;

    // "{za}" selects every tile: all 8 ZAD registers.
    Operands.push_back(Elt: AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0xFF, S, E: getLoc(), Ctx&: getContext()));
    return ParseStatus::Success;
  }

  SMLoc TileLoc = getLoc();

  unsigned FirstReg, ElementWidth;
  auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
  if (!ParseRes.isSuccess()) {
    // Put the '{' back so other list-operand parsers can try.
    getLexer().UnLex(Token: LCurly);
    return ParseRes;
  }

  const MCRegisterInfo *RI = getContext().getRegisterInfo();

  unsigned PrevReg = FirstReg;

  // DRegs accumulates the ZAD registers aliased by every listed tile.
  SmallSet<unsigned, 8> DRegs;
  AArch64Operand::ComputeRegsForAlias(Reg: FirstReg, OutRegs&: DRegs, ElementWidth);

  SmallSet<unsigned, 8> SeenRegs;
  SeenRegs.insert(V: FirstReg);

  while (parseOptionalToken(T: AsmToken::Comma)) {
    TileLoc = getLoc();
    unsigned Reg, NextElementWidth;
    ParseRes = ParseMatrixTile(Reg, NextElementWidth);
    if (!ParseRes.isSuccess())
      return ParseRes;

    // Element size must match on all regs in the list.
    if (ElementWidth != NextElementWidth)
      return Error(L: TileLoc, Msg: "mismatched register size suffix");

    if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(Reg: PrevReg)))
      Warning(L: TileLoc, Msg: "tile list not in ascending order");

    if (SeenRegs.contains(V: Reg))
      Warning(L: TileLoc, Msg: "duplicate tile in list");
    else {
      SeenRegs.insert(V: Reg);
      AArch64Operand::ComputeRegsForAlias(Reg, OutRegs&: DRegs, ElementWidth);
    }

    PrevReg = Reg;
  }

  if (parseToken(T: AsmToken::RCurly, Msg: "'}' expected"))
    return ParseStatus::Failure;

  // Fold the aliased ZAD registers into a bitmask relative to ZAD0.
  unsigned RegMask = 0;
  for (auto Reg : DRegs)
    RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
                       RI->getEncodingValue(Reg: AArch64::ZAD0));
  Operands.push_back(
      Elt: AArch64Operand::CreateMatrixTileList(RegMask, S, E: getLoc(), Ctx&: getContext()));

  return ParseStatus::Success;
}
4836
/// tryParseVectorList - Parse a braced vector register list, either a
/// range form "{Vn-Vm}" (up to 4 registers apart, with wraparound) or a
/// comma-separated form "{Va, Vb, ...}" with a constant stride. All
/// registers must share one element-kind suffix. An optional vector index
/// may follow the closing '}'.
template <RegKind VectorKind>
ParseStatus AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
                                                 bool ExpectMatch) {
  MCAsmParser &Parser = getParser();
  if (!getTok().is(K: AsmToken::LCurly))
    return ParseStatus::NoMatch;

  // Wrapper around parse function
  auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
                            bool NoMatchIsError) -> ParseStatus {
    auto RegTok = getTok();
    auto ParseRes = tryParseVectorRegister(Reg, Kind, MatchKind: VectorKind);
    if (ParseRes.isSuccess()) {
      if (parseVectorKind(Suffix: Kind, VectorKind))
        return ParseRes;
      llvm_unreachable("Expected a valid vector kind");
    }

    // "zt0" is handled by the ZT operand parser, never as a list element.
    if (RegTok.is(K: AsmToken::Identifier) && ParseRes.isNoMatch() &&
        RegTok.getString().equals_insensitive(RHS: "zt0"))
      return ParseStatus::NoMatch;

    if (RegTok.isNot(K: AsmToken::Identifier) || ParseRes.isFailure() ||
        (ParseRes.isNoMatch() && NoMatchIsError &&
         !RegTok.getString().starts_with_insensitive(Prefix: "za")))
      return Error(L: Loc, Msg: "vector register expected");

    return ParseStatus::NoMatch;
  };

  // NumRegs is the size of the register file for this kind; used for
  // wraparound arithmetic in range/stride calculations below.
  unsigned NumRegs = getNumRegsForRegKind(K: VectorKind);
  SMLoc S = getLoc();
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  StringRef Kind;
  MCRegister FirstReg;
  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);

  // Put back the original left bracket if there was no match, so that
  // different types of list-operands can be matched (e.g. SVE, Neon).
  if (ParseRes.isNoMatch())
    Parser.getLexer().UnLex(Token: LCurly);

  if (!ParseRes.isSuccess())
    return ParseRes;

  MCRegister PrevReg = FirstReg;
  unsigned Count = 1;

  unsigned Stride = 1;
  if (parseOptionalToken(T: AsmToken::Minus)) {
    // Range form: "{Va-Vb}".
    SMLoc Loc = getLoc();
    StringRef NextKind;

    MCRegister Reg;
    ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
    if (!ParseRes.isSuccess())
      return ParseRes;

    // Any Kind suffices must match on all regs in the list.
    if (Kind != NextKind)
      return Error(L: Loc, Msg: "mismatched register size suffix");

    unsigned Space =
        (PrevReg < Reg) ? (Reg - PrevReg) : (NumRegs - (PrevReg - Reg));

    if (Space == 0 || Space > 3)
      return Error(L: Loc, Msg: "invalid number of vectors");

    Count += Space;
  }
  else {
    // Comma-separated form: derive the stride from the first pair and
    // require every subsequent register to keep it.
    bool HasCalculatedStride = false;
    while (parseOptionalToken(T: AsmToken::Comma)) {
      SMLoc Loc = getLoc();
      StringRef NextKind;
      MCRegister Reg;
      ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
      if (!ParseRes.isSuccess())
        return ParseRes;

      // Any Kind suffices must match on all regs in the list.
      if (Kind != NextKind)
        return Error(L: Loc, Msg: "mismatched register size suffix");

      unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
      unsigned PrevRegVal =
          getContext().getRegisterInfo()->getEncodingValue(Reg: PrevReg);
      if (!HasCalculatedStride) {
        Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
                                       : (NumRegs - (PrevRegVal - RegVal));
        HasCalculatedStride = true;
      }

      // Register must be incremental (with a wraparound at last register).
      if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs))
        return Error(L: Loc, Msg: "registers must have the same sequential stride");

      PrevReg = Reg;
      ++Count;
    }
  }

  if (parseToken(T: AsmToken::RCurly, Msg: "'}' expected"))
    return ParseStatus::Failure;

  if (Count > 4)
    return Error(L: S, Msg: "invalid number of vectors");

  unsigned NumElements = 0;
  unsigned ElementWidth = 0;
  if (!Kind.empty()) {
    if (const auto &VK = parseVectorKind(Suffix: Kind, VectorKind))
      std::tie(args&: NumElements, args&: ElementWidth) = *VK;
  }

  Operands.push_back(Elt: AArch64Operand::CreateVectorList(
      Reg: FirstReg, Count, Stride, NumElements, ElementWidth, RegisterKind: VectorKind, S,
      E: getLoc(), Ctx&: getContext()));

  // An optional "[n]" index may follow the list.
  if (getTok().is(K: AsmToken::LBrac)) {
    ParseStatus Res = tryParseVectorIndex(Operands);
    if (Res.isFailure())
      return ParseStatus::Failure;
    return ParseStatus::Success;
  }

  return ParseStatus::Success;
}
4967
4968/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4969bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4970 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, ExpectMatch: true);
4971 if (!ParseRes.isSuccess())
4972 return true;
4973
4974 return tryParseVectorIndex(Operands).isFailure();
4975}
4976
/// tryParseGPR64sp0Operand - Parse a scalar register optionally followed by
/// ", #0" (or ", 0"); any non-zero trailing index is rejected. Used for
/// forms like "[Xn, #0]" where only a zero offset is legal.
ParseStatus AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  MCRegister RegNum;
  ParseStatus Res = tryParseScalarRegister(RegNum);
  if (!Res.isSuccess())
    return Res;

  // No comma: just the bare register.
  if (!parseOptionalToken(T: AsmToken::Comma)) {
    Operands.push_back(Elt: AArch64Operand::CreateReg(
        Reg: RegNum, Kind: RegKind::Scalar, S: StartLoc, E: getLoc(), Ctx&: getContext()));
    return ParseStatus::Success;
  }

  // The '#' before the immediate is optional.
  parseOptionalToken(T: AsmToken::Hash);

  if (getTok().isNot(K: AsmToken::Integer))
    return Error(L: getLoc(), Msg: "index must be absent or #0");

  // The index expression must fold to the constant zero.
  const MCExpr *ImmVal;
  if (getParser().parseExpression(Res&: ImmVal) || !isa<MCConstantExpr>(Val: ImmVal) ||
      cast<MCConstantExpr>(Val: ImmVal)->getValue() != 0)
    return Error(L: getLoc(), Msg: "index must be absent or #0");

  Operands.push_back(Elt: AArch64Operand::CreateReg(
      Reg: RegNum, Kind: RegKind::Scalar, S: StartLoc, E: getLoc(), Ctx&: getContext()));
  return ParseStatus::Success;
}
5005
/// tryParseZTOperand - Parse a ZT lookup-table register operand, optionally
/// followed by an index of the form "[<imm>]" or "[<imm>, mul vl]". The
/// bracket, immediate, optional "mul vl" decoration, and closing bracket are
/// all pushed as separate operands.
ParseStatus AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();
  const AsmToken &Tok = getTok();
  std::string Name = Tok.getString().lower();

  MCRegister Reg = matchRegisterNameAlias(Name, Kind: RegKind::LookupTable);

  if (!Reg)
    return ParseStatus::NoMatch;

  Operands.push_back(Elt: AArch64Operand::CreateReg(
      Reg, Kind: RegKind::LookupTable, S: StartLoc, E: getLoc(), Ctx&: getContext()));
  Lex(); // Eat register.

  // Check if register is followed by an index
  if (parseOptionalToken(T: AsmToken::LBrac)) {
    Operands.push_back(
        Elt: AArch64Operand::CreateToken(Str: "[", S: getLoc(), Ctx&: getContext()));
    const MCExpr *ImmVal;
    if (getParser().parseExpression(Res&: ImmVal))
      return ParseStatus::NoMatch;
    // The index must fold to a constant.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
    if (!MCE)
      return TokError(Msg: "immediate value expected for vector index");
    Operands.push_back(Elt: AArch64Operand::CreateImm(
        Val: MCConstantExpr::create(Value: MCE->getValue(), Ctx&: getContext()), S: StartLoc,
        E: getLoc(), Ctx&: getContext()));
    // An optional ", mul vl" decoration may follow the immediate.
    if (parseOptionalToken(T: AsmToken::Comma))
      if (parseOptionalMulOperand(Operands))
        return ParseStatus::Failure;
    if (parseToken(T: AsmToken::RBrac, Msg: "']' expected"))
      return ParseStatus::Failure;
    Operands.push_back(
        Elt: AArch64Operand::CreateToken(Str: "]", S: getLoc(), Ctx&: getContext()));
  }
  return ParseStatus::Success;
}
5043
/// tryParseGPROperand - Parse a scalar GPR operand, with an optional
/// ", <shift|extend> #amount" modifier when ParseShiftExtend is set. EqTy
/// records the register-equality constraint used by tied-operand checking.
template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
ParseStatus AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  MCRegister RegNum;
  ParseStatus Res = tryParseScalarRegister(RegNum);
  if (!Res.isSuccess())
    return Res;

  // No shift/extend is the default.
  if (!ParseShiftExtend || getTok().isNot(K: AsmToken::Comma)) {
    Operands.push_back(Elt: AArch64Operand::CreateReg(
        Reg: RegNum, Kind: RegKind::Scalar, S: StartLoc, E: getLoc(), Ctx&: getContext(), EqTy));
    return ParseStatus::Success;
  }

  // Eat the comma
  Lex();

  // Match the shift
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
  Res = tryParseOptionalShiftExtend(Operands&: ExtOpnd);
  if (!Res.isSuccess())
    return Res;

  // Fold the parsed shift/extend into the register operand itself rather
  // than pushing it as a separate operand.
  auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
  Operands.push_back(Elt: AArch64Operand::CreateReg(
      Reg: RegNum, Kind: RegKind::Scalar, S: StartLoc, E: Ext->getEndLoc(), Ctx&: getContext(), EqTy,
      ExtTy: Ext->getShiftExtendType(), ShiftAmount: Ext->getShiftExtendAmount(),
      HasExplicitAmount: Ext->hasShiftExtendAmount()));

  return ParseStatus::Success;
}
5077
/// parseOptionalMulOperand - Parse the SVE "mul vl" / "mul #<imm>"
/// decoration that can follow an immediate. Returns false when a decoration
/// was parsed, true when none is present (only peeked tokens, nothing is
/// consumed in that case) or on error.
bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();

  // Some SVE instructions have a decoration after the immediate, i.e.
  // "mul vl". We parse them here and add tokens, which must be present in the
  // asm string in the tablegen instruction.
  bool NextIsVL =
      Parser.getLexer().peekTok().getString().equals_insensitive(RHS: "vl");
  bool NextIsHash = Parser.getLexer().peekTok().is(K: AsmToken::Hash);
  if (!getTok().getString().equals_insensitive(RHS: "mul") ||
      !(NextIsVL || NextIsHash))
    return true;

  Operands.push_back(
      Elt: AArch64Operand::CreateToken(Str: "mul", S: getLoc(), Ctx&: getContext()));
  Lex(); // Eat the "mul"

  if (NextIsVL) {
    Operands.push_back(
        Elt: AArch64Operand::CreateToken(Str: "vl", S: getLoc(), Ctx&: getContext()));
    Lex(); // Eat the "vl"
    return false;
  }

  if (NextIsHash) {
    Lex(); // Eat the #
    SMLoc S = getLoc();

    // Parse immediate operand.
    const MCExpr *ImmVal;
    if (!Parser.parseExpression(Res&: ImmVal))
      if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal)) {
        Operands.push_back(Elt: AArch64Operand::CreateImm(
            Val: MCConstantExpr::create(Value: MCE->getValue(), Ctx&: getContext()), S, E: getLoc(),
            Ctx&: getContext()));
        return false;
      }
  }

  // "mul" was consumed but neither "vl" nor a constant immediate followed.
  return Error(L: getLoc(), Msg: "expected 'vl' or '#<imm>'");
}
5119
5120bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
5121 StringRef &VecGroup) {
5122 MCAsmParser &Parser = getParser();
5123 auto Tok = Parser.getTok();
5124 if (Tok.isNot(K: AsmToken::Identifier))
5125 return true;
5126
5127 StringRef VG = StringSwitch<StringRef>(Tok.getString().lower())
5128 .Case(S: "vgx2", Value: "vgx2")
5129 .Case(S: "vgx4", Value: "vgx4")
5130 .Default(Value: "");
5131
5132 if (VG.empty())
5133 return true;
5134
5135 VecGroup = VG;
5136 Parser.Lex(); // Eat vgx[2|4]
5137 return false;
5138}
5139
5140bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
5141 auto Tok = getTok();
5142 if (Tok.isNot(K: AsmToken::Identifier))
5143 return true;
5144
5145 auto Keyword = Tok.getString();
5146 Keyword = StringSwitch<StringRef>(Keyword.lower())
5147 .Case(S: "sm", Value: "sm")
5148 .Case(S: "za", Value: "za")
5149 .Default(Value: Keyword);
5150 Operands.push_back(
5151 Elt: AArch64Operand::CreateToken(Str: Keyword, S: Tok.getLoc(), Ctx&: getContext()));
5152
5153 Lex();
5154 return false;
5155}
5156
/// parseOperand - Parse a arm instruction operand.  For now this parses the
/// operand regardless of the mnemonic.
///
/// \param Operands       list the parsed operand(s) are appended to.
/// \param isCondCode     this operand position must be parsed as a condition
///                       code (conditional compare/select positions).
/// \param invertCondCode the parsed condition code must be inverted (used by
///                       aliases such as cset/cinc whose underlying
///                       instruction encodes the opposite condition).
/// \returns true on failure, after a diagnostic has been emitted.
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                    bool invertCondCode) {
  MCAsmParser &Parser = getParser();

  ParseStatus ResTy =
      MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/true);

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  if (ResTy.isSuccess())
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy.isFailure())
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  // Helper: after an operand, an optional ", <shift/extend>" may follow.  If
  // the comma is NOT followed by a shift/extend, the saved token is unlexed so
  // that the comma still separates the next operand as usual.
  auto parseOptionalShiftExtend = [&](AsmToken SavedTok) {
    if (parseOptionalToken(T: AsmToken::Comma)) {
      ParseStatus Res = tryParseOptionalShiftExtend(Operands);
      if (!Res.isNoMatch())
        return Res.isFailure();
      getLexer().UnLex(Token: SavedTok);
    }
    return false;
  };
  switch (getLexer().getKind()) {
  default: {
    // Anything unrecognized is treated as a (possibly relocated) immediate
    // expression.
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(ImmVal&: Expr))
      return Error(L: S, Msg: "invalid operand");

    SMLoc E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
    Operands.push_back(Elt: AArch64Operand::CreateImm(Val: Expr, S, E, Ctx&: getContext()));
    return parseOptionalShiftExtend(getTok());
  }
  case AsmToken::LBrac: {
    Operands.push_back(
        Elt: AArch64Operand::CreateToken(Str: "[", S: getLoc(), Ctx&: getContext()));
    Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, isCondCode: false, invertCondCode: false);
  }
  case AsmToken::LCurly: {
    // Try a NEON vector list first; if that succeeds we're done.  Otherwise
    // emit the '{' as a plain token (SME ZA-tile groups are matched later).
    if (!parseNeonVectorList(Operands))
      return false;

    Operands.push_back(
        Elt: AArch64Operand::CreateToken(Str: "{", S: getLoc(), Ctx&: getContext()));
    Lex(); // Eat '{'

    // There's no comma after a '{', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, isCondCode: false, invertCondCode: false);
  }
  case AsmToken::Identifier: {
    // See if this is a "VG" decoration used by SME instructions.
    StringRef VecGroup;
    if (!parseOptionalVGOperand(Operands, VecGroup)) {
      Operands.push_back(
          Elt: AArch64Operand::CreateToken(Str: VecGroup, S: getLoc(), Ctx&: getContext()));
      return false;
    }
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands)) {
      // Parse an optional shift/extend modifier.
      AsmToken SavedTok = getTok();
      if (parseOptionalToken(T: AsmToken::Comma)) {
        // The operand after the register may be a label (e.g. ADR/ADRP). Check
        // such cases and don't report an error when <label> happens to match a
        // shift/extend modifier.
        ParseStatus Res = MatchOperandParserImpl(Operands, Mnemonic,
                                                 /*ParseForAllFeatures=*/true);
        if (!Res.isNoMatch())
          return Res.isFailure();
        Res = tryParseOptionalShiftExtend(Operands);
        if (!Res.isNoMatch())
          return Res.isFailure();
        // Neither matched: the comma separates the next operand, so put it
        // back for the caller's operand loop.
        getLexer().UnLex(Token: SavedTok);
      }
      return false;
    }

    // See if this is a "mul vl" decoration or "mul #<int>" operand used
    // by SVE instructions.
    if (!parseOptionalMulOperand(Operands))
      return false;

    // If this is a two-word mnemonic, parse its special keyword
    // operand as an identifier.
    if (Mnemonic == "brb" || Mnemonic == "smstart" || Mnemonic == "smstop" ||
        Mnemonic == "gcsb")
      return parseKeywordOperand(Operands);

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal, *Term;
    S = getLoc();
    if (getParser().parseExpression(Res&: IdVal))
      return true;
    if (getParser().parseAtSpecifier(Res&: IdVal, EndLoc&: E))
      return true;
    // Accept a trailing "+ <expr>" / "- <expr>" addend after the expression.
    std::optional<MCBinaryExpr::Opcode> Opcode;
    if (parseOptionalToken(T: AsmToken::Plus))
      Opcode = MCBinaryExpr::Add;
    else if (parseOptionalToken(T: AsmToken::Minus))
      Opcode = MCBinaryExpr::Sub;
    if (Opcode) {
      if (getParser().parsePrimaryExpr(Res&: Term, EndLoc&: E))
        return true;
      IdVal = MCBinaryExpr::create(Op: *Opcode, LHS: IdVal, RHS: Term, Ctx&: getContext());
    }
    Operands.push_back(Elt: AArch64Operand::CreateImm(Val: IdVal, S, E, Ctx&: getContext()));

    // Parse an optional shift/extend modifier.
    return parseOptionalShiftExtend(getTok());
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();

    parseOptionalToken(T: AsmToken::Hash);

    // Parse a negative sign
    bool isNegative = false;
    if (getTok().is(K: AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(K: AsmToken::Real))
        Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = getTok();
    if (Tok.is(K: AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt" && Mnemonic != "fcmne")
        return TokError(Msg: "unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError(Msg: "expected floating-point constant #0.0");
      Lex(); // Eat the token.

      // The matcher expects "#0" and ".0" as two raw tokens, not an
      // immediate operand.
      Operands.push_back(Elt: AArch64Operand::CreateToken(Str: "#0", S, Ctx&: getContext()));
      Operands.push_back(Elt: AArch64Operand::CreateToken(Str: ".0", S, Ctx&: getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
    Operands.push_back(Elt: AArch64Operand::CreateImm(Val: ImmVal, S, E, Ctx&: getContext()));

    // Parse an optional shift/extend modifier.
    return parseOptionalShiftExtend(Tok);
  }
  case AsmToken::Equal: {
    // "ldr <reg>, =<expr>" pseudo-instruction.
    SMLoc Loc = getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return TokError(Msg: "unexpected token in operand");
    Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(Res&: SubExprVal))
      return true;

    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
      return Error(L: Loc, Msg: "Only valid when first operand is register");

    bool IsXReg =
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Reg: Operands[1]->getReg());

    MCContext& Ctx = getContext();
    E = SMLoc::getFromPointer(Ptr: Loc.getPointer() - 1);
    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
    if (isa<MCConstantExpr>(Val: SubExprVal)) {
      uint64_t Imm = (cast<MCConstantExpr>(Val: SubExprVal))->getValue();
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      // Strip trailing 16-bit zero chunks off the constant and fold them into
      // an LSL amount, so the value can be tested against a movz encoding.
      while (Imm > 0xFFFF && llvm::countr_zero(Val: Imm) >= 16) {
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
        // Rewrite "ldr" into "movz <reg>, #imm [, lsl #amt]".
        Operands[0] = AArch64Operand::CreateToken(Str: "movz", S: Loc, Ctx);
        Operands.push_back(Elt: AArch64Operand::CreateImm(
            Val: MCConstantExpr::create(Value: Imm, Ctx), S, E, Ctx));
        if (ShiftAmt)
          Operands.push_back(Elt: AArch64Operand::CreateShiftExtend(ShOp: AArch64_AM::LSL,
                                                                Val: ShiftAmt, HasExplicitAmount: true, S, E, Ctx));
        return false;
      }
      APInt Simm = APInt(64, Imm << ShiftAmt);
      // check if the immediate is an unsigned or signed 32-bit int for W regs
      if (!IsXReg && !(Simm.isIntN(N: 32) || Simm.isSignedIntN(N: 32)))
        return Error(L: Loc, Msg: "Immediate too large for register");
    }
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc =
        getTargetStreamer().addConstantPoolEntry(SubExprVal, Size: IsXReg ? 8 : 4, Loc);
    Operands.push_back(Elt: AArch64Operand::CreateImm(Val: CPLoc, S, E, Ctx));
    return false;
  }
  }
}
5382
5383bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
5384 const MCExpr *Expr = nullptr;
5385 SMLoc L = getLoc();
5386 if (check(P: getParser().parseExpression(Res&: Expr), Loc: L, Msg: "expected expression"))
5387 return true;
5388 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Val: Expr);
5389 if (check(P: !Value, Loc: L, Msg: "expected constant expression"))
5390 return true;
5391 Out = Value->getValue();
5392 return false;
5393}
5394
5395bool AArch64AsmParser::parseComma() {
5396 if (check(P: getTok().isNot(K: AsmToken::Comma), Loc: getLoc(), Msg: "expected comma"))
5397 return true;
5398 // Eat the comma
5399 Lex();
5400 return false;
5401}
5402
/// Parse a register and return its zero-based index relative to \p Base in
/// \p Out, checking that it lies within [\p First, \p Last].  FP (x29) and
/// LR (x30) are accepted as indices 29/30 when the range nominally ends at
/// them, even though they are not contiguous with x0..x28 in the register
/// enum.  Returns true on failure, after emitting a diagnostic.
bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
                                            unsigned First, unsigned Last) {
  MCRegister Reg;
  SMLoc Start, End;
  if (check(P: parseRegister(Reg, StartLoc&: Start, EndLoc&: End), Loc: getLoc(), Msg: "expected register"))
    return true;

  // Special handling for FP and LR; they aren't linearly after x28 in
  // the registers enum.
  unsigned RangeEnd = Last;
  if (Base == AArch64::X0) {
    if (Last == AArch64::FP) {
      // Clamp the numeric comparison to x28 and accept FP explicitly as
      // index 29.
      RangeEnd = AArch64::X28;
      if (Reg == AArch64::FP) {
        Out = 29;
        return false;
      }
    }
    if (Last == AArch64::LR) {
      // Clamp the numeric comparison to x28 and accept FP/LR explicitly as
      // indices 29/30.
      RangeEnd = AArch64::X28;
      if (Reg == AArch64::FP) {
        Out = 29;
        return false;
      } else if (Reg == AArch64::LR) {
        Out = 30;
        return false;
      }
    }
  }

  // Note: the diagnostic deliberately names the caller's original range
  // [First, Last], not the clamped RangeEnd used for the comparison.
  if (check(P: Reg < First || Reg > RangeEnd, Loc: Start,
            Msg: Twine("expected register in range ") +
                AArch64InstPrinter::getRegisterName(Reg: First) + " to " +
                AArch64InstPrinter::getRegisterName(Reg: Last)))
    return true;
  Out = Reg - Base;
  return false;
}
5441
5442bool AArch64AsmParser::areEqualRegs(const MCParsedAsmOperand &Op1,
5443 const MCParsedAsmOperand &Op2) const {
5444 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
5445 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
5446
5447 if (AOp1.isVectorList() && AOp2.isVectorList())
5448 return AOp1.getVectorListCount() == AOp2.getVectorListCount() &&
5449 AOp1.getVectorListStart() == AOp2.getVectorListStart() &&
5450 AOp1.getVectorListStride() == AOp2.getVectorListStride();
5451
5452 if (!AOp1.isReg() || !AOp2.isReg())
5453 return false;
5454
5455 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
5456 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
5457 return MCTargetAsmParser::areEqualRegs(Op1, Op2);
5458
5459 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
5460 "Testing equality of non-scalar registers not supported");
5461
5462 // Check if a registers match their sub/super register classes.
5463 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
5464 return getXRegFromWReg(Reg: Op1.getReg()) == Op2.getReg();
5465 if (AOp1.getRegEqualityTy() == EqualsSubReg)
5466 return getWRegFromXReg(Reg: Op1.getReg()) == Op2.getReg();
5467 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
5468 return getXRegFromWReg(Reg: Op2.getReg()) == Op1.getReg();
5469 if (AOp2.getRegEqualityTy() == EqualsSubReg)
5470 return getWRegFromXReg(Reg: Op2.getReg()) == Op1.getReg();
5471
5472 return false;
5473}
5474
/// Parse an AArch64 instruction mnemonic followed by its operands.
/// This handles: the legacy one-word conditional-branch spellings
/// ("beq" -> "b.eq"), the AArch64-specific "<name> .req <reg>" directive,
/// SYS/SYSL/SYSP alias mnemonics (ic/dc/at/tlbi/gicr/tlbip/...), splitting
/// '.'-separated mnemonics into suffix tokens, and the positional condition
/// code operands of the conditional compare/select (alias) instructions.
/// \returns true on failure, after a diagnostic has been emitted.
bool AArch64AsmParser::parseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  // Canonicalize one-word branch spellings into the "b.<cond>" form the
  // matcher expects.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case(S: "beq", Value: "b.eq")
             .Case(S: "bne", Value: "b.ne")
             .Case(S: "bhs", Value: "b.hs")
             .Case(S: "bcs", Value: "b.cs")
             .Case(S: "blo", Value: "b.lo")
             .Case(S: "bcc", Value: "b.cc")
             .Case(S: "bmi", Value: "b.mi")
             .Case(S: "bpl", Value: "b.pl")
             .Case(S: "bvs", Value: "b.vs")
             .Case(S: "bvc", Value: "b.vc")
             .Case(S: "bhi", Value: "b.hi")
             .Case(S: "bls", Value: "b.ls")
             .Case(S: "bge", Value: "b.ge")
             .Case(S: "blt", Value: "b.lt")
             .Case(S: "bgt", Value: "b.gt")
             .Case(S: "ble", Value: "b.le")
             .Case(S: "bal", Value: "b.al")
             .Case(S: "bnv", Value: "b.nv")
             .Default(Value: Name);

  // First check for the AArch64-specific .req directive.
  if (getTok().is(K: AsmToken::Identifier) &&
      getTok().getIdentifier().lower() == ".req") {
    parseDirectiveReq(Name, L: NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction."
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find(C: '.');
  StringRef Head = Name.slice(Start, End: Next);

  // IC, DC, AT, TLBI, MLBI, PLBI, GIC{R}, GSB and Prediction invalidation
  // instructions are aliases for the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
      Head == "cfp" || Head == "dvp" || Head == "cpp" || Head == "cosp" ||
      Head == "mlbi" || Head == "plbi" || Head == "gic" || Head == "gsb")
    return parseSysAlias(Name: Head, NameLoc, Operands);

  // GICR instructions are aliases for the SYSL instruction.
  if (Head == "gicr")
    return parseSyslAlias(Name: Head, NameLoc, Operands);

  // TLBIP instructions are aliases for the SYSP instruction.
  if (Head == "tlbip")
    return parseSyspAlias(Name: Head, NameLoc, Operands);

  Operands.push_back(Elt: AArch64Operand::CreateToken(Str: Head, S: NameLoc, Ctx&: getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
  if ((Head == "b" || Head == "bc") && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find(C: '.', From: Start + 1);
    Head = Name.slice(Start: Start + 1, End: Next);

    SMLoc SuffixLoc = SMLoc::getFromPointer(Ptr: NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    std::string Suggestion;
    AArch64CC::CondCode CC = parseCondCodeString(Cond: Head, Suggestion);
    if (CC == AArch64CC::Invalid) {
      std::string Msg = "invalid condition code";
      // parseCondCodeString may propose a close spelling (e.g. for typos).
      if (!Suggestion.empty())
        Msg += ", did you mean " + Suggestion + "?";
      return Error(L: SuffixLoc, Msg);
    }
    Operands.push_back(Elt: AArch64Operand::CreateToken(Str: ".", S: SuffixLoc, Ctx&: getContext(),
                                                    /*IsSuffix=*/true));
    Operands.push_back(
        Elt: AArch64Operand::CreateCondCode(Code: CC, S: NameLoc, E: NameLoc, Ctx&: getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find(C: '.', From: Start + 1);
    Head = Name.slice(Start, End: Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(Ptr: NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(Elt: AArch64Operand::CreateToken(
        Str: Head, S: SuffixLoc, Ctx&: getContext(), /*IsSuffix=*/true));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  // generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(K: AsmToken::EndOfStatement)) {

    unsigned N = 1;
    do {
      // Parse and remember the operand.  N tracks the 1-based operand
      // position so the condition-code position checks above can fire.
      if (parseOperand(Operands, isCondCode: (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       invertCondCode: condCodeSecondOperand || condCodeThirdOperand)) {
        return true;
      }

      // After successfully parsing some operands there are three special cases
      // to consider (i.e. notional operands not separated by commas). Two are
      // due to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // And a further case is '}', which ends a group of tokens specifying the
      // SME accumulator array 'ZA' or tile vector, i.e.
      //
      //   '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!

      if (parseOptionalToken(T: AsmToken::RBrac))
        Operands.push_back(
            Elt: AArch64Operand::CreateToken(Str: "]", S: getLoc(), Ctx&: getContext()));
      if (parseOptionalToken(T: AsmToken::Exclaim))
        Operands.push_back(
            Elt: AArch64Operand::CreateToken(Str: "!", S: getLoc(), Ctx&: getContext()));
      if (parseOptionalToken(T: AsmToken::RCurly))
        Operands.push_back(
            Elt: AArch64Operand::CreateToken(Str: "}", S: getLoc(), Ctx&: getContext()));

      ++N;
    } while (parseOptionalToken(T: AsmToken::Comma));
  }

  if (parseToken(T: AsmToken::EndOfStatement, Msg: "unexpected token in argument list"))
    return true;

  return false;
}
5627
5628static inline bool isMatchingOrAlias(MCRegister ZReg, MCRegister Reg) {
5629 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
5630 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
5631 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
5632 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
5633 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
5634 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
5635 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
5636}
5637
5638// FIXME: This entire function is a giant hack to provide us with decent
5639// operand range validation/diagnostics until TableGen/MC can be extended
5640// to support autogeneration of this kind of validation.
5641bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
5642 SmallVectorImpl<SMLoc> &Loc) {
5643 const MCRegisterInfo *RI = getContext().getRegisterInfo();
5644 const MCInstrDesc &MCID = MII.get(Opcode: Inst.getOpcode());
5645
5646 // A prefix only applies to the instruction following it. Here we extract
5647 // prefix information for the next instruction before validating the current
5648 // one so that in the case of failure we don't erroneously continue using the
5649 // current prefix.
5650 PrefixInfo Prefix = NextPrefix;
5651 NextPrefix = PrefixInfo::CreateFromInst(Inst, TSFlags: MCID.TSFlags);
5652
5653 // Before validating the instruction in isolation we run through the rules
5654 // applicable when it follows a prefix instruction.
5655 // NOTE: brk & hlt can be prefixed but require no additional validation.
5656 if (Prefix.isActive() &&
5657 (Inst.getOpcode() != AArch64::BRK) &&
5658 (Inst.getOpcode() != AArch64::HLT)) {
5659
5660 // Prefixed instructions must have a destructive operand.
5661 if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
5662 AArch64::NotDestructive)
5663 return Error(L: IDLoc, Msg: "instruction is unpredictable when following a"
5664 " movprfx, suggest replacing movprfx with mov");
5665
5666 // Destination operands must match.
5667 if (Inst.getOperand(i: 0).getReg() != Prefix.getDstReg())
5668 return Error(L: Loc[0], Msg: "instruction is unpredictable when following a"
5669 " movprfx writing to a different destination");
5670
5671 // Destination operand must not be used in any other location.
5672 for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
5673 if (Inst.getOperand(i).isReg() &&
5674 (MCID.getOperandConstraint(OpNum: i, Constraint: MCOI::TIED_TO) == -1) &&
5675 isMatchingOrAlias(ZReg: Prefix.getDstReg(), Reg: Inst.getOperand(i).getReg()))
5676 return Error(L: Loc[0], Msg: "instruction is unpredictable when following a"
5677 " movprfx and destination also used as non-destructive"
5678 " source");
5679 }
5680
5681 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
5682 if (Prefix.isPredicated()) {
5683 int PgIdx = -1;
5684
5685 // Find the instructions general predicate.
5686 for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
5687 if (Inst.getOperand(i).isReg() &&
5688 PPRRegClass.contains(Reg: Inst.getOperand(i).getReg())) {
5689 PgIdx = i;
5690 break;
5691 }
5692
5693 // Instruction must be predicated if the movprfx is predicated.
5694 if (PgIdx == -1 ||
5695 (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
5696 return Error(L: IDLoc, Msg: "instruction is unpredictable when following a"
5697 " predicated movprfx, suggest using unpredicated movprfx");
5698
5699 // Instruction must use same general predicate as the movprfx.
5700 if (Inst.getOperand(i: PgIdx).getReg() != Prefix.getPgReg())
5701 return Error(L: IDLoc, Msg: "instruction is unpredictable when following a"
5702 " predicated movprfx using a different general predicate");
5703
5704 // Instruction element type must match the movprfx.
5705 if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
5706 return Error(L: IDLoc, Msg: "instruction is unpredictable when following a"
5707 " predicated movprfx with a different element size");
5708 }
5709 }
5710
5711 // On ARM64EC, only valid registers may be used. Warn against using
5712 // explicitly disallowed registers.
5713 if (IsWindowsArm64EC) {
5714 for (unsigned i = 0; i < Inst.getNumOperands(); ++i) {
5715 if (Inst.getOperand(i).isReg()) {
5716 MCRegister Reg = Inst.getOperand(i).getReg();
5717 // At this point, vector registers are matched to their
5718 // appropriately sized alias.
5719 if ((Reg == AArch64::W13 || Reg == AArch64::X13) ||
5720 (Reg == AArch64::W14 || Reg == AArch64::X14) ||
5721 (Reg == AArch64::W23 || Reg == AArch64::X23) ||
5722 (Reg == AArch64::W24 || Reg == AArch64::X24) ||
5723 (Reg == AArch64::W28 || Reg == AArch64::X28) ||
5724 (Reg >= AArch64::Q16 && Reg <= AArch64::Q31) ||
5725 (Reg >= AArch64::D16 && Reg <= AArch64::D31) ||
5726 (Reg >= AArch64::S16 && Reg <= AArch64::S31) ||
5727 (Reg >= AArch64::H16 && Reg <= AArch64::H31) ||
5728 (Reg >= AArch64::B16 && Reg <= AArch64::B31)) {
5729 Warning(L: IDLoc, Msg: "register " + Twine(RI->getName(RegNo: Reg)) +
5730 " is disallowed on ARM64EC.");
5731 }
5732 }
5733 }
5734 }
5735
5736 // Check for indexed addressing modes w/ the base register being the
5737 // same as a destination/source register or pair load where
5738 // the Rt == Rt2. All of those are undefined behaviour.
5739 switch (Inst.getOpcode()) {
5740 case AArch64::LDPSWpre:
5741 case AArch64::LDPWpost:
5742 case AArch64::LDPWpre:
5743 case AArch64::LDPXpost:
5744 case AArch64::LDPXpre: {
5745 MCRegister Rt = Inst.getOperand(i: 1).getReg();
5746 MCRegister Rt2 = Inst.getOperand(i: 2).getReg();
5747 MCRegister Rn = Inst.getOperand(i: 3).getReg();
5748 if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt))
5749 return Error(L: Loc[0], Msg: "unpredictable LDP instruction, writeback base "
5750 "is also a destination");
5751 if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt2))
5752 return Error(L: Loc[1], Msg: "unpredictable LDP instruction, writeback base "
5753 "is also a destination");
5754 [[fallthrough]];
5755 }
5756 case AArch64::LDR_ZA:
5757 case AArch64::STR_ZA: {
5758 if (Inst.getOperand(i: 2).isImm() && Inst.getOperand(i: 4).isImm() &&
5759 Inst.getOperand(i: 2).getImm() != Inst.getOperand(i: 4).getImm())
5760 return Error(L: Loc[1],
5761 Msg: "unpredictable instruction, immediate and offset mismatch.");
5762 break;
5763 }
5764 case AArch64::LDPDi:
5765 case AArch64::LDPQi:
5766 case AArch64::LDPSi:
5767 case AArch64::LDPSWi:
5768 case AArch64::LDPWi:
5769 case AArch64::LDPXi: {
5770 MCRegister Rt = Inst.getOperand(i: 0).getReg();
5771 MCRegister Rt2 = Inst.getOperand(i: 1).getReg();
5772 if (Rt == Rt2)
5773 return Error(L: Loc[1], Msg: "unpredictable LDP instruction, Rt2==Rt");
5774 break;
5775 }
5776 case AArch64::LDPDpost:
5777 case AArch64::LDPDpre:
5778 case AArch64::LDPQpost:
5779 case AArch64::LDPQpre:
5780 case AArch64::LDPSpost:
5781 case AArch64::LDPSpre:
5782 case AArch64::LDPSWpost: {
5783 MCRegister Rt = Inst.getOperand(i: 1).getReg();
5784 MCRegister Rt2 = Inst.getOperand(i: 2).getReg();
5785 if (Rt == Rt2)
5786 return Error(L: Loc[1], Msg: "unpredictable LDP instruction, Rt2==Rt");
5787 break;
5788 }
5789 case AArch64::STPDpost:
5790 case AArch64::STPDpre:
5791 case AArch64::STPQpost:
5792 case AArch64::STPQpre:
5793 case AArch64::STPSpost:
5794 case AArch64::STPSpre:
5795 case AArch64::STPWpost:
5796 case AArch64::STPWpre:
5797 case AArch64::STPXpost:
5798 case AArch64::STPXpre: {
5799 MCRegister Rt = Inst.getOperand(i: 1).getReg();
5800 MCRegister Rt2 = Inst.getOperand(i: 2).getReg();
5801 MCRegister Rn = Inst.getOperand(i: 3).getReg();
5802 if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt))
5803 return Error(L: Loc[0], Msg: "unpredictable STP instruction, writeback base "
5804 "is also a source");
5805 if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt2))
5806 return Error(L: Loc[1], Msg: "unpredictable STP instruction, writeback base "
5807 "is also a source");
5808 break;
5809 }
5810 case AArch64::LDRBBpre:
5811 case AArch64::LDRBpre:
5812 case AArch64::LDRHHpre:
5813 case AArch64::LDRHpre:
5814 case AArch64::LDRSBWpre:
5815 case AArch64::LDRSBXpre:
5816 case AArch64::LDRSHWpre:
5817 case AArch64::LDRSHXpre:
5818 case AArch64::LDRSWpre:
5819 case AArch64::LDRWpre:
5820 case AArch64::LDRXpre:
5821 case AArch64::LDRBBpost:
5822 case AArch64::LDRBpost:
5823 case AArch64::LDRHHpost:
5824 case AArch64::LDRHpost:
5825 case AArch64::LDRSBWpost:
5826 case AArch64::LDRSBXpost:
5827 case AArch64::LDRSHWpost:
5828 case AArch64::LDRSHXpost:
5829 case AArch64::LDRSWpost:
5830 case AArch64::LDRWpost:
5831 case AArch64::LDRXpost: {
5832 MCRegister Rt = Inst.getOperand(i: 1).getReg();
5833 MCRegister Rn = Inst.getOperand(i: 2).getReg();
5834 if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt))
5835 return Error(L: Loc[0], Msg: "unpredictable LDR instruction, writeback base "
5836 "is also a source");
5837 break;
5838 }
5839 case AArch64::STRBBpost:
5840 case AArch64::STRBpost:
5841 case AArch64::STRHHpost:
5842 case AArch64::STRHpost:
5843 case AArch64::STRWpost:
5844 case AArch64::STRXpost:
5845 case AArch64::STRBBpre:
5846 case AArch64::STRBpre:
5847 case AArch64::STRHHpre:
5848 case AArch64::STRHpre:
5849 case AArch64::STRWpre:
5850 case AArch64::STRXpre: {
5851 MCRegister Rt = Inst.getOperand(i: 1).getReg();
5852 MCRegister Rn = Inst.getOperand(i: 2).getReg();
5853 if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt))
5854 return Error(L: Loc[0], Msg: "unpredictable STR instruction, writeback base "
5855 "is also a source");
5856 break;
5857 }
5858 case AArch64::STXRB:
5859 case AArch64::STXRH:
5860 case AArch64::STXRW:
5861 case AArch64::STXRX:
5862 case AArch64::STLXRB:
5863 case AArch64::STLXRH:
5864 case AArch64::STLXRW:
5865 case AArch64::STLXRX: {
5866 MCRegister Rs = Inst.getOperand(i: 0).getReg();
5867 MCRegister Rt = Inst.getOperand(i: 1).getReg();
5868 MCRegister Rn = Inst.getOperand(i: 2).getReg();
5869 if (RI->isSubRegisterEq(RegA: Rt, RegB: Rs) ||
5870 (RI->isSubRegisterEq(RegA: Rn, RegB: Rs) && Rn != AArch64::SP))
5871 return Error(L: Loc[0],
5872 Msg: "unpredictable STXR instruction, status is also a source");
5873 break;
5874 }
5875 case AArch64::STXPW:
5876 case AArch64::STXPX:
5877 case AArch64::STLXPW:
5878 case AArch64::STLXPX: {
5879 MCRegister Rs = Inst.getOperand(i: 0).getReg();
5880 MCRegister Rt1 = Inst.getOperand(i: 1).getReg();
5881 MCRegister Rt2 = Inst.getOperand(i: 2).getReg();
5882 MCRegister Rn = Inst.getOperand(i: 3).getReg();
5883 if (RI->isSubRegisterEq(RegA: Rt1, RegB: Rs) || RI->isSubRegisterEq(RegA: Rt2, RegB: Rs) ||
5884 (RI->isSubRegisterEq(RegA: Rn, RegB: Rs) && Rn != AArch64::SP))
5885 return Error(L: Loc[0],
5886 Msg: "unpredictable STXP instruction, status is also a source");
5887 break;
5888 }
5889 case AArch64::LDRABwriteback:
5890 case AArch64::LDRAAwriteback: {
5891 MCRegister Xt = Inst.getOperand(i: 0).getReg();
5892 MCRegister Xn = Inst.getOperand(i: 1).getReg();
5893 if (Xt == Xn)
5894 return Error(L: Loc[0],
5895 Msg: "unpredictable LDRA instruction, writeback base"
5896 " is also a destination");
5897 break;
5898 }
5899 }
5900
5901 // Check v8.8-A memops instructions.
5902 switch (Inst.getOpcode()) {
5903 case AArch64::CPYFP:
5904 case AArch64::CPYFPWN:
5905 case AArch64::CPYFPRN:
5906 case AArch64::CPYFPN:
5907 case AArch64::CPYFPWT:
5908 case AArch64::CPYFPWTWN:
5909 case AArch64::CPYFPWTRN:
5910 case AArch64::CPYFPWTN:
5911 case AArch64::CPYFPRT:
5912 case AArch64::CPYFPRTWN:
5913 case AArch64::CPYFPRTRN:
5914 case AArch64::CPYFPRTN:
5915 case AArch64::CPYFPT:
5916 case AArch64::CPYFPTWN:
5917 case AArch64::CPYFPTRN:
5918 case AArch64::CPYFPTN:
5919 case AArch64::CPYFM:
5920 case AArch64::CPYFMWN:
5921 case AArch64::CPYFMRN:
5922 case AArch64::CPYFMN:
5923 case AArch64::CPYFMWT:
5924 case AArch64::CPYFMWTWN:
5925 case AArch64::CPYFMWTRN:
5926 case AArch64::CPYFMWTN:
5927 case AArch64::CPYFMRT:
5928 case AArch64::CPYFMRTWN:
5929 case AArch64::CPYFMRTRN:
5930 case AArch64::CPYFMRTN:
5931 case AArch64::CPYFMT:
5932 case AArch64::CPYFMTWN:
5933 case AArch64::CPYFMTRN:
5934 case AArch64::CPYFMTN:
5935 case AArch64::CPYFE:
5936 case AArch64::CPYFEWN:
5937 case AArch64::CPYFERN:
5938 case AArch64::CPYFEN:
5939 case AArch64::CPYFEWT:
5940 case AArch64::CPYFEWTWN:
5941 case AArch64::CPYFEWTRN:
5942 case AArch64::CPYFEWTN:
5943 case AArch64::CPYFERT:
5944 case AArch64::CPYFERTWN:
5945 case AArch64::CPYFERTRN:
5946 case AArch64::CPYFERTN:
5947 case AArch64::CPYFET:
5948 case AArch64::CPYFETWN:
5949 case AArch64::CPYFETRN:
5950 case AArch64::CPYFETN:
5951 case AArch64::CPYP:
5952 case AArch64::CPYPWN:
5953 case AArch64::CPYPRN:
5954 case AArch64::CPYPN:
5955 case AArch64::CPYPWT:
5956 case AArch64::CPYPWTWN:
5957 case AArch64::CPYPWTRN:
5958 case AArch64::CPYPWTN:
5959 case AArch64::CPYPRT:
5960 case AArch64::CPYPRTWN:
5961 case AArch64::CPYPRTRN:
5962 case AArch64::CPYPRTN:
5963 case AArch64::CPYPT:
5964 case AArch64::CPYPTWN:
5965 case AArch64::CPYPTRN:
5966 case AArch64::CPYPTN:
5967 case AArch64::CPYM:
5968 case AArch64::CPYMWN:
5969 case AArch64::CPYMRN:
5970 case AArch64::CPYMN:
5971 case AArch64::CPYMWT:
5972 case AArch64::CPYMWTWN:
5973 case AArch64::CPYMWTRN:
5974 case AArch64::CPYMWTN:
5975 case AArch64::CPYMRT:
5976 case AArch64::CPYMRTWN:
5977 case AArch64::CPYMRTRN:
5978 case AArch64::CPYMRTN:
5979 case AArch64::CPYMT:
5980 case AArch64::CPYMTWN:
5981 case AArch64::CPYMTRN:
5982 case AArch64::CPYMTN:
5983 case AArch64::CPYE:
5984 case AArch64::CPYEWN:
5985 case AArch64::CPYERN:
5986 case AArch64::CPYEN:
5987 case AArch64::CPYEWT:
5988 case AArch64::CPYEWTWN:
5989 case AArch64::CPYEWTRN:
5990 case AArch64::CPYEWTN:
5991 case AArch64::CPYERT:
5992 case AArch64::CPYERTWN:
5993 case AArch64::CPYERTRN:
5994 case AArch64::CPYERTN:
5995 case AArch64::CPYET:
5996 case AArch64::CPYETWN:
5997 case AArch64::CPYETRN:
5998 case AArch64::CPYETN: {
5999 // Xd_wb == op0, Xs_wb == op1, Xn_wb == op2
6000 MCRegister Xd = Inst.getOperand(i: 3).getReg();
6001 MCRegister Xs = Inst.getOperand(i: 4).getReg();
6002 MCRegister Xn = Inst.getOperand(i: 5).getReg();
6003
6004 assert(Xd == Inst.getOperand(0).getReg() && "Xd_wb and Xd do not match");
6005 assert(Xs == Inst.getOperand(1).getReg() && "Xs_wb and Xs do not match");
6006 assert(Xn == Inst.getOperand(2).getReg() && "Xn_wb and Xn do not match");
6007
6008 if (Xd == Xs)
6009 return Error(L: Loc[0], Msg: "invalid CPY instruction, destination and source"
6010 " registers are the same");
6011 if (Xd == Xn)
6012 return Error(L: Loc[0], Msg: "invalid CPY instruction, destination and size"
6013 " registers are the same");
6014 if (Xs == Xn)
6015 return Error(L: Loc[0], Msg: "invalid CPY instruction, source and size"
6016 " registers are the same");
6017 break;
6018 }
6019 case AArch64::SETP:
6020 case AArch64::SETPT:
6021 case AArch64::SETPN:
6022 case AArch64::SETPTN:
6023 case AArch64::SETM:
6024 case AArch64::SETMT:
6025 case AArch64::SETMN:
6026 case AArch64::SETMTN:
6027 case AArch64::SETE:
6028 case AArch64::SETET:
6029 case AArch64::SETEN:
6030 case AArch64::SETETN:
6031 case AArch64::SETGP:
6032 case AArch64::SETGPT:
6033 case AArch64::SETGPN:
6034 case AArch64::SETGPTN:
6035 case AArch64::SETGM:
6036 case AArch64::SETGMT:
6037 case AArch64::SETGMN:
6038 case AArch64::SETGMTN:
6039 case AArch64::MOPSSETGE:
6040 case AArch64::MOPSSETGET:
6041 case AArch64::MOPSSETGEN:
6042 case AArch64::MOPSSETGETN: {
6043 // Xd_wb == op0, Xn_wb == op1
6044 MCRegister Xd = Inst.getOperand(i: 2).getReg();
6045 MCRegister Xn = Inst.getOperand(i: 3).getReg();
6046 MCRegister Xm = Inst.getOperand(i: 4).getReg();
6047
6048 assert(Xd == Inst.getOperand(0).getReg() && "Xd_wb and Xd do not match");
6049 assert(Xn == Inst.getOperand(1).getReg() && "Xn_wb and Xn do not match");
6050
6051 if (Xd == Xn)
6052 return Error(L: Loc[0], Msg: "invalid SET instruction, destination and size"
6053 " registers are the same");
6054 if (Xd == Xm)
6055 return Error(L: Loc[0], Msg: "invalid SET instruction, destination and source"
6056 " registers are the same");
6057 if (Xn == Xm)
6058 return Error(L: Loc[0], Msg: "invalid SET instruction, source and size"
6059 " registers are the same");
6060 break;
6061 }
6062 case AArch64::SETGOP:
6063 case AArch64::SETGOPT:
6064 case AArch64::SETGOPN:
6065 case AArch64::SETGOPTN:
6066 case AArch64::SETGOM:
6067 case AArch64::SETGOMT:
6068 case AArch64::SETGOMN:
6069 case AArch64::SETGOMTN:
6070 case AArch64::SETGOE:
6071 case AArch64::SETGOET:
6072 case AArch64::SETGOEN:
6073 case AArch64::SETGOETN: {
6074 // Xd_wb == op0, Xn_wb == op1
6075 MCRegister Xd = Inst.getOperand(i: 2).getReg();
6076 MCRegister Xn = Inst.getOperand(i: 3).getReg();
6077
6078 assert(Xd == Inst.getOperand(0).getReg() && "Xd_wb and Xd do not match");
6079 assert(Xn == Inst.getOperand(1).getReg() && "Xn_wb and Xn do not match");
6080
6081 if (Xd == Xn)
6082 return Error(L: Loc[0], Msg: "invalid SET instruction, destination and size"
6083 " registers are the same");
6084 break;
6085 }
6086 }
6087
6088 // Now check immediate ranges. Separate from the above as there is overlap
6089 // in the instructions being checked and this keeps the nested conditionals
6090 // to a minimum.
6091 switch (Inst.getOpcode()) {
6092 case AArch64::ADDSWri:
6093 case AArch64::ADDSXri:
6094 case AArch64::ADDWri:
6095 case AArch64::ADDXri:
6096 case AArch64::SUBSWri:
6097 case AArch64::SUBSXri:
6098 case AArch64::SUBWri:
6099 case AArch64::SUBXri: {
6100 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
6101 // some slight duplication here.
6102 if (Inst.getOperand(i: 2).isExpr()) {
6103 const MCExpr *Expr = Inst.getOperand(i: 2).getExpr();
6104 AArch64::Specifier ELFSpec;
6105 AArch64::Specifier DarwinSpec;
6106 int64_t Addend;
6107 if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
6108
6109 // Only allow these with ADDXri.
6110 if ((DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
6111 DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF) &&
6112 Inst.getOpcode() == AArch64::ADDXri)
6113 return false;
6114
6115 // Only allow these with ADDXri/ADDWri
6116 if (llvm::is_contained(
6117 Set: {AArch64::S_LO12, AArch64::S_GOT_AUTH_LO12,
6118 AArch64::S_DTPREL_HI12, AArch64::S_DTPREL_LO12,
6119 AArch64::S_DTPREL_LO12_NC, AArch64::S_TPREL_HI12,
6120 AArch64::S_TPREL_LO12, AArch64::S_TPREL_LO12_NC,
6121 AArch64::S_TLSDESC_LO12, AArch64::S_TLSDESC_AUTH_LO12,
6122 AArch64::S_SECREL_LO12, AArch64::S_SECREL_HI12},
6123 Element: ELFSpec) &&
6124 (Inst.getOpcode() == AArch64::ADDXri ||
6125 Inst.getOpcode() == AArch64::ADDWri))
6126 return false;
6127
6128 // Don't allow symbol refs in the immediate field otherwise
6129 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
6130 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
6131 // 'cmp w0, 'borked')
6132 return Error(L: Loc.back(), Msg: "invalid immediate expression");
6133 }
6134 // We don't validate more complex expressions here
6135 }
6136 return false;
6137 }
6138 default:
6139 return false;
6140 }
6141}
6142
6143static std::string AArch64MnemonicSpellCheck(StringRef S,
6144 const FeatureBitset &FBS,
6145 unsigned VariantID = 0);
6146
6147bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
6148 uint64_t ErrorInfo,
6149 OperandVector &Operands) {
6150 switch (ErrCode) {
6151 case Match_InvalidTiedOperand: {
6152 auto &Op = static_cast<const AArch64Operand &>(*Operands[ErrorInfo]);
6153 if (Op.isVectorList())
6154 return Error(L: Loc, Msg: "operand must match destination register list");
6155
6156 assert(Op.isReg() && "Unexpected operand type");
6157 switch (Op.getRegEqualityTy()) {
6158 case RegConstraintEqualityTy::EqualsSubReg:
6159 return Error(L: Loc, Msg: "operand must be 64-bit form of destination register");
6160 case RegConstraintEqualityTy::EqualsSuperReg:
6161 return Error(L: Loc, Msg: "operand must be 32-bit form of destination register");
6162 case RegConstraintEqualityTy::EqualsReg:
6163 return Error(L: Loc, Msg: "operand must match destination register");
6164 }
6165 llvm_unreachable("Unknown RegConstraintEqualityTy");
6166 }
6167 case Match_MissingFeature:
6168 return Error(L: Loc,
6169 Msg: "instruction requires a CPU feature not currently enabled");
6170 case Match_InvalidOperand:
6171 return Error(L: Loc, Msg: "invalid operand for instruction");
6172 case Match_InvalidSuffix:
6173 return Error(L: Loc, Msg: "invalid type suffix for instruction");
6174 case Match_InvalidCondCode:
6175 return Error(L: Loc, Msg: "expected AArch64 condition code");
6176 case Match_AddSubRegExtendSmall:
6177 return Error(L: Loc,
6178 Msg: "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
6179 case Match_AddSubRegExtendLarge:
6180 return Error(L: Loc,
6181 Msg: "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
6182 case Match_AddSubSecondSource:
6183 return Error(L: Loc,
6184 Msg: "expected compatible register, symbol or integer in range [0, 4095]");
6185 case Match_LogicalSecondSource:
6186 return Error(L: Loc, Msg: "expected compatible register or logical immediate");
6187 case Match_InvalidMovImm32Shift:
6188 return Error(L: Loc, Msg: "expected 'lsl' with optional integer 0 or 16");
6189 case Match_InvalidMovImm64Shift:
6190 return Error(L: Loc, Msg: "expected 'lsl' with optional integer 0, 16, 32 or 48");
6191 case Match_AddSubRegShift32:
6192 return Error(L: Loc,
6193 Msg: "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
6194 case Match_AddSubRegShift64:
6195 return Error(L: Loc,
6196 Msg: "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
6197 case Match_InvalidFPImm:
6198 return Error(L: Loc,
6199 Msg: "expected compatible register or floating-point constant");
6200 case Match_InvalidMemoryIndexedSImm6:
6201 return Error(L: Loc, Msg: "index must be an integer in range [-32, 31].");
6202 case Match_InvalidMemoryIndexedSImm5:
6203 return Error(L: Loc, Msg: "index must be an integer in range [-16, 15].");
6204 case Match_InvalidMemoryIndexed1SImm4:
6205 return Error(L: Loc, Msg: "index must be an integer in range [-8, 7].");
6206 case Match_InvalidMemoryIndexed2SImm4:
6207 return Error(L: Loc, Msg: "index must be a multiple of 2 in range [-16, 14].");
6208 case Match_InvalidMemoryIndexed3SImm4:
6209 return Error(L: Loc, Msg: "index must be a multiple of 3 in range [-24, 21].");
6210 case Match_InvalidMemoryIndexed4SImm4:
6211 return Error(L: Loc, Msg: "index must be a multiple of 4 in range [-32, 28].");
6212 case Match_InvalidMemoryIndexed16SImm4:
6213 return Error(L: Loc, Msg: "index must be a multiple of 16 in range [-128, 112].");
6214 case Match_InvalidMemoryIndexed32SImm4:
6215 return Error(L: Loc, Msg: "index must be a multiple of 32 in range [-256, 224].");
6216 case Match_InvalidMemoryIndexed1SImm6:
6217 return Error(L: Loc, Msg: "index must be an integer in range [-32, 31].");
6218 case Match_InvalidMemoryIndexedSImm8:
6219 return Error(L: Loc, Msg: "index must be an integer in range [-128, 127].");
6220 case Match_InvalidMemoryIndexedSImm9:
6221 return Error(L: Loc, Msg: "index must be an integer in range [-256, 255].");
6222 case Match_InvalidMemoryIndexed16SImm9:
6223 return Error(L: Loc, Msg: "index must be a multiple of 16 in range [-4096, 4080].");
6224 case Match_InvalidMemoryIndexed8SImm10:
6225 return Error(L: Loc, Msg: "index must be a multiple of 8 in range [-4096, 4088].");
6226 case Match_InvalidMemoryIndexed4SImm7:
6227 return Error(L: Loc, Msg: "index must be a multiple of 4 in range [-256, 252].");
6228 case Match_InvalidMemoryIndexed8SImm7:
6229 return Error(L: Loc, Msg: "index must be a multiple of 8 in range [-512, 504].");
6230 case Match_InvalidMemoryIndexed16SImm7:
6231 return Error(L: Loc, Msg: "index must be a multiple of 16 in range [-1024, 1008].");
6232 case Match_InvalidMemoryIndexed8UImm5:
6233 return Error(L: Loc, Msg: "index must be a multiple of 8 in range [0, 248].");
6234 case Match_InvalidMemoryIndexed8UImm3:
6235 return Error(L: Loc, Msg: "index must be a multiple of 8 in range [0, 56].");
6236 case Match_InvalidMemoryIndexed4UImm5:
6237 return Error(L: Loc, Msg: "index must be a multiple of 4 in range [0, 124].");
6238 case Match_InvalidMemoryIndexed2UImm5:
6239 return Error(L: Loc, Msg: "index must be a multiple of 2 in range [0, 62].");
6240 case Match_InvalidMemoryIndexed8UImm6:
6241 return Error(L: Loc, Msg: "index must be a multiple of 8 in range [0, 504].");
6242 case Match_InvalidMemoryIndexed16UImm6:
6243 return Error(L: Loc, Msg: "index must be a multiple of 16 in range [0, 1008].");
6244 case Match_InvalidMemoryIndexed4UImm6:
6245 return Error(L: Loc, Msg: "index must be a multiple of 4 in range [0, 252].");
6246 case Match_InvalidMemoryIndexed2UImm6:
6247 return Error(L: Loc, Msg: "index must be a multiple of 2 in range [0, 126].");
6248 case Match_InvalidMemoryIndexed1UImm6:
6249 return Error(L: Loc, Msg: "index must be in range [0, 63].");
6250 case Match_InvalidMemoryWExtend8:
6251 return Error(L: Loc,
6252 Msg: "expected 'uxtw' or 'sxtw' with optional shift of #0");
6253 case Match_InvalidMemoryWExtend16:
6254 return Error(L: Loc,
6255 Msg: "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
6256 case Match_InvalidMemoryWExtend32:
6257 return Error(L: Loc,
6258 Msg: "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
6259 case Match_InvalidMemoryWExtend64:
6260 return Error(L: Loc,
6261 Msg: "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
6262 case Match_InvalidMemoryWExtend128:
6263 return Error(L: Loc,
6264 Msg: "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
6265 case Match_InvalidMemoryXExtend8:
6266 return Error(L: Loc,
6267 Msg: "expected 'lsl' or 'sxtx' with optional shift of #0");
6268 case Match_InvalidMemoryXExtend16:
6269 return Error(L: Loc,
6270 Msg: "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
6271 case Match_InvalidMemoryXExtend32:
6272 return Error(L: Loc,
6273 Msg: "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
6274 case Match_InvalidMemoryXExtend64:
6275 return Error(L: Loc,
6276 Msg: "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
6277 case Match_InvalidMemoryXExtend128:
6278 return Error(L: Loc,
6279 Msg: "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
6280 case Match_InvalidMemoryIndexed1:
6281 return Error(L: Loc, Msg: "index must be an integer in range [0, 4095].");
6282 case Match_InvalidMemoryIndexed2:
6283 return Error(L: Loc, Msg: "index must be a multiple of 2 in range [0, 8190].");
6284 case Match_InvalidMemoryIndexed4:
6285 return Error(L: Loc, Msg: "index must be a multiple of 4 in range [0, 16380].");
6286 case Match_InvalidMemoryIndexed8:
6287 return Error(L: Loc, Msg: "index must be a multiple of 8 in range [0, 32760].");
6288 case Match_InvalidMemoryIndexed16:
6289 return Error(L: Loc, Msg: "index must be a multiple of 16 in range [0, 65520].");
6290 case Match_InvalidImm0_0:
6291 return Error(L: Loc, Msg: "immediate must be 0.");
6292 case Match_InvalidImm0_1:
6293 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 1].");
6294 case Match_InvalidImm0_3:
6295 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 3].");
6296 case Match_InvalidImm0_7:
6297 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 7].");
6298 case Match_InvalidImm0_15:
6299 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 15].");
6300 case Match_InvalidImm0_31:
6301 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 31].");
6302 case Match_InvalidImm0_63:
6303 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 63].");
6304 case Match_InvalidImm0_127:
6305 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 127].");
6306 case Match_InvalidImm0_255:
6307 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 255].");
6308 case Match_InvalidImm0_65535:
6309 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 65535].");
6310 case Match_InvalidImm1_8:
6311 return Error(L: Loc, Msg: "immediate must be an integer in range [1, 8].");
6312 case Match_InvalidImm1_16:
6313 return Error(L: Loc, Msg: "immediate must be an integer in range [1, 16].");
6314 case Match_InvalidImm1_32:
6315 return Error(L: Loc, Msg: "immediate must be an integer in range [1, 32].");
6316 case Match_InvalidImm1_64:
6317 return Error(L: Loc, Msg: "immediate must be an integer in range [1, 64].");
6318 case Match_InvalidImmM1_62:
6319 return Error(L: Loc, Msg: "immediate must be an integer in range [-1, 62].");
6320 case Match_InvalidMemoryIndexedRange2UImm0:
6321 return Error(L: Loc, Msg: "vector select offset must be the immediate range 0:1.");
6322 case Match_InvalidMemoryIndexedRange2UImm1:
6323 return Error(L: Loc, Msg: "vector select offset must be an immediate range of the "
6324 "form <immf>:<imml>, where the first "
6325 "immediate is a multiple of 2 in the range [0, 2], and "
6326 "the second immediate is immf + 1.");
6327 case Match_InvalidMemoryIndexedRange2UImm2:
6328 case Match_InvalidMemoryIndexedRange2UImm3:
6329 return Error(
6330 L: Loc,
6331 Msg: "vector select offset must be an immediate range of the form "
6332 "<immf>:<imml>, "
6333 "where the first immediate is a multiple of 2 in the range [0, 6] or "
6334 "[0, 14] "
6335 "depending on the instruction, and the second immediate is immf + 1.");
6336 case Match_InvalidMemoryIndexedRange4UImm0:
6337 return Error(L: Loc, Msg: "vector select offset must be the immediate range 0:3.");
6338 case Match_InvalidMemoryIndexedRange4UImm1:
6339 case Match_InvalidMemoryIndexedRange4UImm2:
6340 return Error(
6341 L: Loc,
6342 Msg: "vector select offset must be an immediate range of the form "
6343 "<immf>:<imml>, "
6344 "where the first immediate is a multiple of 4 in the range [0, 4] or "
6345 "[0, 12] "
6346 "depending on the instruction, and the second immediate is immf + 3.");
6347 case Match_InvalidSVEAddSubImm8:
6348 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 255]"
6349 " with a shift amount of 0");
6350 case Match_InvalidSVEAddSubImm16:
6351 case Match_InvalidSVEAddSubImm32:
6352 case Match_InvalidSVEAddSubImm64:
6353 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 255] or a "
6354 "multiple of 256 in range [256, 65280]");
6355 case Match_InvalidSVECpyImm8:
6356 return Error(L: Loc, Msg: "immediate must be an integer in range [-128, 255]"
6357 " with a shift amount of 0");
6358 case Match_InvalidSVECpyImm16:
6359 return Error(L: Loc, Msg: "immediate must be an integer in range [-128, 127] or a "
6360 "multiple of 256 in range [-32768, 65280]");
6361 case Match_InvalidSVECpyImm32:
6362 case Match_InvalidSVECpyImm64:
6363 return Error(L: Loc, Msg: "immediate must be an integer in range [-128, 127] or a "
6364 "multiple of 256 in range [-32768, 32512]");
6365 case Match_InvalidIndexRange0_0:
6366 return Error(L: Loc, Msg: "expected lane specifier '[0]'");
6367 case Match_InvalidIndexRange1_1:
6368 return Error(L: Loc, Msg: "expected lane specifier '[1]'");
6369 case Match_InvalidIndexRange0_15:
6370 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 15].");
6371 case Match_InvalidIndexRange0_7:
6372 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 7].");
6373 case Match_InvalidIndexRange0_3:
6374 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 3].");
6375 case Match_InvalidIndexRange0_1:
6376 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 1].");
6377 case Match_InvalidSVEIndexRange0_63:
6378 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 63].");
6379 case Match_InvalidSVEIndexRange0_31:
6380 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 31].");
6381 case Match_InvalidSVEIndexRange0_15:
6382 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 15].");
6383 case Match_InvalidSVEIndexRange0_7:
6384 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 7].");
6385 case Match_InvalidSVEIndexRange0_3:
6386 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 3].");
6387 case Match_InvalidLabel:
6388 return Error(L: Loc, Msg: "expected label or encodable integer pc offset");
6389 case Match_MRS:
6390 return Error(L: Loc, Msg: "expected readable system register");
6391 case Match_MSR:
6392 case Match_InvalidSVCR:
6393 return Error(L: Loc, Msg: "expected writable system register or pstate");
6394 case Match_InvalidComplexRotationEven:
6395 return Error(L: Loc, Msg: "complex rotation must be 0, 90, 180 or 270.");
6396 case Match_InvalidComplexRotationOdd:
6397 return Error(L: Loc, Msg: "complex rotation must be 90 or 270.");
6398 case Match_MnemonicFail: {
6399 std::string Suggestion = AArch64MnemonicSpellCheck(
6400 S: ((AArch64Operand &)*Operands[0]).getToken(),
6401 FBS: ComputeAvailableFeatures(FB: STI->getFeatureBits()));
6402 return Error(L: Loc, Msg: "unrecognized instruction mnemonic" + Suggestion);
6403 }
6404 case Match_InvalidGPR64shifted8:
6405 return Error(L: Loc, Msg: "register must be x0..x30 or xzr, without shift");
6406 case Match_InvalidGPR64shifted16:
6407 return Error(L: Loc, Msg: "register must be x0..x30 or xzr, with required shift 'lsl #1'");
6408 case Match_InvalidGPR64shifted32:
6409 return Error(L: Loc, Msg: "register must be x0..x30 or xzr, with required shift 'lsl #2'");
6410 case Match_InvalidGPR64shifted64:
6411 return Error(L: Loc, Msg: "register must be x0..x30 or xzr, with required shift 'lsl #3'");
6412 case Match_InvalidGPR64shifted128:
6413 return Error(
6414 L: Loc, Msg: "register must be x0..x30 or xzr, with required shift 'lsl #4'");
6415 case Match_InvalidGPR64NoXZRshifted8:
6416 return Error(L: Loc, Msg: "register must be x0..x30 without shift");
6417 case Match_InvalidGPR64NoXZRshifted16:
6418 return Error(L: Loc, Msg: "register must be x0..x30 with required shift 'lsl #1'");
6419 case Match_InvalidGPR64NoXZRshifted32:
6420 return Error(L: Loc, Msg: "register must be x0..x30 with required shift 'lsl #2'");
6421 case Match_InvalidGPR64NoXZRshifted64:
6422 return Error(L: Loc, Msg: "register must be x0..x30 with required shift 'lsl #3'");
6423 case Match_InvalidGPR64NoXZRshifted128:
6424 return Error(L: Loc, Msg: "register must be x0..x30 with required shift 'lsl #4'");
6425 case Match_InvalidZPR32UXTW8:
6426 case Match_InvalidZPR32SXTW8:
6427 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
6428 case Match_InvalidZPR32UXTW16:
6429 case Match_InvalidZPR32SXTW16:
6430 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
6431 case Match_InvalidZPR32UXTW32:
6432 case Match_InvalidZPR32SXTW32:
6433 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
6434 case Match_InvalidZPR32UXTW64:
6435 case Match_InvalidZPR32SXTW64:
6436 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
6437 case Match_InvalidZPR64UXTW8:
6438 case Match_InvalidZPR64SXTW8:
6439 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
6440 case Match_InvalidZPR64UXTW16:
6441 case Match_InvalidZPR64SXTW16:
6442 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
6443 case Match_InvalidZPR64UXTW32:
6444 case Match_InvalidZPR64SXTW32:
6445 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
6446 case Match_InvalidZPR64UXTW64:
6447 case Match_InvalidZPR64SXTW64:
6448 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
6449 case Match_InvalidZPR32LSL8:
6450 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s'");
6451 case Match_InvalidZPR32LSL16:
6452 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
6453 case Match_InvalidZPR32LSL32:
6454 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
6455 case Match_InvalidZPR32LSL64:
6456 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
6457 case Match_InvalidZPR64LSL8:
6458 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d'");
6459 case Match_InvalidZPR64LSL16:
6460 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
6461 case Match_InvalidZPR64LSL32:
6462 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
6463 case Match_InvalidZPR64LSL64:
6464 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
6465 case Match_InvalidZPR0:
6466 return Error(L: Loc, Msg: "expected register without element width suffix");
6467 case Match_InvalidZPR8:
6468 case Match_InvalidZPR16:
6469 case Match_InvalidZPR32:
6470 case Match_InvalidZPR64:
6471 case Match_InvalidZPR128:
6472 return Error(L: Loc, Msg: "invalid element width");
6473 case Match_InvalidZPR_3b8:
6474 return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.b..z7.b");
6475 case Match_InvalidZPR_3b16:
6476 return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.h..z7.h");
6477 case Match_InvalidZPR_3b32:
6478 return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.s..z7.s");
6479 case Match_InvalidZPR_4b8:
6480 return Error(L: Loc,
6481 Msg: "Invalid restricted vector register, expected z0.b..z15.b");
6482 case Match_InvalidZPR_4b16:
6483 return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.h..z15.h");
6484 case Match_InvalidZPR_4b32:
6485 return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.s..z15.s");
6486 case Match_InvalidZPR_4b64:
6487 return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.d..z15.d");
6488 case Match_InvalidZPRMul2_Lo8:
6489 return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
6490 "register in z0.b..z14.b");
6491 case Match_InvalidZPRMul2_Hi8:
6492 return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
6493 "register in z16.b..z30.b");
6494 case Match_InvalidZPRMul2_Lo16:
6495 return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
6496 "register in z0.h..z14.h");
6497 case Match_InvalidZPRMul2_Hi16:
6498 return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
6499 "register in z16.h..z30.h");
6500 case Match_InvalidZPRMul2_Lo32:
6501 return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
6502 "register in z0.s..z14.s");
6503 case Match_InvalidZPRMul2_Hi32:
6504 return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
6505 "register in z16.s..z30.s");
6506 case Match_InvalidZPRMul2_Lo64:
6507 return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
6508 "register in z0.d..z14.d");
6509 case Match_InvalidZPRMul2_Hi64:
6510 return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
6511 "register in z16.d..z30.d");
6512 case Match_InvalidZPR_K0:
6513 return Error(L: Loc, Msg: "invalid restricted vector register, expected register "
6514 "in z20..z23 or z28..z31");
6515 case Match_InvalidSVEPattern:
6516 return Error(L: Loc, Msg: "invalid predicate pattern");
6517 case Match_InvalidSVEPPRorPNRAnyReg:
6518 case Match_InvalidSVEPPRorPNRBReg:
6519 case Match_InvalidSVEPredicateAnyReg:
6520 case Match_InvalidSVEPredicateBReg:
6521 case Match_InvalidSVEPredicateHReg:
6522 case Match_InvalidSVEPredicateSReg:
6523 case Match_InvalidSVEPredicateDReg:
6524 return Error(L: Loc, Msg: "invalid predicate register.");
6525 case Match_InvalidSVEPredicate3bAnyReg:
6526 return Error(L: Loc, Msg: "invalid restricted predicate register, expected p0..p7 (without element suffix)");
6527 case Match_InvalidSVEPNPredicateB_p8to15Reg:
6528 case Match_InvalidSVEPNPredicateH_p8to15Reg:
6529 case Match_InvalidSVEPNPredicateS_p8to15Reg:
6530 case Match_InvalidSVEPNPredicateD_p8to15Reg:
6531 return Error(L: Loc, Msg: "Invalid predicate register, expected PN in range "
6532 "pn8..pn15 with element suffix.");
6533 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6534 return Error(L: Loc, Msg: "invalid restricted predicate-as-counter register "
6535 "expected pn8..pn15");
6536 case Match_InvalidSVEPNPredicateBReg:
6537 case Match_InvalidSVEPNPredicateHReg:
6538 case Match_InvalidSVEPNPredicateSReg:
6539 case Match_InvalidSVEPNPredicateDReg:
6540 return Error(L: Loc, Msg: "Invalid predicate register, expected PN in range "
6541 "pn0..pn15 with element suffix.");
6542 case Match_InvalidSVEVecLenSpecifier:
6543 return Error(L: Loc, Msg: "Invalid vector length specifier, expected VLx2 or VLx4");
6544 case Match_InvalidSVEPredicateListMul2x8:
6545 case Match_InvalidSVEPredicateListMul2x16:
6546 case Match_InvalidSVEPredicateListMul2x32:
6547 case Match_InvalidSVEPredicateListMul2x64:
6548 return Error(L: Loc, Msg: "Invalid vector list, expected list with 2 consecutive "
6549 "predicate registers, where the first vector is a multiple of 2 "
6550 "and with correct element type");
6551 case Match_InvalidSVEExactFPImmOperandHalfOne:
6552 return Error(L: Loc, Msg: "Invalid floating point constant, expected 0.5 or 1.0.");
6553 case Match_InvalidSVEExactFPImmOperandHalfTwo:
6554 return Error(L: Loc, Msg: "Invalid floating point constant, expected 0.5 or 2.0.");
6555 case Match_InvalidSVEExactFPImmOperandZeroOne:
6556 return Error(L: Loc, Msg: "Invalid floating point constant, expected 0.0 or 1.0.");
6557 case Match_InvalidMatrixTileVectorH8:
6558 case Match_InvalidMatrixTileVectorV8:
6559 return Error(L: Loc, Msg: "invalid matrix operand, expected za0h.b or za0v.b");
6560 case Match_InvalidMatrixTileVectorH16:
6561 case Match_InvalidMatrixTileVectorV16:
6562 return Error(L: Loc,
6563 Msg: "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
6564 case Match_InvalidMatrixTileVectorH32:
6565 case Match_InvalidMatrixTileVectorV32:
6566 return Error(L: Loc,
6567 Msg: "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
6568 case Match_InvalidMatrixTileVectorH64:
6569 case Match_InvalidMatrixTileVectorV64:
6570 return Error(L: Loc,
6571 Msg: "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
6572 case Match_InvalidMatrixTileVectorH128:
6573 case Match_InvalidMatrixTileVectorV128:
6574 return Error(L: Loc,
6575 Msg: "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
6576 case Match_InvalidMatrixTile16:
6577 return Error(L: Loc, Msg: "invalid matrix operand, expected za[0-1].h");
6578 case Match_InvalidMatrixTile32:
6579 return Error(L: Loc, Msg: "invalid matrix operand, expected za[0-3].s");
6580 case Match_InvalidMatrixTile64:
6581 return Error(L: Loc, Msg: "invalid matrix operand, expected za[0-7].d");
6582 case Match_InvalidMatrix:
6583 return Error(L: Loc, Msg: "invalid matrix operand, expected za");
6584 case Match_InvalidMatrix8:
6585 return Error(L: Loc, Msg: "invalid matrix operand, expected suffix .b");
6586 case Match_InvalidMatrix16:
6587 return Error(L: Loc, Msg: "invalid matrix operand, expected suffix .h");
6588 case Match_InvalidMatrix32:
6589 return Error(L: Loc, Msg: "invalid matrix operand, expected suffix .s");
6590 case Match_InvalidMatrix64:
6591 return Error(L: Loc, Msg: "invalid matrix operand, expected suffix .d");
6592 case Match_InvalidMatrixIndexGPR32_12_15:
6593 return Error(L: Loc, Msg: "operand must be a register in range [w12, w15]");
6594 case Match_InvalidMatrixIndexGPR32_8_11:
6595 return Error(L: Loc, Msg: "operand must be a register in range [w8, w11]");
6596 case Match_InvalidSVEVectorList2x8Mul2:
6597 case Match_InvalidSVEVectorList2x16Mul2:
6598 case Match_InvalidSVEVectorList2x32Mul2:
6599 case Match_InvalidSVEVectorList2x64Mul2:
6600 case Match_InvalidSVEVectorList2x128Mul2:
6601 return Error(L: Loc, Msg: "Invalid vector list, expected list with 2 consecutive "
6602 "SVE vectors, where the first vector is a multiple of 2 "
6603 "and with matching element types");
6604 case Match_InvalidSVEVectorList2x8Mul2_Lo:
6605 case Match_InvalidSVEVectorList2x16Mul2_Lo:
6606 case Match_InvalidSVEVectorList2x32Mul2_Lo:
6607 case Match_InvalidSVEVectorList2x64Mul2_Lo:
6608 return Error(L: Loc, Msg: "Invalid vector list, expected list with 2 consecutive "
6609 "SVE vectors in the range z0-z14, where the first vector "
6610 "is a multiple of 2 "
6611 "and with matching element types");
6612 case Match_InvalidSVEVectorList2x8Mul2_Hi:
6613 case Match_InvalidSVEVectorList2x16Mul2_Hi:
6614 case Match_InvalidSVEVectorList2x32Mul2_Hi:
6615 case Match_InvalidSVEVectorList2x64Mul2_Hi:
6616 return Error(L: Loc,
6617 Msg: "Invalid vector list, expected list with 2 consecutive "
6618 "SVE vectors in the range z16-z30, where the first vector "
6619 "is a multiple of 2 "
6620 "and with matching element types");
6621 case Match_InvalidSVEVectorList4x8Mul4:
6622 case Match_InvalidSVEVectorList4x16Mul4:
6623 case Match_InvalidSVEVectorList4x32Mul4:
6624 case Match_InvalidSVEVectorList4x64Mul4:
6625 case Match_InvalidSVEVectorList4x128Mul4:
6626 return Error(L: Loc, Msg: "Invalid vector list, expected list with 4 consecutive "
6627 "SVE vectors, where the first vector is a multiple of 4 "
6628 "and with matching element types");
6629 case Match_InvalidLookupTable:
6630 return Error(L: Loc, Msg: "Invalid lookup table, expected zt0");
6631 case Match_InvalidSVEVectorListStrided2x8:
6632 case Match_InvalidSVEVectorListStrided2x16:
6633 case Match_InvalidSVEVectorListStrided2x32:
6634 case Match_InvalidSVEVectorListStrided2x64:
6635 return Error(
6636 L: Loc,
6637 Msg: "Invalid vector list, expected list with each SVE vector in the list "
6638 "8 registers apart, and the first register in the range [z0, z7] or "
6639 "[z16, z23] and with correct element type");
6640 case Match_InvalidSVEVectorListStrided4x8:
6641 case Match_InvalidSVEVectorListStrided4x16:
6642 case Match_InvalidSVEVectorListStrided4x32:
6643 case Match_InvalidSVEVectorListStrided4x64:
6644 return Error(
6645 L: Loc,
6646 Msg: "Invalid vector list, expected list with each SVE vector in the list "
6647 "4 registers apart, and the first register in the range [z0, z3] or "
6648 "[z16, z19] and with correct element type");
6649 case Match_AddSubLSLImm3ShiftLarge:
6650 return Error(L: Loc,
6651 Msg: "expected 'lsl' with optional integer in range [0, 7]");
6652 default:
6653 llvm_unreachable("unexpected error code!");
6654 }
6655}
6656
6657static const char *getSubtargetFeatureName(uint64_t Val);
6658
6659bool AArch64AsmParser::matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
6660 OperandVector &Operands,
6661 MCStreamer &Out,
6662 uint64_t &ErrorInfo,
6663 bool MatchingInlineAsm) {
6664 assert(!Operands.empty() && "Unexpected empty operand list!");
6665 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
6666 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
6667
6668 StringRef Tok = Op.getToken();
6669 unsigned NumOperands = Operands.size();
6670
6671 if (NumOperands == 4 && Tok == "lsl") {
6672 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6673 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6674 if (Op2.isScalarReg() && Op3.isImm()) {
6675 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Val: Op3.getImm());
6676 if (Op3CE) {
6677 uint64_t Op3Val = Op3CE->getValue();
6678 uint64_t NewOp3Val = 0;
6679 uint64_t NewOp4Val = 0;
6680 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
6681 Reg: Op2.getReg())) {
6682 NewOp3Val = (32 - Op3Val) & 0x1f;
6683 NewOp4Val = 31 - Op3Val;
6684 } else {
6685 NewOp3Val = (64 - Op3Val) & 0x3f;
6686 NewOp4Val = 63 - Op3Val;
6687 }
6688
6689 const MCExpr *NewOp3 = MCConstantExpr::create(Value: NewOp3Val, Ctx&: getContext());
6690 const MCExpr *NewOp4 = MCConstantExpr::create(Value: NewOp4Val, Ctx&: getContext());
6691
6692 Operands[0] =
6693 AArch64Operand::CreateToken(Str: "ubfm", S: Op.getStartLoc(), Ctx&: getContext());
6694 Operands.push_back(Elt: AArch64Operand::CreateImm(
6695 Val: NewOp4, S: Op3.getStartLoc(), E: Op3.getEndLoc(), Ctx&: getContext()));
6696 Operands[3] = AArch64Operand::CreateImm(Val: NewOp3, S: Op3.getStartLoc(),
6697 E: Op3.getEndLoc(), Ctx&: getContext());
6698 }
6699 }
6700 } else if (NumOperands == 4 && Tok == "bfc") {
6701 // FIXME: Horrible hack to handle BFC->BFM alias.
6702 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6703 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
6704 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
6705
6706 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
6707 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(Val: LSBOp.getImm());
6708 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(Val: WidthOp.getImm());
6709
6710 if (LSBCE && WidthCE) {
6711 uint64_t LSB = LSBCE->getValue();
6712 uint64_t Width = WidthCE->getValue();
6713
6714 uint64_t RegWidth = 0;
6715 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6716 Reg: Op1.getReg()))
6717 RegWidth = 64;
6718 else
6719 RegWidth = 32;
6720
6721 if (LSB >= RegWidth)
6722 return Error(L: LSBOp.getStartLoc(),
6723 Msg: "expected integer in range [0, 31]");
6724 if (Width < 1 || Width > RegWidth)
6725 return Error(L: WidthOp.getStartLoc(),
6726 Msg: "expected integer in range [1, 32]");
6727
6728 uint64_t ImmR = 0;
6729 if (RegWidth == 32)
6730 ImmR = (32 - LSB) & 0x1f;
6731 else
6732 ImmR = (64 - LSB) & 0x3f;
6733
6734 uint64_t ImmS = Width - 1;
6735
6736 if (ImmR != 0 && ImmS >= ImmR)
6737 return Error(L: WidthOp.getStartLoc(),
6738 Msg: "requested insert overflows register");
6739
6740 const MCExpr *ImmRExpr = MCConstantExpr::create(Value: ImmR, Ctx&: getContext());
6741 const MCExpr *ImmSExpr = MCConstantExpr::create(Value: ImmS, Ctx&: getContext());
6742 Operands[0] =
6743 AArch64Operand::CreateToken(Str: "bfm", S: Op.getStartLoc(), Ctx&: getContext());
6744 Operands[2] = AArch64Operand::CreateReg(
6745 Reg: RegWidth == 32 ? AArch64::WZR : AArch64::XZR, Kind: RegKind::Scalar,
6746 S: SMLoc(), E: SMLoc(), Ctx&: getContext());
6747 Operands[3] = AArch64Operand::CreateImm(
6748 Val: ImmRExpr, S: LSBOp.getStartLoc(), E: LSBOp.getEndLoc(), Ctx&: getContext());
6749 Operands.emplace_back(
6750 Args: AArch64Operand::CreateImm(Val: ImmSExpr, S: WidthOp.getStartLoc(),
6751 E: WidthOp.getEndLoc(), Ctx&: getContext()));
6752 }
6753 }
6754 } else if (NumOperands == 5) {
6755 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
6756 // UBFIZ -> UBFM aliases.
6757 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
6758 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6759 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6760 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6761
6762 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6763 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Val: Op3.getImm());
6764 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Val: Op4.getImm());
6765
6766 if (Op3CE && Op4CE) {
6767 uint64_t Op3Val = Op3CE->getValue();
6768 uint64_t Op4Val = Op4CE->getValue();
6769
6770 uint64_t RegWidth = 0;
6771 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6772 Reg: Op1.getReg()))
6773 RegWidth = 64;
6774 else
6775 RegWidth = 32;
6776
6777 if (Op3Val >= RegWidth)
6778 return Error(L: Op3.getStartLoc(),
6779 Msg: "expected integer in range [0, 31]");
6780 if (Op4Val < 1 || Op4Val > RegWidth)
6781 return Error(L: Op4.getStartLoc(),
6782 Msg: "expected integer in range [1, 32]");
6783
6784 uint64_t NewOp3Val = 0;
6785 if (RegWidth == 32)
6786 NewOp3Val = (32 - Op3Val) & 0x1f;
6787 else
6788 NewOp3Val = (64 - Op3Val) & 0x3f;
6789
6790 uint64_t NewOp4Val = Op4Val - 1;
6791
6792 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
6793 return Error(L: Op4.getStartLoc(),
6794 Msg: "requested insert overflows register");
6795
6796 const MCExpr *NewOp3 =
6797 MCConstantExpr::create(Value: NewOp3Val, Ctx&: getContext());
6798 const MCExpr *NewOp4 =
6799 MCConstantExpr::create(Value: NewOp4Val, Ctx&: getContext());
6800 Operands[3] = AArch64Operand::CreateImm(
6801 Val: NewOp3, S: Op3.getStartLoc(), E: Op3.getEndLoc(), Ctx&: getContext());
6802 Operands[4] = AArch64Operand::CreateImm(
6803 Val: NewOp4, S: Op4.getStartLoc(), E: Op4.getEndLoc(), Ctx&: getContext());
6804 if (Tok == "bfi")
6805 Operands[0] = AArch64Operand::CreateToken(Str: "bfm", S: Op.getStartLoc(),
6806 Ctx&: getContext());
6807 else if (Tok == "sbfiz")
6808 Operands[0] = AArch64Operand::CreateToken(Str: "sbfm", S: Op.getStartLoc(),
6809 Ctx&: getContext());
6810 else if (Tok == "ubfiz")
6811 Operands[0] = AArch64Operand::CreateToken(Str: "ubfm", S: Op.getStartLoc(),
6812 Ctx&: getContext());
6813 else
6814 llvm_unreachable("No valid mnemonic for alias?");
6815 }
6816 }
6817
6818 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
6819 // UBFX -> UBFM aliases.
6820 } else if (NumOperands == 5 &&
6821 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
6822 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6823 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6824 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6825
6826 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6827 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Val: Op3.getImm());
6828 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Val: Op4.getImm());
6829
6830 if (Op3CE && Op4CE) {
6831 uint64_t Op3Val = Op3CE->getValue();
6832 uint64_t Op4Val = Op4CE->getValue();
6833
6834 uint64_t RegWidth = 0;
6835 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6836 Reg: Op1.getReg()))
6837 RegWidth = 64;
6838 else
6839 RegWidth = 32;
6840
6841 if (Op3Val >= RegWidth)
6842 return Error(L: Op3.getStartLoc(),
6843 Msg: "expected integer in range [0, 31]");
6844 if (Op4Val < 1 || Op4Val > RegWidth)
6845 return Error(L: Op4.getStartLoc(),
6846 Msg: "expected integer in range [1, 32]");
6847
6848 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
6849
6850 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
6851 return Error(L: Op4.getStartLoc(),
6852 Msg: "requested extract overflows register");
6853
6854 const MCExpr *NewOp4 =
6855 MCConstantExpr::create(Value: NewOp4Val, Ctx&: getContext());
6856 Operands[4] = AArch64Operand::CreateImm(
6857 Val: NewOp4, S: Op4.getStartLoc(), E: Op4.getEndLoc(), Ctx&: getContext());
6858 if (Tok == "bfxil")
6859 Operands[0] = AArch64Operand::CreateToken(Str: "bfm", S: Op.getStartLoc(),
6860 Ctx&: getContext());
6861 else if (Tok == "sbfx")
6862 Operands[0] = AArch64Operand::CreateToken(Str: "sbfm", S: Op.getStartLoc(),
6863 Ctx&: getContext());
6864 else if (Tok == "ubfx")
6865 Operands[0] = AArch64Operand::CreateToken(Str: "ubfm", S: Op.getStartLoc(),
6866 Ctx&: getContext());
6867 else
6868 llvm_unreachable("No valid mnemonic for alias?");
6869 }
6870 }
6871 }
6872 }
6873
6874 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
6875 // instruction for FP registers correctly in some rare circumstances. Convert
6876 // it to a safe instruction and warn (because silently changing someone's
6877 // assembly is rude).
6878 if (getSTI().hasFeature(Feature: AArch64::FeatureZCZeroingFPWorkaround) &&
6879 NumOperands == 4 && Tok == "movi") {
6880 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6881 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6882 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6883 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
6884 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
6885 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
6886 if (Suffix.lower() == ".2d" &&
6887 cast<MCConstantExpr>(Val: Op3.getImm())->getValue() == 0) {
6888 Warning(L: IDLoc, Msg: "instruction movi.2d with immediate #0 may not function"
6889 " correctly on this CPU, converting to equivalent movi.16b");
6890 // Switch the suffix to .16b.
6891 unsigned Idx = Op1.isToken() ? 1 : 2;
6892 Operands[Idx] =
6893 AArch64Operand::CreateToken(Str: ".16b", S: IDLoc, Ctx&: getContext());
6894 }
6895 }
6896 }
6897
6898 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
6899 // InstAlias can't quite handle this since the reg classes aren't
6900 // subclasses.
6901 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
6902 // The source register can be Wn here, but the matcher expects a
6903 // GPR64. Twiddle it here if necessary.
6904 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6905 if (Op.isScalarReg()) {
6906 MCRegister Reg = getXRegFromWReg(Reg: Op.getReg());
6907 Operands[2] = AArch64Operand::CreateReg(Reg, Kind: RegKind::Scalar,
6908 S: Op.getStartLoc(), E: Op.getEndLoc(),
6909 Ctx&: getContext());
6910 }
6911 }
6912 // FIXME: Likewise for sxt[bh] with a Xd dst operand
6913 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
6914 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6915 if (Op.isScalarReg() &&
6916 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6917 Reg: Op.getReg())) {
6918 // The source register can be Wn here, but the matcher expects a
6919 // GPR64. Twiddle it here if necessary.
6920 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6921 if (Op.isScalarReg()) {
6922 MCRegister Reg = getXRegFromWReg(Reg: Op.getReg());
6923 Operands[2] = AArch64Operand::CreateReg(Reg, Kind: RegKind::Scalar,
6924 S: Op.getStartLoc(),
6925 E: Op.getEndLoc(), Ctx&: getContext());
6926 }
6927 }
6928 }
6929 // FIXME: Likewise for uxt[bh] with a Xd dst operand
6930 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
6931 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6932 if (Op.isScalarReg() &&
6933 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6934 Reg: Op.getReg())) {
6935 // The source register can be Wn here, but the matcher expects a
6936 // GPR32. Twiddle it here if necessary.
6937 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6938 if (Op.isScalarReg()) {
6939 MCRegister Reg = getWRegFromXReg(Reg: Op.getReg());
6940 Operands[1] = AArch64Operand::CreateReg(Reg, Kind: RegKind::Scalar,
6941 S: Op.getStartLoc(),
6942 E: Op.getEndLoc(), Ctx&: getContext());
6943 }
6944 }
6945 }
6946
6947 MCInst Inst;
6948 FeatureBitset MissingFeatures;
6949 // First try to match against the secondary set of tables containing the
6950 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
6951 unsigned MatchResult =
6952 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6953 matchingInlineAsm: MatchingInlineAsm, VariantID: 1);
6954
6955 // If that fails, try against the alternate table containing long-form NEON:
6956 // "fadd v0.2s, v1.2s, v2.2s"
6957 if (MatchResult != Match_Success) {
6958 // But first, save the short-form match result: we can use it in case the
6959 // long-form match also fails.
6960 auto ShortFormNEONErrorInfo = ErrorInfo;
6961 auto ShortFormNEONMatchResult = MatchResult;
6962 auto ShortFormNEONMissingFeatures = MissingFeatures;
6963
6964 MatchResult =
6965 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6966 matchingInlineAsm: MatchingInlineAsm, VariantID: 0);
6967
6968 // Now, both matches failed, and the long-form match failed on the mnemonic
6969 // suffix token operand. The short-form match failure is probably more
6970 // relevant: use it instead.
6971 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
6972 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
6973 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
6974 MatchResult = ShortFormNEONMatchResult;
6975 ErrorInfo = ShortFormNEONErrorInfo;
6976 MissingFeatures = ShortFormNEONMissingFeatures;
6977 }
6978 }
6979
6980 switch (MatchResult) {
6981 case Match_Success: {
6982 // Perform range checking and other semantic validations
6983 SmallVector<SMLoc, 8> OperandLocs;
6984 NumOperands = Operands.size();
6985 for (unsigned i = 1; i < NumOperands; ++i)
6986 OperandLocs.push_back(Elt: Operands[i]->getStartLoc());
6987 if (validateInstruction(Inst, IDLoc, Loc&: OperandLocs))
6988 return true;
6989
6990 Inst.setLoc(IDLoc);
6991 Out.emitInstruction(Inst, STI: getSTI());
6992 return false;
6993 }
6994 case Match_MissingFeature: {
6995 assert(MissingFeatures.any() && "Unknown missing feature!");
6996 // Special case the error message for the very common case where only
6997 // a single subtarget feature is missing (neon, e.g.).
6998 std::string Msg = "instruction requires:";
6999 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
7000 if (MissingFeatures[i]) {
7001 Msg += " ";
7002 Msg += getSubtargetFeatureName(Val: i);
7003 }
7004 }
7005 return Error(L: IDLoc, Msg);
7006 }
7007 case Match_MnemonicFail:
7008 return showMatchError(Loc: IDLoc, ErrCode: MatchResult, ErrorInfo, Operands);
7009 case Match_InvalidOperand: {
7010 SMLoc ErrorLoc = IDLoc;
7011
7012 if (ErrorInfo != ~0ULL) {
7013 if (ErrorInfo >= Operands.size())
7014 return Error(L: IDLoc, Msg: "too few operands for instruction",
7015 Range: SMRange(IDLoc, getTok().getLoc()));
7016
7017 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
7018 if (ErrorLoc == SMLoc())
7019 ErrorLoc = IDLoc;
7020 }
7021 // If the match failed on a suffix token operand, tweak the diagnostic
7022 // accordingly.
7023 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
7024 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
7025 MatchResult = Match_InvalidSuffix;
7026
7027 return showMatchError(Loc: ErrorLoc, ErrCode: MatchResult, ErrorInfo, Operands);
7028 }
7029 case Match_InvalidTiedOperand:
7030 case Match_InvalidMemoryIndexed1:
7031 case Match_InvalidMemoryIndexed2:
7032 case Match_InvalidMemoryIndexed4:
7033 case Match_InvalidMemoryIndexed8:
7034 case Match_InvalidMemoryIndexed16:
7035 case Match_InvalidCondCode:
7036 case Match_AddSubLSLImm3ShiftLarge:
7037 case Match_AddSubRegExtendSmall:
7038 case Match_AddSubRegExtendLarge:
7039 case Match_AddSubSecondSource:
7040 case Match_LogicalSecondSource:
7041 case Match_AddSubRegShift32:
7042 case Match_AddSubRegShift64:
7043 case Match_InvalidMovImm32Shift:
7044 case Match_InvalidMovImm64Shift:
7045 case Match_InvalidFPImm:
7046 case Match_InvalidMemoryWExtend8:
7047 case Match_InvalidMemoryWExtend16:
7048 case Match_InvalidMemoryWExtend32:
7049 case Match_InvalidMemoryWExtend64:
7050 case Match_InvalidMemoryWExtend128:
7051 case Match_InvalidMemoryXExtend8:
7052 case Match_InvalidMemoryXExtend16:
7053 case Match_InvalidMemoryXExtend32:
7054 case Match_InvalidMemoryXExtend64:
7055 case Match_InvalidMemoryXExtend128:
7056 case Match_InvalidMemoryIndexed1SImm4:
7057 case Match_InvalidMemoryIndexed2SImm4:
7058 case Match_InvalidMemoryIndexed3SImm4:
7059 case Match_InvalidMemoryIndexed4SImm4:
7060 case Match_InvalidMemoryIndexed1SImm6:
7061 case Match_InvalidMemoryIndexed16SImm4:
7062 case Match_InvalidMemoryIndexed32SImm4:
7063 case Match_InvalidMemoryIndexed4SImm7:
7064 case Match_InvalidMemoryIndexed8SImm7:
7065 case Match_InvalidMemoryIndexed16SImm7:
7066 case Match_InvalidMemoryIndexed8UImm5:
7067 case Match_InvalidMemoryIndexed8UImm3:
7068 case Match_InvalidMemoryIndexed4UImm5:
7069 case Match_InvalidMemoryIndexed2UImm5:
7070 case Match_InvalidMemoryIndexed1UImm6:
7071 case Match_InvalidMemoryIndexed2UImm6:
7072 case Match_InvalidMemoryIndexed4UImm6:
7073 case Match_InvalidMemoryIndexed8UImm6:
7074 case Match_InvalidMemoryIndexed16UImm6:
7075 case Match_InvalidMemoryIndexedSImm6:
7076 case Match_InvalidMemoryIndexedSImm5:
7077 case Match_InvalidMemoryIndexedSImm8:
7078 case Match_InvalidMemoryIndexedSImm9:
7079 case Match_InvalidMemoryIndexed16SImm9:
7080 case Match_InvalidMemoryIndexed8SImm10:
7081 case Match_InvalidImm0_0:
7082 case Match_InvalidImm0_1:
7083 case Match_InvalidImm0_3:
7084 case Match_InvalidImm0_7:
7085 case Match_InvalidImm0_15:
7086 case Match_InvalidImm0_31:
7087 case Match_InvalidImm0_63:
7088 case Match_InvalidImm0_127:
7089 case Match_InvalidImm0_255:
7090 case Match_InvalidImm0_65535:
7091 case Match_InvalidImm1_8:
7092 case Match_InvalidImm1_16:
7093 case Match_InvalidImm1_32:
7094 case Match_InvalidImm1_64:
7095 case Match_InvalidImmM1_62:
7096 case Match_InvalidMemoryIndexedRange2UImm0:
7097 case Match_InvalidMemoryIndexedRange2UImm1:
7098 case Match_InvalidMemoryIndexedRange2UImm2:
7099 case Match_InvalidMemoryIndexedRange2UImm3:
7100 case Match_InvalidMemoryIndexedRange4UImm0:
7101 case Match_InvalidMemoryIndexedRange4UImm1:
7102 case Match_InvalidMemoryIndexedRange4UImm2:
7103 case Match_InvalidSVEAddSubImm8:
7104 case Match_InvalidSVEAddSubImm16:
7105 case Match_InvalidSVEAddSubImm32:
7106 case Match_InvalidSVEAddSubImm64:
7107 case Match_InvalidSVECpyImm8:
7108 case Match_InvalidSVECpyImm16:
7109 case Match_InvalidSVECpyImm32:
7110 case Match_InvalidSVECpyImm64:
7111 case Match_InvalidIndexRange0_0:
7112 case Match_InvalidIndexRange1_1:
7113 case Match_InvalidIndexRange0_15:
7114 case Match_InvalidIndexRange0_7:
7115 case Match_InvalidIndexRange0_3:
7116 case Match_InvalidIndexRange0_1:
7117 case Match_InvalidSVEIndexRange0_63:
7118 case Match_InvalidSVEIndexRange0_31:
7119 case Match_InvalidSVEIndexRange0_15:
7120 case Match_InvalidSVEIndexRange0_7:
7121 case Match_InvalidSVEIndexRange0_3:
7122 case Match_InvalidLabel:
7123 case Match_InvalidComplexRotationEven:
7124 case Match_InvalidComplexRotationOdd:
7125 case Match_InvalidGPR64shifted8:
7126 case Match_InvalidGPR64shifted16:
7127 case Match_InvalidGPR64shifted32:
7128 case Match_InvalidGPR64shifted64:
7129 case Match_InvalidGPR64shifted128:
7130 case Match_InvalidGPR64NoXZRshifted8:
7131 case Match_InvalidGPR64NoXZRshifted16:
7132 case Match_InvalidGPR64NoXZRshifted32:
7133 case Match_InvalidGPR64NoXZRshifted64:
7134 case Match_InvalidGPR64NoXZRshifted128:
7135 case Match_InvalidZPR32UXTW8:
7136 case Match_InvalidZPR32UXTW16:
7137 case Match_InvalidZPR32UXTW32:
7138 case Match_InvalidZPR32UXTW64:
7139 case Match_InvalidZPR32SXTW8:
7140 case Match_InvalidZPR32SXTW16:
7141 case Match_InvalidZPR32SXTW32:
7142 case Match_InvalidZPR32SXTW64:
7143 case Match_InvalidZPR64UXTW8:
7144 case Match_InvalidZPR64SXTW8:
7145 case Match_InvalidZPR64UXTW16:
7146 case Match_InvalidZPR64SXTW16:
7147 case Match_InvalidZPR64UXTW32:
7148 case Match_InvalidZPR64SXTW32:
7149 case Match_InvalidZPR64UXTW64:
7150 case Match_InvalidZPR64SXTW64:
7151 case Match_InvalidZPR32LSL8:
7152 case Match_InvalidZPR32LSL16:
7153 case Match_InvalidZPR32LSL32:
7154 case Match_InvalidZPR32LSL64:
7155 case Match_InvalidZPR64LSL8:
7156 case Match_InvalidZPR64LSL16:
7157 case Match_InvalidZPR64LSL32:
7158 case Match_InvalidZPR64LSL64:
7159 case Match_InvalidZPR0:
7160 case Match_InvalidZPR8:
7161 case Match_InvalidZPR16:
7162 case Match_InvalidZPR32:
7163 case Match_InvalidZPR64:
7164 case Match_InvalidZPR128:
7165 case Match_InvalidZPR_3b8:
7166 case Match_InvalidZPR_3b16:
7167 case Match_InvalidZPR_3b32:
7168 case Match_InvalidZPR_4b8:
7169 case Match_InvalidZPR_4b16:
7170 case Match_InvalidZPR_4b32:
7171 case Match_InvalidZPR_4b64:
7172 case Match_InvalidSVEPPRorPNRAnyReg:
7173 case Match_InvalidSVEPPRorPNRBReg:
7174 case Match_InvalidSVEPredicateAnyReg:
7175 case Match_InvalidSVEPattern:
7176 case Match_InvalidSVEVecLenSpecifier:
7177 case Match_InvalidSVEPredicateBReg:
7178 case Match_InvalidSVEPredicateHReg:
7179 case Match_InvalidSVEPredicateSReg:
7180 case Match_InvalidSVEPredicateDReg:
7181 case Match_InvalidSVEPredicate3bAnyReg:
7182 case Match_InvalidSVEPNPredicateB_p8to15Reg:
7183 case Match_InvalidSVEPNPredicateH_p8to15Reg:
7184 case Match_InvalidSVEPNPredicateS_p8to15Reg:
7185 case Match_InvalidSVEPNPredicateD_p8to15Reg:
7186 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
7187 case Match_InvalidSVEPNPredicateBReg:
7188 case Match_InvalidSVEPNPredicateHReg:
7189 case Match_InvalidSVEPNPredicateSReg:
7190 case Match_InvalidSVEPNPredicateDReg:
7191 case Match_InvalidSVEPredicateListMul2x8:
7192 case Match_InvalidSVEPredicateListMul2x16:
7193 case Match_InvalidSVEPredicateListMul2x32:
7194 case Match_InvalidSVEPredicateListMul2x64:
7195 case Match_InvalidSVEExactFPImmOperandHalfOne:
7196 case Match_InvalidSVEExactFPImmOperandHalfTwo:
7197 case Match_InvalidSVEExactFPImmOperandZeroOne:
7198 case Match_InvalidMatrixTile16:
7199 case Match_InvalidMatrixTile32:
7200 case Match_InvalidMatrixTile64:
7201 case Match_InvalidMatrix:
7202 case Match_InvalidMatrix8:
7203 case Match_InvalidMatrix16:
7204 case Match_InvalidMatrix32:
7205 case Match_InvalidMatrix64:
7206 case Match_InvalidMatrixTileVectorH8:
7207 case Match_InvalidMatrixTileVectorH16:
7208 case Match_InvalidMatrixTileVectorH32:
7209 case Match_InvalidMatrixTileVectorH64:
7210 case Match_InvalidMatrixTileVectorH128:
7211 case Match_InvalidMatrixTileVectorV8:
7212 case Match_InvalidMatrixTileVectorV16:
7213 case Match_InvalidMatrixTileVectorV32:
7214 case Match_InvalidMatrixTileVectorV64:
7215 case Match_InvalidMatrixTileVectorV128:
7216 case Match_InvalidSVCR:
7217 case Match_InvalidMatrixIndexGPR32_12_15:
7218 case Match_InvalidMatrixIndexGPR32_8_11:
7219 case Match_InvalidLookupTable:
7220 case Match_InvalidZPRMul2_Lo8:
7221 case Match_InvalidZPRMul2_Hi8:
7222 case Match_InvalidZPRMul2_Lo16:
7223 case Match_InvalidZPRMul2_Hi16:
7224 case Match_InvalidZPRMul2_Lo32:
7225 case Match_InvalidZPRMul2_Hi32:
7226 case Match_InvalidZPRMul2_Lo64:
7227 case Match_InvalidZPRMul2_Hi64:
7228 case Match_InvalidZPR_K0:
7229 case Match_InvalidSVEVectorList2x8Mul2:
7230 case Match_InvalidSVEVectorList2x16Mul2:
7231 case Match_InvalidSVEVectorList2x32Mul2:
7232 case Match_InvalidSVEVectorList2x64Mul2:
7233 case Match_InvalidSVEVectorList2x128Mul2:
7234 case Match_InvalidSVEVectorList4x8Mul4:
7235 case Match_InvalidSVEVectorList4x16Mul4:
7236 case Match_InvalidSVEVectorList4x32Mul4:
7237 case Match_InvalidSVEVectorList4x64Mul4:
7238 case Match_InvalidSVEVectorList4x128Mul4:
7239 case Match_InvalidSVEVectorList2x8Mul2_Lo:
7240 case Match_InvalidSVEVectorList2x16Mul2_Lo:
7241 case Match_InvalidSVEVectorList2x32Mul2_Lo:
7242 case Match_InvalidSVEVectorList2x64Mul2_Lo:
7243 case Match_InvalidSVEVectorList2x8Mul2_Hi:
7244 case Match_InvalidSVEVectorList2x16Mul2_Hi:
7245 case Match_InvalidSVEVectorList2x32Mul2_Hi:
7246 case Match_InvalidSVEVectorList2x64Mul2_Hi:
7247 case Match_InvalidSVEVectorListStrided2x8:
7248 case Match_InvalidSVEVectorListStrided2x16:
7249 case Match_InvalidSVEVectorListStrided2x32:
7250 case Match_InvalidSVEVectorListStrided2x64:
7251 case Match_InvalidSVEVectorListStrided4x8:
7252 case Match_InvalidSVEVectorListStrided4x16:
7253 case Match_InvalidSVEVectorListStrided4x32:
7254 case Match_InvalidSVEVectorListStrided4x64:
7255 case Match_MSR:
7256 case Match_MRS: {
7257 if (ErrorInfo >= Operands.size())
7258 return Error(L: IDLoc, Msg: "too few operands for instruction", Range: SMRange(IDLoc, (*Operands.back()).getEndLoc()));
7259 // Any time we get here, there's nothing fancy to do. Just get the
7260 // operand SMLoc and display the diagnostic.
7261 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
7262 if (ErrorLoc == SMLoc())
7263 ErrorLoc = IDLoc;
7264 return showMatchError(Loc: ErrorLoc, ErrCode: MatchResult, ErrorInfo, Operands);
7265 }
7266 }
7267
7268 llvm_unreachable("Implement any new match types added!");
7269}
7270
/// ParseDirective parses the arm specific directives
/// Directive names are compared case-insensitively (see the .lower() below).
/// Returns false when the directive was recognized and dispatched to one of
/// the parseDirective* helpers, and true when it is not an AArch64 directive
/// so the generic parser should handle it. NOTE(review): the helpers' bool
/// results are ignored here — presumably each helper emits its own
/// diagnostics; confirm against their definitions.
bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
  // Several directive groups are only valid for a particular object-file
  // format; detect the format once up front.
  const MCContext::Environment Format = getContext().getObjectFileType();
  bool IsMachO = Format == MCContext::IsMachO;
  bool IsCOFF = Format == MCContext::IsCOFF;
  bool IsELF = Format == MCContext::IsELF;

  auto IDVal = DirectiveID.getIdentifier().lower();
  SMLoc Loc = DirectiveID.getLoc();
  // Directives accepted regardless of object-file format.
  if (IDVal == ".arch")
    parseDirectiveArch(L: Loc);
  else if (IDVal == ".cpu")
    parseDirectiveCPU(L: Loc);
  else if (IDVal == ".tlsdesccall")
    parseDirectiveTLSDescCall(L: Loc);
  else if (IDVal == ".ltorg" || IDVal == ".pool")
    parseDirectiveLtorg(L: Loc);
  else if (IDVal == ".unreq")
    parseDirectiveUnreq(L: Loc);
  else if (IDVal == ".inst")
    parseDirectiveInst(L: Loc);
  else if (IDVal == ".cfi_negate_ra_state")
    parseDirectiveCFINegateRAState();
  else if (IDVal == ".cfi_negate_ra_state_with_pc")
    parseDirectiveCFINegateRAStateWithPC();
  else if (IDVal == ".cfi_b_key_frame")
    parseDirectiveCFIBKeyFrame();
  else if (IDVal == ".cfi_mte_tagged_frame")
    parseDirectiveCFIMTETaggedFrame();
  else if (IDVal == ".arch_extension")
    parseDirectiveArchExtension(L: Loc);
  else if (IDVal == ".variant_pcs")
    parseDirectiveVariantPCS(L: Loc);
  // MachO-only: linker optimization hint directives.
  else if (IsMachO) {
    if (IDVal == MCLOHDirectiveName())
      parseDirectiveLOH(LOH: IDVal, L: Loc);
    else
      return true;
  // COFF-only: Windows structured-exception-handling (SEH) unwind directives.
  } else if (IsCOFF) {
    if (IDVal == ".seh_stackalloc")
      parseDirectiveSEHAllocStack(L: Loc);
    else if (IDVal == ".seh_endprologue")
      parseDirectiveSEHPrologEnd(L: Loc);
    else if (IDVal == ".seh_save_r19r20_x")
      parseDirectiveSEHSaveR19R20X(L: Loc);
    else if (IDVal == ".seh_save_fplr")
      parseDirectiveSEHSaveFPLR(L: Loc);
    else if (IDVal == ".seh_save_fplr_x")
      parseDirectiveSEHSaveFPLRX(L: Loc);
    else if (IDVal == ".seh_save_reg")
      parseDirectiveSEHSaveReg(L: Loc);
    else if (IDVal == ".seh_save_reg_x")
      parseDirectiveSEHSaveRegX(L: Loc);
    else if (IDVal == ".seh_save_regp")
      parseDirectiveSEHSaveRegP(L: Loc);
    else if (IDVal == ".seh_save_regp_x")
      parseDirectiveSEHSaveRegPX(L: Loc);
    else if (IDVal == ".seh_save_lrpair")
      parseDirectiveSEHSaveLRPair(L: Loc);
    else if (IDVal == ".seh_save_freg")
      parseDirectiveSEHSaveFReg(L: Loc);
    else if (IDVal == ".seh_save_freg_x")
      parseDirectiveSEHSaveFRegX(L: Loc);
    else if (IDVal == ".seh_save_fregp")
      parseDirectiveSEHSaveFRegP(L: Loc);
    else if (IDVal == ".seh_save_fregp_x")
      parseDirectiveSEHSaveFRegPX(L: Loc);
    else if (IDVal == ".seh_set_fp")
      parseDirectiveSEHSetFP(L: Loc);
    else if (IDVal == ".seh_add_fp")
      parseDirectiveSEHAddFP(L: Loc);
    else if (IDVal == ".seh_nop")
      parseDirectiveSEHNop(L: Loc);
    else if (IDVal == ".seh_save_next")
      parseDirectiveSEHSaveNext(L: Loc);
    else if (IDVal == ".seh_startepilogue")
      parseDirectiveSEHEpilogStart(L: Loc);
    else if (IDVal == ".seh_endepilogue")
      parseDirectiveSEHEpilogEnd(L: Loc);
    else if (IDVal == ".seh_trap_frame")
      parseDirectiveSEHTrapFrame(L: Loc);
    else if (IDVal == ".seh_pushframe")
      parseDirectiveSEHMachineFrame(L: Loc);
    else if (IDVal == ".seh_context")
      parseDirectiveSEHContext(L: Loc);
    else if (IDVal == ".seh_ec_context")
      parseDirectiveSEHECContext(L: Loc);
    else if (IDVal == ".seh_clear_unwound_to_call")
      parseDirectiveSEHClearUnwoundToCall(L: Loc);
    else if (IDVal == ".seh_pac_sign_lr")
      parseDirectiveSEHPACSignLR(L: Loc);
    // The four save_any_reg variants differ only in the Paired/Writeback
    // flags passed to the shared helper.
    else if (IDVal == ".seh_save_any_reg")
      parseDirectiveSEHSaveAnyReg(L: Loc, Paired: false, Writeback: false);
    else if (IDVal == ".seh_save_any_reg_p")
      parseDirectiveSEHSaveAnyReg(L: Loc, Paired: true, Writeback: false);
    else if (IDVal == ".seh_save_any_reg_x")
      parseDirectiveSEHSaveAnyReg(L: Loc, Paired: false, Writeback: true);
    else if (IDVal == ".seh_save_any_reg_px")
      parseDirectiveSEHSaveAnyReg(L: Loc, Paired: true, Writeback: true);
    else if (IDVal == ".seh_allocz")
      parseDirectiveSEHAllocZ(L: Loc);
    else if (IDVal == ".seh_save_zreg")
      parseDirectiveSEHSaveZReg(L: Loc);
    else if (IDVal == ".seh_save_preg")
      parseDirectiveSEHSavePReg(L: Loc);
    else
      return true;
  // ELF-only: AArch64 build-attribute (aeabi) directives.
  } else if (IsELF) {
    if (IDVal == ".aeabi_subsection")
      parseDirectiveAeabiSubSectionHeader(L: Loc);
    else if (IDVal == ".aeabi_attribute")
      parseDirectiveAeabiAArch64Attr(L: Loc);
    else
      return true;
  } else
    return true;
  return false;
}
7389
7390static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo,
7391 SmallVector<StringRef, 4> &RequestedExtensions) {
7392 const bool NoCrypto = llvm::is_contained(Range&: RequestedExtensions, Element: "nocrypto");
7393 const bool Crypto = llvm::is_contained(Range&: RequestedExtensions, Element: "crypto");
7394
7395 if (!NoCrypto && Crypto) {
7396 // Map 'generic' (and others) to sha2 and aes, because
7397 // that was the traditional meaning of crypto.
7398 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7399 ArchInfo == AArch64::ARMV8_3A) {
7400 RequestedExtensions.push_back(Elt: "sha2");
7401 RequestedExtensions.push_back(Elt: "aes");
7402 }
7403 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7404 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7405 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7406 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7407 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7408 ArchInfo == AArch64::ARMV9_4A || ArchInfo == AArch64::ARMV8R) {
7409 RequestedExtensions.push_back(Elt: "sm4");
7410 RequestedExtensions.push_back(Elt: "sha3");
7411 RequestedExtensions.push_back(Elt: "sha2");
7412 RequestedExtensions.push_back(Elt: "aes");
7413 }
7414 } else if (NoCrypto) {
7415 // Map 'generic' (and others) to sha2 and aes, because
7416 // that was the traditional meaning of crypto.
7417 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7418 ArchInfo == AArch64::ARMV8_3A) {
7419 RequestedExtensions.push_back(Elt: "nosha2");
7420 RequestedExtensions.push_back(Elt: "noaes");
7421 }
7422 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7423 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7424 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7425 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7426 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7427 ArchInfo == AArch64::ARMV9_4A) {
7428 RequestedExtensions.push_back(Elt: "nosm4");
7429 RequestedExtensions.push_back(Elt: "nosha3");
7430 RequestedExtensions.push_back(Elt: "nosha2");
7431 RequestedExtensions.push_back(Elt: "noaes");
7432 }
7433 }
7434}
7435
7436static SMLoc incrementLoc(SMLoc L, int Offset) {
7437 return SMLoc::getFromPointer(Ptr: L.getPointer() + Offset);
7438}
7439
7440/// parseDirectiveArch
7441/// ::= .arch token
7442bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
7443 SMLoc CurLoc = getLoc();
7444
7445 StringRef Name = getParser().parseStringToEndOfStatement().trim();
7446 StringRef Arch, ExtensionString;
7447 std::tie(args&: Arch, args&: ExtensionString) = Name.split(Separator: '+');
7448
7449 const AArch64::ArchInfo *ArchInfo = AArch64::parseArch(Arch);
7450 if (!ArchInfo)
7451 return Error(L: CurLoc, Msg: "unknown arch name");
7452
7453 if (parseToken(T: AsmToken::EndOfStatement))
7454 return true;
7455
7456 // Get the architecture and extension features.
7457 std::vector<StringRef> AArch64Features;
7458 AArch64Features.push_back(x: ArchInfo->ArchFeature);
7459 AArch64::getExtensionFeatures(Extensions: ArchInfo->DefaultExts, Features&: AArch64Features);
7460
7461 MCSubtargetInfo &STI = copySTI();
7462 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
7463 STI.setDefaultFeatures(CPU: "generic", /*TuneCPU*/ "generic",
7464 FS: join(Begin: ArchFeatures.begin(), End: ArchFeatures.end(), Separator: ","));
7465
7466 SmallVector<StringRef, 4> RequestedExtensions;
7467 if (!ExtensionString.empty())
7468 ExtensionString.split(A&: RequestedExtensions, Separator: '+');
7469
7470 ExpandCryptoAEK(ArchInfo: *ArchInfo, RequestedExtensions);
7471 CurLoc = incrementLoc(L: CurLoc, Offset: Arch.size());
7472
7473 for (auto Name : RequestedExtensions) {
7474 // Advance source location past '+'.
7475 CurLoc = incrementLoc(L: CurLoc, Offset: 1);
7476
7477 bool EnableFeature = !Name.consume_front_insensitive(Prefix: "no");
7478
7479 auto It = llvm::find_if(Range: ExtensionMap, P: [&Name](const auto &Extension) {
7480 return Extension.Name == Name;
7481 });
7482
7483 if (It == std::end(arr: ExtensionMap))
7484 return Error(L: CurLoc, Msg: "unsupported architectural extension: " + Name);
7485
7486 if (EnableFeature)
7487 STI.SetFeatureBitsTransitively(It->Features);
7488 else
7489 STI.ClearFeatureBitsTransitively(FB: It->Features);
7490 CurLoc = incrementLoc(L: CurLoc, Offset: Name.size());
7491 }
7492 FeatureBitset Features = ComputeAvailableFeatures(FB: STI.getFeatureBits());
7493 setAvailableFeatures(Features);
7494
7495 getTargetStreamer().emitDirectiveArch(Name);
7496 return false;
7497}
7498
7499/// parseDirectiveArchExtension
7500/// ::= .arch_extension [no]feature
7501bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
7502 SMLoc ExtLoc = getLoc();
7503
7504 StringRef FullName = getParser().parseStringToEndOfStatement().trim();
7505
7506 if (parseEOL())
7507 return true;
7508
7509 bool EnableFeature = true;
7510 StringRef Name = FullName;
7511 if (Name.starts_with_insensitive(Prefix: "no")) {
7512 EnableFeature = false;
7513 Name = Name.substr(Start: 2);
7514 }
7515
7516 auto It = llvm::find_if(Range: ExtensionMap, P: [&Name](const auto &Extension) {
7517 return Extension.Name == Name;
7518 });
7519
7520 if (It == std::end(arr: ExtensionMap))
7521 return Error(L: ExtLoc, Msg: "unsupported architectural extension: " + Name);
7522
7523 MCSubtargetInfo &STI = copySTI();
7524 if (EnableFeature)
7525 STI.SetFeatureBitsTransitively(It->Features);
7526 else
7527 STI.ClearFeatureBitsTransitively(FB: It->Features);
7528 FeatureBitset Features = ComputeAvailableFeatures(FB: STI.getFeatureBits());
7529 setAvailableFeatures(Features);
7530
7531 getTargetStreamer().emitDirectiveArchExtension(Name: FullName);
7532 return false;
7533}
7534
7535/// parseDirectiveCPU
7536/// ::= .cpu id
7537bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
7538 SMLoc CurLoc = getLoc();
7539
7540 StringRef CPU, ExtensionString;
7541 std::tie(args&: CPU, args&: ExtensionString) =
7542 getParser().parseStringToEndOfStatement().trim().split(Separator: '+');
7543
7544 if (parseToken(T: AsmToken::EndOfStatement))
7545 return true;
7546
7547 SmallVector<StringRef, 4> RequestedExtensions;
7548 if (!ExtensionString.empty())
7549 ExtensionString.split(A&: RequestedExtensions, Separator: '+');
7550
7551 const llvm::AArch64::ArchInfo *CpuArch = llvm::AArch64::getArchForCpu(CPU);
7552 if (!CpuArch) {
7553 Error(L: CurLoc, Msg: "unknown CPU name");
7554 return false;
7555 }
7556 ExpandCryptoAEK(ArchInfo: *CpuArch, RequestedExtensions);
7557
7558 MCSubtargetInfo &STI = copySTI();
7559 STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, FS: "");
7560 CurLoc = incrementLoc(L: CurLoc, Offset: CPU.size());
7561
7562 for (auto Name : RequestedExtensions) {
7563 // Advance source location past '+'.
7564 CurLoc = incrementLoc(L: CurLoc, Offset: 1);
7565
7566 bool EnableFeature = !Name.consume_front_insensitive(Prefix: "no");
7567
7568 auto It = llvm::find_if(Range: ExtensionMap, P: [&Name](const auto &Extension) {
7569 return Extension.Name == Name;
7570 });
7571
7572 if (It == std::end(arr: ExtensionMap))
7573 return Error(L: CurLoc, Msg: "unsupported architectural extension: " + Name);
7574
7575 if (EnableFeature)
7576 STI.SetFeatureBitsTransitively(It->Features);
7577 else
7578 STI.ClearFeatureBitsTransitively(FB: It->Features);
7579 CurLoc = incrementLoc(L: CurLoc, Offset: Name.size());
7580 }
7581 FeatureBitset Features = ComputeAvailableFeatures(FB: STI.getFeatureBits());
7582 setAvailableFeatures(Features);
7583 return false;
7584}
7585
7586/// parseDirectiveInst
7587/// ::= .inst opcode [, ...]
7588bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
7589 if (getLexer().is(K: AsmToken::EndOfStatement))
7590 return Error(L: Loc, Msg: "expected expression following '.inst' directive");
7591
7592 auto parseOp = [&]() -> bool {
7593 SMLoc L = getLoc();
7594 const MCExpr *Expr = nullptr;
7595 if (check(P: getParser().parseExpression(Res&: Expr), Loc: L, Msg: "expected expression"))
7596 return true;
7597 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Val: Expr);
7598 if (check(P: !Value, Loc: L, Msg: "expected constant expression"))
7599 return true;
7600 getTargetStreamer().emitInst(Inst: Value->getValue());
7601 return false;
7602 };
7603
7604 return parseMany(parseOne: parseOp);
7605}
7606
7607// parseDirectiveTLSDescCall:
7608// ::= .tlsdesccall symbol
7609bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
7610 StringRef Name;
7611 if (check(P: getParser().parseIdentifier(Res&: Name), Loc: L, Msg: "expected symbol") ||
7612 parseToken(T: AsmToken::EndOfStatement))
7613 return true;
7614
7615 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
7616 const MCExpr *Expr = MCSymbolRefExpr::create(Symbol: Sym, Ctx&: getContext());
7617 Expr = MCSpecifierExpr::create(Expr, S: AArch64::S_TLSDESC, Ctx&: getContext());
7618
7619 MCInst Inst;
7620 Inst.setOpcode(AArch64::TLSDESCCALL);
7621 Inst.addOperand(Op: MCOperand::createExpr(Val: Expr));
7622
7623 getParser().getStreamer().emitInstruction(Inst, STI: getSTI());
7624 return false;
7625}
7626
7627/// ::= .loh <lohName | lohId> label1, ..., labelN
7628/// The number of arguments depends on the loh identifier.
7629bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
7630 MCLOHType Kind;
7631 if (getTok().isNot(K: AsmToken::Identifier)) {
7632 if (getTok().isNot(K: AsmToken::Integer))
7633 return TokError(Msg: "expected an identifier or a number in directive");
7634 // We successfully get a numeric value for the identifier.
7635 // Check if it is valid.
7636 int64_t Id = getTok().getIntVal();
7637 if (Id <= -1U && !isValidMCLOHType(Kind: Id))
7638 return TokError(Msg: "invalid numeric identifier in directive");
7639 Kind = (MCLOHType)Id;
7640 } else {
7641 StringRef Name = getTok().getIdentifier();
7642 // We successfully parse an identifier.
7643 // Check if it is a recognized one.
7644 int Id = MCLOHNameToId(Name);
7645
7646 if (Id == -1)
7647 return TokError(Msg: "invalid identifier in directive");
7648 Kind = (MCLOHType)Id;
7649 }
7650 // Consume the identifier.
7651 Lex();
7652 // Get the number of arguments of this LOH.
7653 int NbArgs = MCLOHIdToNbArgs(Kind);
7654
7655 assert(NbArgs != -1 && "Invalid number of arguments");
7656
7657 SmallVector<MCSymbol *, 3> Args;
7658 for (int Idx = 0; Idx < NbArgs; ++Idx) {
7659 StringRef Name;
7660 if (getParser().parseIdentifier(Res&: Name))
7661 return TokError(Msg: "expected identifier in directive");
7662 Args.push_back(Elt: getContext().getOrCreateSymbol(Name));
7663
7664 if (Idx + 1 == NbArgs)
7665 break;
7666 if (parseComma())
7667 return true;
7668 }
7669 if (parseEOL())
7670 return true;
7671
7672 getStreamer().emitLOHDirective(Kind, Args);
7673 return false;
7674}
7675
7676/// parseDirectiveLtorg
7677/// ::= .ltorg | .pool
7678bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
7679 if (parseEOL())
7680 return true;
7681 getTargetStreamer().emitCurrentConstantPool();
7682 return false;
7683}
7684
/// parseDirectiveReq
/// ::= name .req registername
///
/// Defines a register alias. The register operand may be a scalar, NEON
/// vector, SVE data vector or SVE predicate register; the kinds are tried
/// in that order. ParseStatus is tri-state: Success, Failure (hard error,
/// input already consumed) and NoMatch (fall through to the next kind).
bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
  Lex(); // Eat the '.req' token.
  SMLoc SRegLoc = getLoc();
  RegKind RegisterKind = RegKind::Scalar;
  MCRegister RegNum;
  ParseStatus ParseRes = tryParseScalarRegister(RegNum);

  if (!ParseRes.isSuccess()) {
    StringRef Kind;
    RegisterKind = RegKind::NeonVector;
    ParseRes = tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RegKind::NeonVector);

    if (ParseRes.isFailure())
      return true;

    // An alias must name a bare register; a type suffix (e.g. "v0.8b") is
    // rejected.
    if (ParseRes.isSuccess() && !Kind.empty())
      return Error(L: SRegLoc, Msg: "vector register without type specifier expected");
  }

  if (!ParseRes.isSuccess()) {
    StringRef Kind;
    RegisterKind = RegKind::SVEDataVector;
    ParseRes =
        tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RegKind::SVEDataVector);

    if (ParseRes.isFailure())
      return true;

    if (ParseRes.isSuccess() && !Kind.empty())
      return Error(L: SRegLoc,
                   Msg: "sve vector register without type specifier expected");
  }

  if (!ParseRes.isSuccess()) {
    StringRef Kind;
    RegisterKind = RegKind::SVEPredicateVector;
    ParseRes = tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RegKind::SVEPredicateVector);

    if (ParseRes.isFailure())
      return true;

    if (ParseRes.isSuccess() && !Kind.empty())
      return Error(L: SRegLoc,
                   Msg: "sve predicate register without type specifier expected");
  }

  // No register kind matched at all.
  if (!ParseRes.isSuccess())
    return Error(L: SRegLoc, Msg: "register name or alias expected");

  // Shouldn't be anything else.
  if (parseEOL())
    return true;

  // Record the alias; on redefinition the first definition wins and a
  // warning is issued (insert() keeps the existing entry).
  auto pair = std::make_pair(x&: RegisterKind, y&: RegNum);
  if (RegisterReqs.insert(KV: std::make_pair(x&: Name, y&: pair)).first->second != pair)
    Warning(L, Msg: "ignoring redefinition of register alias '" + Name + "'");

  return false;
}
7746
7747/// parseDirectiveUneq
7748/// ::= .unreq registername
7749bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
7750 if (getTok().isNot(K: AsmToken::Identifier))
7751 return TokError(Msg: "unexpected input in .unreq directive.");
7752 RegisterReqs.erase(Key: getTok().getIdentifier().lower());
7753 Lex(); // Eat the identifier.
7754 return parseToken(T: AsmToken::EndOfStatement);
7755}
7756
7757bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
7758 if (parseEOL())
7759 return true;
7760 getStreamer().emitCFINegateRAState();
7761 return false;
7762}
7763
7764bool AArch64AsmParser::parseDirectiveCFINegateRAStateWithPC() {
7765 if (parseEOL())
7766 return true;
7767 getStreamer().emitCFINegateRAStateWithPC();
7768 return false;
7769}
7770
7771/// parseDirectiveCFIBKeyFrame
7772/// ::= .cfi_b_key
7773bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
7774 if (parseEOL())
7775 return true;
7776 getStreamer().emitCFIBKeyFrame();
7777 return false;
7778}
7779
7780/// parseDirectiveCFIMTETaggedFrame
7781/// ::= .cfi_mte_tagged_frame
7782bool AArch64AsmParser::parseDirectiveCFIMTETaggedFrame() {
7783 if (parseEOL())
7784 return true;
7785 getStreamer().emitCFIMTETaggedFrame();
7786 return false;
7787}
7788
7789/// parseDirectiveVariantPCS
7790/// ::= .variant_pcs symbolname
7791bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
7792 StringRef Name;
7793 if (getParser().parseIdentifier(Res&: Name))
7794 return TokError(Msg: "expected symbol name");
7795 if (parseEOL())
7796 return true;
7797 getTargetStreamer().emitDirectiveVariantPCS(
7798 Symbol: getContext().getOrCreateSymbol(Name));
7799 return false;
7800}
7801
7802/// parseDirectiveSEHAllocStack
7803/// ::= .seh_stackalloc
7804bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) {
7805 int64_t Size;
7806 if (parseImmExpr(Out&: Size))
7807 return true;
7808 getTargetStreamer().emitARM64WinCFIAllocStack(Size);
7809 return false;
7810}
7811
/// parseDirectiveSEHPrologEnd
/// ::= .seh_endprologue
bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) {
  // No operands; forwards directly to the Windows CFI emitter.
  getTargetStreamer().emitARM64WinCFIPrologEnd();
  return false;
}
7818
7819/// parseDirectiveSEHSaveR19R20X
7820/// ::= .seh_save_r19r20_x
7821bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) {
7822 int64_t Offset;
7823 if (parseImmExpr(Out&: Offset))
7824 return true;
7825 getTargetStreamer().emitARM64WinCFISaveR19R20X(Offset);
7826 return false;
7827}
7828
7829/// parseDirectiveSEHSaveFPLR
7830/// ::= .seh_save_fplr
7831bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) {
7832 int64_t Offset;
7833 if (parseImmExpr(Out&: Offset))
7834 return true;
7835 getTargetStreamer().emitARM64WinCFISaveFPLR(Offset);
7836 return false;
7837}
7838
7839/// parseDirectiveSEHSaveFPLRX
7840/// ::= .seh_save_fplr_x
7841bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) {
7842 int64_t Offset;
7843 if (parseImmExpr(Out&: Offset))
7844 return true;
7845 getTargetStreamer().emitARM64WinCFISaveFPLRX(Offset);
7846 return false;
7847}
7848
7849/// parseDirectiveSEHSaveReg
7850/// ::= .seh_save_reg
7851bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) {
7852 unsigned Reg;
7853 int64_t Offset;
7854 if (parseRegisterInRange(Out&: Reg, Base: AArch64::X0, First: AArch64::X19, Last: AArch64::LR) ||
7855 parseComma() || parseImmExpr(Out&: Offset))
7856 return true;
7857 getTargetStreamer().emitARM64WinCFISaveReg(Reg, Offset);
7858 return false;
7859}
7860
7861/// parseDirectiveSEHSaveRegX
7862/// ::= .seh_save_reg_x
7863bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) {
7864 unsigned Reg;
7865 int64_t Offset;
7866 if (parseRegisterInRange(Out&: Reg, Base: AArch64::X0, First: AArch64::X19, Last: AArch64::LR) ||
7867 parseComma() || parseImmExpr(Out&: Offset))
7868 return true;
7869 getTargetStreamer().emitARM64WinCFISaveRegX(Reg, Offset);
7870 return false;
7871}
7872
7873/// parseDirectiveSEHSaveRegP
7874/// ::= .seh_save_regp
7875bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) {
7876 unsigned Reg;
7877 int64_t Offset;
7878 if (parseRegisterInRange(Out&: Reg, Base: AArch64::X0, First: AArch64::X19, Last: AArch64::FP) ||
7879 parseComma() || parseImmExpr(Out&: Offset))
7880 return true;
7881 getTargetStreamer().emitARM64WinCFISaveRegP(Reg, Offset);
7882 return false;
7883}
7884
7885/// parseDirectiveSEHSaveRegPX
7886/// ::= .seh_save_regp_x
7887bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) {
7888 unsigned Reg;
7889 int64_t Offset;
7890 if (parseRegisterInRange(Out&: Reg, Base: AArch64::X0, First: AArch64::X19, Last: AArch64::FP) ||
7891 parseComma() || parseImmExpr(Out&: Offset))
7892 return true;
7893 getTargetStreamer().emitARM64WinCFISaveRegPX(Reg, Offset);
7894 return false;
7895}
7896
/// parseDirectiveSEHSaveLRPair
/// ::= .seh_save_lrpair
///
/// Operands: the first register of a <reg>, lr pair and a stack offset.
bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) {
  unsigned Reg;
  int64_t Offset;
  L = getLoc();
  if (parseRegisterInRange(Out&: Reg, Base: AArch64::X0, First: AArch64::X19, Last: AArch64::LR) ||
      parseComma() || parseImmExpr(Out&: Offset))
    return true;
  // The pair's first register must be x19, x21, x23, ... — an even
  // distance from x19 (Reg here is the register number, 19..30).
  if (check(P: ((Reg - 19) % 2 != 0), Loc: L,
            Msg: "expected register with even offset from x19"))
    return true;
  getTargetStreamer().emitARM64WinCFISaveLRPair(Reg, Offset);
  return false;
}
7912
7913/// parseDirectiveSEHSaveFReg
7914/// ::= .seh_save_freg
7915bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) {
7916 unsigned Reg;
7917 int64_t Offset;
7918 if (parseRegisterInRange(Out&: Reg, Base: AArch64::D0, First: AArch64::D8, Last: AArch64::D15) ||
7919 parseComma() || parseImmExpr(Out&: Offset))
7920 return true;
7921 getTargetStreamer().emitARM64WinCFISaveFReg(Reg, Offset);
7922 return false;
7923}
7924
7925/// parseDirectiveSEHSaveFRegX
7926/// ::= .seh_save_freg_x
7927bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) {
7928 unsigned Reg;
7929 int64_t Offset;
7930 if (parseRegisterInRange(Out&: Reg, Base: AArch64::D0, First: AArch64::D8, Last: AArch64::D15) ||
7931 parseComma() || parseImmExpr(Out&: Offset))
7932 return true;
7933 getTargetStreamer().emitARM64WinCFISaveFRegX(Reg, Offset);
7934 return false;
7935}
7936
7937/// parseDirectiveSEHSaveFRegP
7938/// ::= .seh_save_fregp
7939bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) {
7940 unsigned Reg;
7941 int64_t Offset;
7942 if (parseRegisterInRange(Out&: Reg, Base: AArch64::D0, First: AArch64::D8, Last: AArch64::D14) ||
7943 parseComma() || parseImmExpr(Out&: Offset))
7944 return true;
7945 getTargetStreamer().emitARM64WinCFISaveFRegP(Reg, Offset);
7946 return false;
7947}
7948
7949/// parseDirectiveSEHSaveFRegPX
7950/// ::= .seh_save_fregp_x
7951bool AArch64AsmParser::parseDirectiveSEHSaveFRegPX(SMLoc L) {
7952 unsigned Reg;
7953 int64_t Offset;
7954 if (parseRegisterInRange(Out&: Reg, Base: AArch64::D0, First: AArch64::D8, Last: AArch64::D14) ||
7955 parseComma() || parseImmExpr(Out&: Offset))
7956 return true;
7957 getTargetStreamer().emitARM64WinCFISaveFRegPX(Reg, Offset);
7958 return false;
7959}
7960
/// parseDirectiveSEHSetFP
/// ::= .seh_set_fp
bool AArch64AsmParser::parseDirectiveSEHSetFP(SMLoc L) {
  // No operands; forwards directly to the Windows CFI emitter.
  getTargetStreamer().emitARM64WinCFISetFP();
  return false;
}
7967
7968/// parseDirectiveSEHAddFP
7969/// ::= .seh_add_fp
7970bool AArch64AsmParser::parseDirectiveSEHAddFP(SMLoc L) {
7971 int64_t Size;
7972 if (parseImmExpr(Out&: Size))
7973 return true;
7974 getTargetStreamer().emitARM64WinCFIAddFP(Size);
7975 return false;
7976}
7977
/// parseDirectiveSEHNop
/// ::= .seh_nop
bool AArch64AsmParser::parseDirectiveSEHNop(SMLoc L) {
  // No operands; forwards directly to the Windows CFI emitter.
  getTargetStreamer().emitARM64WinCFINop();
  return false;
}
7984
/// parseDirectiveSEHSaveNext
/// ::= .seh_save_next
bool AArch64AsmParser::parseDirectiveSEHSaveNext(SMLoc L) {
  // No operands; forwards directly to the Windows CFI emitter.
  getTargetStreamer().emitARM64WinCFISaveNext();
  return false;
}
7991
/// parseDirectiveSEHEpilogStart
/// ::= .seh_startepilogue
bool AArch64AsmParser::parseDirectiveSEHEpilogStart(SMLoc L) {
  // No operands; forwards directly to the Windows CFI emitter.
  getTargetStreamer().emitARM64WinCFIEpilogStart();
  return false;
}
7998
/// parseDirectiveSEHEpilogEnd
/// ::= .seh_endepilogue
bool AArch64AsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
  // No operands; forwards directly to the Windows CFI emitter.
  getTargetStreamer().emitARM64WinCFIEpilogEnd();
  return false;
}
8005
/// parseDirectiveSEHTrapFrame
/// ::= .seh_trap_frame
bool AArch64AsmParser::parseDirectiveSEHTrapFrame(SMLoc L) {
  // No operands; forwards directly to the Windows CFI emitter.
  getTargetStreamer().emitARM64WinCFITrapFrame();
  return false;
}
8012
/// parseDirectiveSEHMachineFrame
/// ::= .seh_pushframe
bool AArch64AsmParser::parseDirectiveSEHMachineFrame(SMLoc L) {
  // No operands; forwards directly to the Windows CFI emitter.
  getTargetStreamer().emitARM64WinCFIMachineFrame();
  return false;
}
8019
/// parseDirectiveSEHContext
/// ::= .seh_context
bool AArch64AsmParser::parseDirectiveSEHContext(SMLoc L) {
  // No operands; forwards directly to the Windows CFI emitter.
  getTargetStreamer().emitARM64WinCFIContext();
  return false;
}
8026
/// parseDirectiveSEHECContext
/// ::= .seh_ec_context
bool AArch64AsmParser::parseDirectiveSEHECContext(SMLoc L) {
  // No operands; forwards directly to the Windows CFI emitter.
  getTargetStreamer().emitARM64WinCFIECContext();
  return false;
}
8033
/// parseDirectiveSEHClearUnwoundToCall
/// ::= .seh_clear_unwound_to_call
bool AArch64AsmParser::parseDirectiveSEHClearUnwoundToCall(SMLoc L) {
  // No operands; forwards directly to the Windows CFI emitter.
  getTargetStreamer().emitARM64WinCFIClearUnwoundToCall();
  return false;
}
8040
/// parseDirectiveSEHPACSignLR
/// ::= .seh_pac_sign_lr
bool AArch64AsmParser::parseDirectiveSEHPACSignLR(SMLoc L) {
  // No operands; forwards directly to the Windows CFI emitter.
  getTargetStreamer().emitARM64WinCFIPACSignLR();
  return false;
}
8047
/// parseDirectiveSEHSaveAnyReg
/// ::= .seh_save_any_reg
/// ::= .seh_save_any_reg_p
/// ::= .seh_save_any_reg_x
/// ::= .seh_save_any_reg_px
///
/// Operands: a register (x, d or q) and a stack offset. The register class
/// selects the emitted opcode family (I/D/Q), and Paired/Writeback select
/// the P/X/PX variants.
bool AArch64AsmParser::parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired,
                                                   bool Writeback) {
  MCRegister Reg;
  SMLoc Start, End;
  int64_t Offset;
  if (check(P: parseRegister(Reg, StartLoc&: Start, EndLoc&: End), Loc: getLoc(), Msg: "expected register") ||
      parseComma() || parseImmExpr(Out&: Offset))
    return true;

  if (Reg == AArch64::FP || Reg == AArch64::LR ||
      (Reg >= AArch64::X0 && Reg <= AArch64::X28)) {
    // Paired or writeback saves require 16-byte aligned offsets; plain
    // single saves require 8-byte alignment.
    if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
      return Error(L, Msg: "invalid save_any_reg offset");
    // Encode x-registers as 0..30, with fp = 29 and lr = 30.
    unsigned EncodedReg;
    if (Reg == AArch64::FP)
      EncodedReg = 29;
    else if (Reg == AArch64::LR)
      EncodedReg = 30;
    else
      EncodedReg = Reg - AArch64::X0;
    if (Paired) {
      // lr (reg 30) is the highest encodable register, so it has no pair
      // partner.
      if (Reg == AArch64::LR)
        return Error(L: Start, Msg: "lr cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegIPX(Reg: EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegIP(Reg: EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegIX(Reg: EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegI(Reg: EncodedReg, Offset);
    }
  } else if (Reg >= AArch64::D0 && Reg <= AArch64::D31) {
    unsigned EncodedReg = Reg - AArch64::D0;
    // Same alignment rule as the integer case.
    if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
      return Error(L, Msg: "invalid save_any_reg offset");
    if (Paired) {
      // d31 is the last d-register; it has no pair partner.
      if (Reg == AArch64::D31)
        return Error(L: Start, Msg: "d31 cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegDPX(Reg: EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegDP(Reg: EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegDX(Reg: EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegD(Reg: EncodedReg, Offset);
    }
  } else if (Reg >= AArch64::Q0 && Reg <= AArch64::Q31) {
    unsigned EncodedReg = Reg - AArch64::Q0;
    // q-register saves always require 16-byte aligned offsets.
    if (Offset < 0 || Offset % 16)
      return Error(L, Msg: "invalid save_any_reg offset");
    if (Paired) {
      // q31 is the last q-register; it has no pair partner.
      if (Reg == AArch64::Q31)
        return Error(L: Start, Msg: "q31 cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegQPX(Reg: EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegQP(Reg: EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegQX(Reg: EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegQ(Reg: EncodedReg, Offset);
    }
  } else {
    return Error(L: Start, Msg: "save_any_reg register must be x, q or d register");
  }
  return false;
}
8125
8126/// parseDirectiveAllocZ
8127/// ::= .seh_allocz
8128bool AArch64AsmParser::parseDirectiveSEHAllocZ(SMLoc L) {
8129 int64_t Offset;
8130 if (parseImmExpr(Out&: Offset))
8131 return true;
8132 getTargetStreamer().emitARM64WinCFIAllocZ(Offset);
8133 return false;
8134}
8135
/// parseDirectiveSEHSaveZReg
/// ::= .seh_save_zreg
///
/// Operands: an SVE data register in [z8, z23] and a stack offset.
bool AArch64AsmParser::parseDirectiveSEHSaveZReg(SMLoc L) {
  MCRegister RegNum;
  StringRef Kind;
  int64_t Offset;
  ParseStatus Res =
      tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RegKind::SVEDataVector);
  if (!Res.isSuccess())
    return true;
  // Only z8-z23 are representable in this unwind opcode.
  if (check(P: RegNum < AArch64::Z8 || RegNum > AArch64::Z23, Loc: L,
            Msg: "expected register in range z8 to z23"))
    return true;
  if (parseComma() || parseImmExpr(Out&: Offset))
    return true;
  // The opcode encodes the register as its index relative to z0.
  getTargetStreamer().emitARM64WinCFISaveZReg(Reg: RegNum - AArch64::Z0, Offset);
  return false;
}
8154
/// parseDirectiveSEHSavePReg
/// ::= .seh_save_preg
///
/// Operands: an SVE predicate register in [p4, p15] and a stack offset.
bool AArch64AsmParser::parseDirectiveSEHSavePReg(SMLoc L) {
  MCRegister RegNum;
  StringRef Kind;
  int64_t Offset;
  ParseStatus Res =
      tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RegKind::SVEPredicateVector);
  if (!Res.isSuccess())
    return true;
  // Only p4-p15 are representable in this unwind opcode.
  if (check(P: RegNum < AArch64::P4 || RegNum > AArch64::P15, Loc: L,
            Msg: "expected register in range p4 to p15"))
    return true;
  if (parseComma() || parseImmExpr(Out&: Offset))
    return true;
  // The opcode encodes the register as its index relative to p0.
  getTargetStreamer().emitARM64WinCFISavePReg(Reg: RegNum - AArch64::P0, Offset);
  return false;
}
8173
bool AArch64AsmParser::parseDirectiveAeabiSubSectionHeader(SMLoc L) {
  // Handle parsing of .aeabi_subsection directives
  // - On first declaration of a subsection, expect exactly three identifiers
  //   after `.aeabi_subsection`: the subsection name and two parameters
  //   (optionality and parameter type).
  // - When switching to an existing subsection, it is valid to provide only
  //   the subsection name, or the name together with the two parameters
  //   (which must then match the original declaration).
  MCAsmParser &Parser = getParser();

  // Consume the name (subsection name)
  StringRef SubsectionName;
  AArch64BuildAttributes::VendorID SubsectionNameID;
  if (Parser.getTok().is(K: AsmToken::Identifier)) {
    SubsectionName = Parser.getTok().getIdentifier();
    SubsectionNameID = AArch64BuildAttributes::getVendorID(Vendor: SubsectionName);
  } else {
    Error(L: Parser.getTok().getLoc(), Msg: "subsection name not found");
    return true;
  }
  Parser.Lex();

  std::unique_ptr<MCELFStreamer::AttributeSubSection> SubsectionExists =
      getTargetStreamer().getAttributesSubsectionByName(Name: SubsectionName);
  // Check whether only the subsection name was provided.
  // If so, the user is trying to switch to a subsection that should have been
  // declared before.
  if (Parser.getTok().is(K: llvm::AsmToken::EndOfStatement)) {
    if (SubsectionExists) {
      // Re-activate the known subsection with its recorded parameters.
      getTargetStreamer().emitAttributesSubsection(
          VendorName: SubsectionName,
          IsOptional: static_cast<AArch64BuildAttributes::SubsectionOptional>(
              SubsectionExists->IsOptional),
          ParameterType: static_cast<AArch64BuildAttributes::SubsectionType>(
              SubsectionExists->ParameterType));
      return false;
    }
    // If the subsection does not exist, report an error.
    else {
      Error(L: Parser.getTok().getLoc(),
            Msg: "Could not switch to subsection '" + SubsectionName +
                "' using subsection name, subsection has not been defined");
      return true;
    }
  }

  // Otherwise, expecting 2 more parameters: consume a comma.
  // parseComma() returns *false* on success and calls Lex(); no need to call
  // Lex() again.
  if (Parser.parseComma()) {
    return true;
  }

  // Consume the first parameter (optionality parameter)
  AArch64BuildAttributes::SubsectionOptional IsOptional;
  // options: optional/required
  if (Parser.getTok().is(K: AsmToken::Identifier)) {
    StringRef Optionality = Parser.getTok().getIdentifier();
    IsOptional = AArch64BuildAttributes::getOptionalID(Optional: Optionality);
    if (AArch64BuildAttributes::OPTIONAL_NOT_FOUND == IsOptional) {
      Error(L: Parser.getTok().getLoc(),
            Msg: AArch64BuildAttributes::getSubsectionOptionalUnknownError());
      return true;
    }
    // A re-declaration must agree with the optionality recorded when the
    // subsection was first declared.
    if (SubsectionExists) {
      if (IsOptional != SubsectionExists->IsOptional) {
        Error(L: Parser.getTok().getLoc(),
              Msg: "optionality mismatch! subsection '" + SubsectionName +
                  "' already exists with optionality defined as '" +
                  AArch64BuildAttributes::getOptionalStr(
                      Optional: SubsectionExists->IsOptional) +
                  "' and not '" +
                  AArch64BuildAttributes::getOptionalStr(Optional: IsOptional) + "'");
        return true;
      }
    }
  } else {
    Error(L: Parser.getTok().getLoc(),
          Msg: "optionality parameter not found, expected required|optional");
    return true;
  }
  // Check for possible IsOptional unaccepted values for known subsections
  if (AArch64BuildAttributes::AEABI_FEATURE_AND_BITS == SubsectionNameID) {
    if (AArch64BuildAttributes::REQUIRED == IsOptional) {
      Error(L: Parser.getTok().getLoc(),
            Msg: "aeabi_feature_and_bits must be marked as optional");
      return true;
    }
  }
  if (AArch64BuildAttributes::AEABI_PAUTHABI == SubsectionNameID) {
    if (AArch64BuildAttributes::OPTIONAL == IsOptional) {
      Error(L: Parser.getTok().getLoc(),
            Msg: "aeabi_pauthabi must be marked as required");
      return true;
    }
  }
  Parser.Lex();
  // consume a comma
  if (Parser.parseComma()) {
    return true;
  }

  // Consume the second parameter (type parameter)
  AArch64BuildAttributes::SubsectionType Type;
  if (Parser.getTok().is(K: AsmToken::Identifier)) {
    StringRef Name = Parser.getTok().getIdentifier();
    Type = AArch64BuildAttributes::getTypeID(Type: Name);
    if (AArch64BuildAttributes::TYPE_NOT_FOUND == Type) {
      Error(L: Parser.getTok().getLoc(),
            Msg: AArch64BuildAttributes::getSubsectionTypeUnknownError());
      return true;
    }
    // A re-declaration must agree with the parameter type recorded when
    // the subsection was first declared.
    if (SubsectionExists) {
      if (Type != SubsectionExists->ParameterType) {
        Error(L: Parser.getTok().getLoc(),
              Msg: "type mismatch! subsection '" + SubsectionName +
                  "' already exists with type defined as '" +
                  AArch64BuildAttributes::getTypeStr(
                      Type: SubsectionExists->ParameterType) +
                  "' and not '" + AArch64BuildAttributes::getTypeStr(Type) +
                  "'");
        return true;
      }
    }
  } else {
    Error(L: Parser.getTok().getLoc(),
          Msg: "type parameter not found, expected uleb128|ntbs");
    return true;
  }
  // Check for possible unaccepted 'type' values for known subsections
  if (AArch64BuildAttributes::AEABI_FEATURE_AND_BITS == SubsectionNameID ||
      AArch64BuildAttributes::AEABI_PAUTHABI == SubsectionNameID) {
    if (AArch64BuildAttributes::NTBS == Type) {
      Error(L: Parser.getTok().getLoc(),
            Msg: SubsectionName + " must be marked as ULEB128");
      return true;
    }
  }
  Parser.Lex();

  // Parsing finished, check for trailing tokens.
  if (Parser.getTok().isNot(K: llvm::AsmToken::EndOfStatement)) {
    Error(L: Parser.getTok().getLoc(), Msg: "unexpected token for AArch64 build "
                                    "attributes subsection header directive");
    return true;
  }

  getTargetStreamer().emitAttributesSubsection(VendorName: SubsectionName, IsOptional, ParameterType: Type);

  return false;
}
8323
8324bool AArch64AsmParser::parseDirectiveAeabiAArch64Attr(SMLoc L) {
8325 // Expecting 2 Tokens: after '.aeabi_attribute', e.g.:
8326 // .aeabi_attribute (1)Tag_Feature_BTI, (2)[uleb128|ntbs]
8327 // separated by a comma.
8328 MCAsmParser &Parser = getParser();
8329
8330 std::unique_ptr<MCELFStreamer::AttributeSubSection> ActiveSubsection =
8331 getTargetStreamer().getActiveAttributesSubsection();
8332 if (nullptr == ActiveSubsection) {
8333 Error(L: Parser.getTok().getLoc(),
8334 Msg: "no active subsection, build attribute can not be added");
8335 return true;
8336 }
8337 StringRef ActiveSubsectionName = ActiveSubsection->VendorName;
8338 unsigned ActiveSubsectionType = ActiveSubsection->ParameterType;
8339
8340 unsigned ActiveSubsectionID = AArch64BuildAttributes::VENDOR_UNKNOWN;
8341 if (AArch64BuildAttributes::getVendorName(
8342 Vendor: AArch64BuildAttributes::AEABI_PAUTHABI) == ActiveSubsectionName)
8343 ActiveSubsectionID = AArch64BuildAttributes::AEABI_PAUTHABI;
8344 if (AArch64BuildAttributes::getVendorName(
8345 Vendor: AArch64BuildAttributes::AEABI_FEATURE_AND_BITS) ==
8346 ActiveSubsectionName)
8347 ActiveSubsectionID = AArch64BuildAttributes::AEABI_FEATURE_AND_BITS;
8348
8349 StringRef TagStr = "";
8350 unsigned Tag;
8351 if (Parser.getTok().is(K: AsmToken::Integer)) {
8352 Tag = getTok().getIntVal();
8353 } else if (Parser.getTok().is(K: AsmToken::Identifier)) {
8354 TagStr = Parser.getTok().getIdentifier();
8355 switch (ActiveSubsectionID) {
8356 case AArch64BuildAttributes::VENDOR_UNKNOWN:
8357 // Tag was provided as an unrecognized string instead of an unsigned
8358 // integer
8359 Error(L: Parser.getTok().getLoc(), Msg: "unrecognized Tag: '" + TagStr +
8360 "' \nExcept for public subsections, "
8361 "tags have to be an unsigned int.");
8362 return true;
8363 break;
8364 case AArch64BuildAttributes::AEABI_PAUTHABI:
8365 Tag = AArch64BuildAttributes::getPauthABITagsID(PauthABITag: TagStr);
8366 if (AArch64BuildAttributes::PAUTHABI_TAG_NOT_FOUND == Tag) {
8367 Error(L: Parser.getTok().getLoc(), Msg: "unknown AArch64 build attribute '" +
8368 TagStr + "' for subsection '" +
8369 ActiveSubsectionName + "'");
8370 return true;
8371 }
8372 break;
8373 case AArch64BuildAttributes::AEABI_FEATURE_AND_BITS:
8374 Tag = AArch64BuildAttributes::getFeatureAndBitsTagsID(FeatureAndBitsTag: TagStr);
8375 if (AArch64BuildAttributes::FEATURE_AND_BITS_TAG_NOT_FOUND == Tag) {
8376 Error(L: Parser.getTok().getLoc(), Msg: "unknown AArch64 build attribute '" +
8377 TagStr + "' for subsection '" +
8378 ActiveSubsectionName + "'");
8379 return true;
8380 }
8381 break;
8382 }
8383 } else {
8384 Error(L: Parser.getTok().getLoc(), Msg: "AArch64 build attributes tag not found");
8385 return true;
8386 }
8387 Parser.Lex();
8388 // consume a comma
8389 // parseComma() return *false* on success, and call Lex(), no need to call
8390 // Lex() again.
8391 if (Parser.parseComma()) {
8392 return true;
8393 }
8394
8395 // Consume the second parameter (attribute value)
8396 unsigned ValueInt = unsigned(-1);
8397 std::string ValueStr = "";
8398 if (Parser.getTok().is(K: AsmToken::Integer)) {
8399 if (AArch64BuildAttributes::NTBS == ActiveSubsectionType) {
8400 Error(
8401 L: Parser.getTok().getLoc(),
8402 Msg: "active subsection type is NTBS (string), found ULEB128 (unsigned)");
8403 return true;
8404 }
8405 ValueInt = getTok().getIntVal();
8406 } else if (Parser.getTok().is(K: AsmToken::Identifier)) {
8407 if (AArch64BuildAttributes::ULEB128 == ActiveSubsectionType) {
8408 Error(
8409 L: Parser.getTok().getLoc(),
8410 Msg: "active subsection type is ULEB128 (unsigned), found NTBS (string)");
8411 return true;
8412 }
8413 ValueStr = Parser.getTok().getIdentifier();
8414 } else if (Parser.getTok().is(K: AsmToken::String)) {
8415 if (AArch64BuildAttributes::ULEB128 == ActiveSubsectionType) {
8416 Error(
8417 L: Parser.getTok().getLoc(),
8418 Msg: "active subsection type is ULEB128 (unsigned), found NTBS (string)");
8419 return true;
8420 }
8421 ValueStr = Parser.getTok().getString();
8422 } else {
8423 Error(L: Parser.getTok().getLoc(), Msg: "AArch64 build attributes value not found");
8424 return true;
8425 }
8426 // Check for possible unaccepted values for known tags
8427 // (AEABI_FEATURE_AND_BITS)
8428 if (ActiveSubsectionID == AArch64BuildAttributes::AEABI_FEATURE_AND_BITS) {
8429 if (0 != ValueInt && 1 != ValueInt) {
8430 Error(L: Parser.getTok().getLoc(),
8431 Msg: "unknown AArch64 build attributes Value for Tag '" + TagStr +
8432 "' options are 0|1");
8433 return true;
8434 }
8435 }
8436 Parser.Lex();
8437
8438 // Parsing finished. Check for trailing tokens.
8439 if (Parser.getTok().isNot(K: llvm::AsmToken::EndOfStatement)) {
8440 Error(L: Parser.getTok().getLoc(),
8441 Msg: "unexpected token for AArch64 build attributes tag and value "
8442 "attribute directive");
8443 return true;
8444 }
8445
8446 if (unsigned(-1) != ValueInt) {
8447 getTargetStreamer().emitAttribute(VendorName: ActiveSubsectionName, Tag, Value: ValueInt, String: "");
8448 }
8449 if ("" != ValueStr) {
8450 getTargetStreamer().emitAttribute(VendorName: ActiveSubsectionName, Tag, Value: unsigned(-1),
8451 String: ValueStr);
8452 }
8453 return false;
8454}
8455
8456bool AArch64AsmParser::parseExprWithSpecifier(const MCExpr *&Res, SMLoc &E) {
8457 SMLoc Loc = getLoc();
8458 if (getLexer().getKind() != AsmToken::Identifier)
8459 return TokError(Msg: "expected '%' relocation specifier");
8460 StringRef Identifier = getParser().getTok().getIdentifier();
8461 auto Spec = AArch64::parsePercentSpecifierName(Identifier);
8462 if (!Spec)
8463 return TokError(Msg: "invalid relocation specifier");
8464
8465 getParser().Lex(); // Eat the identifier
8466 if (parseToken(T: AsmToken::LParen, Msg: "expected '('"))
8467 return true;
8468
8469 const MCExpr *SubExpr;
8470 if (getParser().parseParenExpression(Res&: SubExpr, EndLoc&: E))
8471 return true;
8472
8473 Res = MCSpecifierExpr::create(Expr: SubExpr, S: Spec, Ctx&: getContext(), Loc);
8474 return false;
8475}
8476
/// Parse an expression used in a data directive, handling AArch64-specific
/// relocation-specifier syntax on top of the generic expression parser:
///   %specifier(expr)             -- '%' form (delegated immediately)
///   expr@AUTH(key,disc[,addr])   -- pointer-authentication form
///   sym@got [+/- term ...]       -- Mach-O '@' specifier form
/// Returns true on error.
bool AArch64AsmParser::parseDataExpr(const MCExpr *&Res) {
  SMLoc EndLoc;
  // A leading '%' introduces a relocation specifier, e.g. "%got(sym)".
  if (parseOptionalToken(T: AsmToken::Percent))
    return parseExprWithSpecifier(Res, E&: EndLoc);

  if (getParser().parseExpression(Res))
    return true;
  MCAsmParser &Parser = getParser();
  // Without a trailing '@', the plain expression is the whole operand.
  if (!parseOptionalToken(T: AsmToken::At))
    return false;
  if (getLexer().getKind() != AsmToken::Identifier)
    return Error(L: getLoc(), Msg: "expected relocation specifier");

  // '@' specifier names are matched case-insensitively.
  std::string Identifier = Parser.getTok().getIdentifier().lower();
  SMLoc Loc = getLoc();
  Lex();
  if (Identifier == "auth")
    return parseAuthExpr(Res, EndLoc);

  // Only Mach-O targets accept "@got" here.
  auto Spec = AArch64::S_None;
  if (STI->getTargetTriple().isOSBinFormatMachO()) {
    if (Identifier == "got")
      Spec = AArch64::S_MACHO_GOT;
  }
  if (Spec == AArch64::S_None)
    return Error(L: Loc, Msg: "invalid relocation specifier");
  // Re-create the symbol reference with the specifier attached; a specifier
  // is only meaningful directly on a symbol.
  if (auto *SRE = dyn_cast<MCSymbolRefExpr>(Val: Res))
    Res = MCSymbolRefExpr::create(Symbol: &SRE->getSymbol(), specifier: Spec, Ctx&: getContext(),
                                  Loc: SRE->getLoc());
  else
    return Error(L: Loc, Msg: "@ specifier only allowed after a symbol");

  // Fold any trailing "+ term" / "- term" additions into the expression.
  for (;;) {
    std::optional<MCBinaryExpr::Opcode> Opcode;
    if (parseOptionalToken(T: AsmToken::Plus))
      Opcode = MCBinaryExpr::Add;
    else if (parseOptionalToken(T: AsmToken::Minus))
      Opcode = MCBinaryExpr::Sub;
    else
      break;
    const MCExpr *Term;
    if (getParser().parsePrimaryExpr(Res&: Term, EndLoc, TypeInfo: nullptr))
      return true;
    Res = MCBinaryExpr::create(Op: *Opcode, LHS: Res, RHS: Term, Ctx&: getContext(), Loc: Res->getLoc());
  }
  return false;
}
8524
8525/// parseAuthExpr
8526/// ::= _sym@AUTH(ib,123[,addr])
8527/// ::= (_sym + 5)@AUTH(ib,123[,addr])
8528/// ::= (_sym - 5)@AUTH(ib,123[,addr])
8529bool AArch64AsmParser::parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc) {
8530 MCAsmParser &Parser = getParser();
8531 MCContext &Ctx = getContext();
8532 AsmToken Tok = Parser.getTok();
8533
8534 // At this point, we encountered "<id>@AUTH". There is no fallback anymore.
8535 if (parseToken(T: AsmToken::LParen, Msg: "expected '('"))
8536 return true;
8537
8538 if (Parser.getTok().isNot(K: AsmToken::Identifier))
8539 return TokError(Msg: "expected key name");
8540
8541 StringRef KeyStr = Parser.getTok().getIdentifier();
8542 auto KeyIDOrNone = AArch64StringToPACKeyID(Name: KeyStr);
8543 if (!KeyIDOrNone)
8544 return TokError(Msg: "invalid key '" + KeyStr + "'");
8545 Parser.Lex();
8546
8547 if (parseToken(T: AsmToken::Comma, Msg: "expected ','"))
8548 return true;
8549
8550 if (Parser.getTok().isNot(K: AsmToken::Integer))
8551 return TokError(Msg: "expected integer discriminator");
8552 int64_t Discriminator = Parser.getTok().getIntVal();
8553
8554 if (!isUInt<16>(x: Discriminator))
8555 return TokError(Msg: "integer discriminator " + Twine(Discriminator) +
8556 " out of range [0, 0xFFFF]");
8557 Parser.Lex();
8558
8559 bool UseAddressDiversity = false;
8560 if (Parser.getTok().is(K: AsmToken::Comma)) {
8561 Parser.Lex();
8562 if (Parser.getTok().isNot(K: AsmToken::Identifier) ||
8563 Parser.getTok().getIdentifier() != "addr")
8564 return TokError(Msg: "expected 'addr'");
8565 UseAddressDiversity = true;
8566 Parser.Lex();
8567 }
8568
8569 EndLoc = Parser.getTok().getEndLoc();
8570 if (parseToken(T: AsmToken::RParen, Msg: "expected ')'"))
8571 return true;
8572
8573 Res = AArch64AuthMCExpr::create(Expr: Res, Discriminator, Key: *KeyIDOrNone,
8574 HasAddressDiversity: UseAddressDiversity, Ctx, Loc: Res->getLoc());
8575 return false;
8576}
8577
8578bool AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
8579 AArch64::Specifier &ELFSpec,
8580 AArch64::Specifier &DarwinSpec,
8581 int64_t &Addend) {
8582 ELFSpec = AArch64::S_INVALID;
8583 DarwinSpec = AArch64::S_None;
8584 Addend = 0;
8585
8586 if (auto *AE = dyn_cast<MCSpecifierExpr>(Val: Expr)) {
8587 ELFSpec = AE->getSpecifier();
8588 Expr = AE->getSubExpr();
8589 }
8590
8591 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Val: Expr);
8592 if (SE) {
8593 // It's a simple symbol reference with no addend.
8594 DarwinSpec = AArch64::Specifier(SE->getKind());
8595 return true;
8596 }
8597
8598 // Check that it looks like a symbol + an addend
8599 MCValue Res;
8600 bool Relocatable = Expr->evaluateAsRelocatable(Res, Asm: nullptr);
8601 if (!Relocatable || Res.getSubSym())
8602 return false;
8603
8604 // Treat expressions with an ELFSpec (like ":abs_g1:3", or
8605 // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
8606 if (!Res.getAddSym() && ELFSpec == AArch64::S_INVALID)
8607 return false;
8608
8609 if (Res.getAddSym())
8610 DarwinSpec = AArch64::Specifier(Res.getSpecifier());
8611 Addend = Res.getConstant();
8612
8613 // It's some symbol reference + a constant addend, but really
8614 // shouldn't use both Darwin and ELF syntax.
8615 return ELFSpec == AArch64::S_INVALID || DarwinSpec == AArch64::S_None;
8616}
8617
8618/// Force static initialization.
8619extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void
8620LLVMInitializeAArch64AsmParser() {
8621 RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
8622 RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
8623 RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
8624 RegisterMCAsmParser<AArch64AsmParser> W(getTheARM64_32Target());
8625 RegisterMCAsmParser<AArch64AsmParser> V(getTheAArch64_32Target());
8626}
8627
8628#define GET_REGISTER_MATCHER
8629#define GET_SUBTARGET_FEATURE_NAME
8630#define GET_MATCHER_IMPLEMENTATION
8631#define GET_MNEMONIC_SPELL_CHECKER
8632#include "AArch64GenAsmMatcher.inc"
8633
8634// Define this matcher function after the auto-generated include so we
8635// have the match class enum definitions.
/// Target hook for operand classes the generic matcher cannot validate:
/// the "za" token for the MPR class, and literal-immediate token classes
/// (MCK__HASH_N) used by InstAliases with a fixed immediate in the asm
/// string. Returns Match_Success or Match_InvalidOperand.
unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
                                                      unsigned Kind) {
  AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);

  // True iff Op is a constant immediate equal to ExpectedVal.
  auto MatchesOpImmediate = [&](int64_t ExpectedVal) -> MatchResultTy {
    if (!Op.isImm())
      return Match_InvalidOperand;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Op.getImm());
    if (!CE)
      return Match_InvalidOperand;
    if (CE->getValue() == ExpectedVal)
      return Match_Success;
    return Match_InvalidOperand;
  };

  switch (Kind) {
  default:
    return Match_InvalidOperand;
  case MCK_MPR:
    // If the Kind is a token for the MPR register class which has the "za"
    // register (SME accumulator array), check if the asm is a literal "za"
    // token. This is for the "smstart za" alias that defines the register
    // as a literal token.
    if (Op.isTokenEqual(Str: "za"))
      return Match_Success;
    return Match_InvalidOperand;

  // If the kind is a token for a literal immediate, check if our asm operand
  // matches. This is for InstAliases which have a fixed-value immediate in
  // the asm string, such as hints which are parsed into a specific
  // instruction definition.
  // Each MATCH_HASH(N) expands to a case for the "#N" token class.
#define MATCH_HASH(N)                                                          \
  case MCK__HASH_##N:                                                          \
    return MatchesOpImmediate(N);
    MATCH_HASH(0)
    MATCH_HASH(1)
    MATCH_HASH(2)
    MATCH_HASH(3)
    MATCH_HASH(4)
    MATCH_HASH(6)
    MATCH_HASH(7)
    MATCH_HASH(8)
    MATCH_HASH(10)
    MATCH_HASH(12)
    MATCH_HASH(14)
    MATCH_HASH(16)
    MATCH_HASH(24)
    MATCH_HASH(25)
    MATCH_HASH(26)
    MATCH_HASH(27)
    MATCH_HASH(28)
    MATCH_HASH(29)
    MATCH_HASH(30)
    MATCH_HASH(31)
    MATCH_HASH(32)
    MATCH_HASH(40)
    MATCH_HASH(48)
    MATCH_HASH(64)
#undef MATCH_HASH
  // Same idea for the negative literal immediates "#-N".
#define MATCH_HASH_MINUS(N)                                                    \
  case MCK__HASH__MINUS_##N:                                                   \
    return MatchesOpImmediate(-N);
    MATCH_HASH_MINUS(4)
    MATCH_HASH_MINUS(8)
    MATCH_HASH_MINUS(16)
#undef MATCH_HASH_MINUS
  }
}
8704
8705ParseStatus AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
8706
8707 SMLoc S = getLoc();
8708
8709 if (getTok().isNot(K: AsmToken::Identifier))
8710 return Error(L: S, Msg: "expected register");
8711
8712 MCRegister FirstReg;
8713 ParseStatus Res = tryParseScalarRegister(RegNum&: FirstReg);
8714 if (!Res.isSuccess())
8715 return Error(L: S, Msg: "expected first even register of a consecutive same-size "
8716 "even/odd register pair");
8717
8718 const MCRegisterClass &WRegClass =
8719 AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
8720 const MCRegisterClass &XRegClass =
8721 AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
8722
8723 bool isXReg = XRegClass.contains(Reg: FirstReg),
8724 isWReg = WRegClass.contains(Reg: FirstReg);
8725 if (!isXReg && !isWReg)
8726 return Error(L: S, Msg: "expected first even register of a consecutive same-size "
8727 "even/odd register pair");
8728
8729 const MCRegisterInfo *RI = getContext().getRegisterInfo();
8730 unsigned FirstEncoding = RI->getEncodingValue(Reg: FirstReg);
8731
8732 if (FirstEncoding & 0x1)
8733 return Error(L: S, Msg: "expected first even register of a consecutive same-size "
8734 "even/odd register pair");
8735
8736 if (getTok().isNot(K: AsmToken::Comma))
8737 return Error(L: getLoc(), Msg: "expected comma");
8738 // Eat the comma
8739 Lex();
8740
8741 SMLoc E = getLoc();
8742 MCRegister SecondReg;
8743 Res = tryParseScalarRegister(RegNum&: SecondReg);
8744 if (!Res.isSuccess())
8745 return Error(L: E, Msg: "expected second odd register of a consecutive same-size "
8746 "even/odd register pair");
8747
8748 if (RI->getEncodingValue(Reg: SecondReg) != FirstEncoding + 1 ||
8749 (isXReg && !XRegClass.contains(Reg: SecondReg)) ||
8750 (isWReg && !WRegClass.contains(Reg: SecondReg)))
8751 return Error(L: E, Msg: "expected second odd register of a consecutive same-size "
8752 "even/odd register pair");
8753
8754 MCRegister Pair;
8755 if (isXReg) {
8756 Pair = RI->getMatchingSuperReg(Reg: FirstReg, SubIdx: AArch64::sube64,
8757 RC: &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
8758 } else {
8759 Pair = RI->getMatchingSuperReg(Reg: FirstReg, SubIdx: AArch64::sube32,
8760 RC: &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
8761 }
8762
8763 Operands.push_back(Elt: AArch64Operand::CreateReg(Reg: Pair, Kind: RegKind::Scalar, S,
8764 E: getLoc(), Ctx&: getContext()));
8765
8766 return ParseStatus::Success;
8767}
8768
/// Parse an SVE data vector register operand (e.g. "z0" or "z0.s"),
/// optionally requiring an element-size suffix (ParseSuffix) and optionally
/// accepting a trailing shift/extend (ParseShiftExtend).
template <bool ParseShiftExtend, bool ParseSuffix>
ParseStatus AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
  const SMLoc S = getLoc();
  // Check for a SVE vector register specifier first.
  MCRegister RegNum;
  StringRef Kind;

  ParseStatus Res =
      tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RegKind::SVEDataVector);

  if (!Res.isSuccess())
    return Res;

  // When a suffix is mandatory, a bare register name is not a match.
  if (ParseSuffix && Kind.empty())
    return ParseStatus::NoMatch;

  const auto &KindRes = parseVectorKind(Suffix: Kind, VectorKind: RegKind::SVEDataVector);
  if (!KindRes)
    return ParseStatus::NoMatch;

  unsigned ElementWidth = KindRes->second;

  // No shift/extend is the default.
  if (!ParseShiftExtend || getTok().isNot(K: AsmToken::Comma)) {
    Operands.push_back(Elt: AArch64Operand::CreateVectorReg(
        Reg: RegNum, Kind: RegKind::SVEDataVector, ElementWidth, S, E: S, Ctx&: getContext()));

    // Also accept an optional vector index ("z0.s[1]"). NB: this inner Res
    // intentionally shadows the outer one.
    ParseStatus Res = tryParseVectorIndex(Operands);
    if (Res.isFailure())
      return ParseStatus::Failure;
    return ParseStatus::Success;
  }

  // Eat the comma
  Lex();

  // Match the shift
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
  Res = tryParseOptionalShiftExtend(Operands&: ExtOpnd);
  if (!Res.isSuccess())
    return Res;

  // Fold the parsed shift/extend into the vector-register operand itself.
  auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
  Operands.push_back(Elt: AArch64Operand::CreateVectorReg(
      Reg: RegNum, Kind: RegKind::SVEDataVector, ElementWidth, S, E: Ext->getEndLoc(),
      Ctx&: getContext(), ExtTy: Ext->getShiftExtendType(), ShiftAmount: Ext->getShiftExtendAmount(),
      HasExplicitAmount: Ext->hasShiftExtendAmount()));

  return ParseStatus::Success;
}
8819
8820ParseStatus AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
8821 MCAsmParser &Parser = getParser();
8822
8823 SMLoc SS = getLoc();
8824 const AsmToken &TokE = getTok();
8825 bool IsHash = TokE.is(K: AsmToken::Hash);
8826
8827 if (!IsHash && TokE.isNot(K: AsmToken::Identifier))
8828 return ParseStatus::NoMatch;
8829
8830 int64_t Pattern;
8831 if (IsHash) {
8832 Lex(); // Eat hash
8833
8834 // Parse the immediate operand.
8835 const MCExpr *ImmVal;
8836 SS = getLoc();
8837 if (Parser.parseExpression(Res&: ImmVal))
8838 return ParseStatus::Failure;
8839
8840 auto *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
8841 if (!MCE)
8842 return TokError(Msg: "invalid operand for instruction");
8843
8844 Pattern = MCE->getValue();
8845 } else {
8846 // Parse the pattern
8847 auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(Name: TokE.getString());
8848 if (!Pat)
8849 return ParseStatus::NoMatch;
8850
8851 Lex();
8852 Pattern = Pat->Encoding;
8853 assert(Pattern >= 0 && Pattern < 32);
8854 }
8855
8856 Operands.push_back(
8857 Elt: AArch64Operand::CreateImm(Val: MCConstantExpr::create(Value: Pattern, Ctx&: getContext()),
8858 S: SS, E: getLoc(), Ctx&: getContext()));
8859
8860 return ParseStatus::Success;
8861}
8862
8863ParseStatus
8864AArch64AsmParser::tryParseSVEVecLenSpecifier(OperandVector &Operands) {
8865 int64_t Pattern;
8866 SMLoc SS = getLoc();
8867 const AsmToken &TokE = getTok();
8868 // Parse the pattern
8869 auto Pat = AArch64SVEVecLenSpecifier::lookupSVEVECLENSPECIFIERByName(
8870 Name: TokE.getString());
8871 if (!Pat)
8872 return ParseStatus::NoMatch;
8873
8874 Lex();
8875 Pattern = Pat->Encoding;
8876 assert(Pattern >= 0 && Pattern <= 1 && "Pattern does not exist");
8877
8878 Operands.push_back(
8879 Elt: AArch64Operand::CreateImm(Val: MCConstantExpr::create(Value: Pattern, Ctx&: getContext()),
8880 S: SS, E: getLoc(), Ctx&: getContext()));
8881
8882 return ParseStatus::Success;
8883}
8884
8885ParseStatus AArch64AsmParser::tryParseGPR64x8(OperandVector &Operands) {
8886 SMLoc SS = getLoc();
8887
8888 MCRegister XReg;
8889 if (!tryParseScalarRegister(RegNum&: XReg).isSuccess())
8890 return ParseStatus::NoMatch;
8891
8892 MCContext &ctx = getContext();
8893 const MCRegisterInfo *RI = ctx.getRegisterInfo();
8894 MCRegister X8Reg = RI->getMatchingSuperReg(
8895 Reg: XReg, SubIdx: AArch64::x8sub_0,
8896 RC: &AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]);
8897 if (!X8Reg)
8898 return Error(L: SS,
8899 Msg: "expected an even-numbered x-register in the range [x0,x22]");
8900
8901 Operands.push_back(
8902 Elt: AArch64Operand::CreateReg(Reg: X8Reg, Kind: RegKind::Scalar, S: SS, E: getLoc(), Ctx&: ctx));
8903 return ParseStatus::Success;
8904}
8905
/// Parse an immediate range of the form "<first>:<last>" (e.g. "0:3") and
/// push it as an immediate-range operand.
ParseStatus AArch64AsmParser::tryParseImmRange(OperandVector &Operands) {
  SMLoc S = getLoc();

  // Require "<int> ':'" before committing, so other operand parsers still
  // get a chance (NoMatch) on anything else.
  if (getTok().isNot(K: AsmToken::Integer))
    return ParseStatus::NoMatch;

  if (getLexer().peekTok().isNot(K: AsmToken::Colon))
    return ParseStatus::NoMatch;

  const MCExpr *ImmF;
  if (getParser().parseExpression(Res&: ImmF))
    return ParseStatus::NoMatch;

  if (getTok().isNot(K: AsmToken::Colon))
    return ParseStatus::NoMatch;

  Lex(); // Eat ':'
  if (getTok().isNot(K: AsmToken::Integer))
    return ParseStatus::NoMatch;

  SMLoc E = getTok().getLoc();
  const MCExpr *ImmL;
  if (getParser().parseExpression(Res&: ImmL))
    return ParseStatus::NoMatch;

  // NOTE(review): cast<> assumes both expressions fold to constants. The
  // integer-token checks above make that likely, but an expression such as
  // "1+sym:2" would still reach here non-constant — confirm this cannot
  // happen, or switch to dyn_cast with a NoMatch fallback.
  unsigned ImmFVal = cast<MCConstantExpr>(Val: ImmF)->getValue();
  unsigned ImmLVal = cast<MCConstantExpr>(Val: ImmL)->getValue();

  Operands.push_back(
      Elt: AArch64Operand::CreateImmRange(First: ImmFVal, Last: ImmLVal, S, E, Ctx&: getContext()));
  return ParseStatus::Success;
}
8938
8939template <int Adj>
8940ParseStatus AArch64AsmParser::tryParseAdjImm0_63(OperandVector &Operands) {
8941 SMLoc S = getLoc();
8942
8943 parseOptionalToken(T: AsmToken::Hash);
8944 bool IsNegative = parseOptionalToken(T: AsmToken::Minus);
8945
8946 if (getTok().isNot(K: AsmToken::Integer))
8947 return ParseStatus::NoMatch;
8948
8949 const MCExpr *Ex;
8950 if (getParser().parseExpression(Res&: Ex))
8951 return ParseStatus::NoMatch;
8952
8953 int64_t Imm = dyn_cast<MCConstantExpr>(Val: Ex)->getValue();
8954 if (IsNegative)
8955 Imm = -Imm;
8956
8957 // We want an adjusted immediate in the range [0, 63]. If we don't have one,
8958 // return a value, which is certain to trigger a error message about invalid
8959 // immediate range instead of a non-descriptive invalid operand error.
8960 static_assert(Adj == 1 || Adj == -1, "Unsafe immediate adjustment");
8961 if (Imm == INT64_MIN || Imm == INT64_MAX || Imm + Adj < 0 || Imm + Adj > 63)
8962 Imm = -2;
8963 else
8964 Imm += Adj;
8965
8966 SMLoc E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
8967 Operands.push_back(Elt: AArch64Operand::CreateImm(
8968 Val: MCConstantExpr::create(Value: Imm, Ctx&: getContext()), S, E, Ctx&: getContext()));
8969
8970 return ParseStatus::Success;
8971}
8972