1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "AArch64InstrInfo.h"
10#include "MCTargetDesc/AArch64AddressingModes.h"
11#include "MCTargetDesc/AArch64InstPrinter.h"
12#include "MCTargetDesc/AArch64MCAsmInfo.h"
13#include "MCTargetDesc/AArch64MCTargetDesc.h"
14#include "MCTargetDesc/AArch64TargetStreamer.h"
15#include "TargetInfo/AArch64TargetInfo.h"
16#include "Utils/AArch64BaseInfo.h"
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallSet.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/StringExtras.h"
24#include "llvm/ADT/StringMap.h"
25#include "llvm/ADT/StringRef.h"
26#include "llvm/ADT/StringSwitch.h"
27#include "llvm/ADT/Twine.h"
28#include "llvm/MC/MCAsmInfo.h"
29#include "llvm/MC/MCContext.h"
30#include "llvm/MC/MCExpr.h"
31#include "llvm/MC/MCInst.h"
32#include "llvm/MC/MCLinkerOptimizationHint.h"
33#include "llvm/MC/MCObjectFileInfo.h"
34#include "llvm/MC/MCParser/AsmLexer.h"
35#include "llvm/MC/MCParser/MCAsmParser.h"
36#include "llvm/MC/MCParser/MCAsmParserExtension.h"
37#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
38#include "llvm/MC/MCParser/MCTargetAsmParser.h"
39#include "llvm/MC/MCRegisterInfo.h"
40#include "llvm/MC/MCStreamer.h"
41#include "llvm/MC/MCSubtargetInfo.h"
42#include "llvm/MC/MCSymbol.h"
43#include "llvm/MC/MCTargetOptions.h"
44#include "llvm/MC/MCValue.h"
45#include "llvm/MC/TargetRegistry.h"
46#include "llvm/Support/AArch64BuildAttributes.h"
47#include "llvm/Support/Compiler.h"
48#include "llvm/Support/ErrorHandling.h"
49#include "llvm/Support/MathExtras.h"
50#include "llvm/Support/SMLoc.h"
51#include "llvm/Support/raw_ostream.h"
52#include "llvm/TargetParser/AArch64TargetParser.h"
53#include "llvm/TargetParser/SubtargetFeature.h"
54#include <cassert>
55#include <cctype>
56#include <cstdint>
57#include <cstdio>
58#include <optional>
59#include <string>
60#include <tuple>
61#include <utility>
62#include <vector>
63
64using namespace llvm;
65
66namespace {
67
/// The categories of register a parsed register operand can belong to.
/// Used while matching register names and validating operand classes.
enum class RegKind {
  Scalar,                // General-purpose or scalar FP/SIMD register.
  NeonVector,            // NEON vector register.
  SVEDataVector,         // SVE data vector register.
  SVEPredicateAsCounter, // SVE predicate register used as a counter.
  SVEPredicateVector,    // SVE predicate vector register.
  Matrix,                // Matrix (tile/array) register.
  LookupTable            // Lookup-table register.
};
77
78enum class MatrixKind { Array, Tile, Row, Col };
79
// Describes how a parsed register relates to the register class the
// matcher expects: the same register, or its super-/sub-register
// (e.g. for GPR64as32 / GPR32as64 operands).
enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};
85
86class AArch64AsmParser : public MCTargetAsmParser {
87private:
88 StringRef Mnemonic; ///< Instruction mnemonic.
89
90 // Map of register aliases registers via the .req directive.
91 StringMap<std::pair<RegKind, MCRegister>> RegisterReqs;
92
93 class PrefixInfo {
94 public:
95 static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
96 PrefixInfo Prefix;
97 switch (Inst.getOpcode()) {
98 case AArch64::MOVPRFX_ZZ:
99 Prefix.Active = true;
100 Prefix.Dst = Inst.getOperand(i: 0).getReg();
101 break;
102 case AArch64::MOVPRFX_ZPmZ_B:
103 case AArch64::MOVPRFX_ZPmZ_H:
104 case AArch64::MOVPRFX_ZPmZ_S:
105 case AArch64::MOVPRFX_ZPmZ_D:
106 Prefix.Active = true;
107 Prefix.Predicated = true;
108 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
109 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
110 "No destructive element size set for movprfx");
111 Prefix.Dst = Inst.getOperand(i: 0).getReg();
112 Prefix.Pg = Inst.getOperand(i: 2).getReg();
113 break;
114 case AArch64::MOVPRFX_ZPzZ_B:
115 case AArch64::MOVPRFX_ZPzZ_H:
116 case AArch64::MOVPRFX_ZPzZ_S:
117 case AArch64::MOVPRFX_ZPzZ_D:
118 Prefix.Active = true;
119 Prefix.Predicated = true;
120 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
121 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
122 "No destructive element size set for movprfx");
123 Prefix.Dst = Inst.getOperand(i: 0).getReg();
124 Prefix.Pg = Inst.getOperand(i: 1).getReg();
125 break;
126 default:
127 break;
128 }
129
130 return Prefix;
131 }
132
133 PrefixInfo() = default;
134 bool isActive() const { return Active; }
135 bool isPredicated() const { return Predicated; }
136 unsigned getElementSize() const {
137 assert(Predicated);
138 return ElementSize;
139 }
140 MCRegister getDstReg() const { return Dst; }
141 MCRegister getPgReg() const {
142 assert(Predicated);
143 return Pg;
144 }
145
146 private:
147 bool Active = false;
148 bool Predicated = false;
149 unsigned ElementSize;
150 MCRegister Dst;
151 MCRegister Pg;
152 } NextPrefix;
153
154 AArch64TargetStreamer &getTargetStreamer() {
155 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
156 return static_cast<AArch64TargetStreamer &>(TS);
157 }
158
159 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
160
161 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
162 bool parseSyslAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
163 bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
164 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
165 AArch64CC::CondCode parseCondCodeString(StringRef Cond,
166 std::string &Suggestion);
167 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
168 MCRegister matchRegisterNameAlias(StringRef Name, RegKind Kind);
169 bool parseRegister(OperandVector &Operands);
170 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
171 bool parseNeonVectorList(OperandVector &Operands);
172 bool parseOptionalMulOperand(OperandVector &Operands);
173 bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
174 bool parseKeywordOperand(OperandVector &Operands);
175 bool parseOperand(OperandVector &Operands, bool isCondCode,
176 bool invertCondCode);
177 bool parseImmExpr(int64_t &Out);
178 bool parseComma();
179 bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
180 unsigned Last);
181
182 bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
183 OperandVector &Operands);
184
185 bool parseDataExpr(const MCExpr *&Res) override;
186 bool parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc);
187
188 bool parseDirectiveArch(SMLoc L);
189 bool parseDirectiveArchExtension(SMLoc L);
190 bool parseDirectiveCPU(SMLoc L);
191 bool parseDirectiveInst(SMLoc L);
192
193 bool parseDirectiveTLSDescCall(SMLoc L);
194
195 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
196 bool parseDirectiveLtorg(SMLoc L);
197
198 bool parseDirectiveReq(StringRef Name, SMLoc L);
199 bool parseDirectiveUnreq(SMLoc L);
200 bool parseDirectiveCFINegateRAState();
201 bool parseDirectiveCFINegateRAStateWithPC();
202 bool parseDirectiveCFIBKeyFrame();
203 bool parseDirectiveCFIMTETaggedFrame();
204
205 bool parseDirectiveVariantPCS(SMLoc L);
206
207 bool parseDirectiveSEHAllocStack(SMLoc L);
208 bool parseDirectiveSEHPrologEnd(SMLoc L);
209 bool parseDirectiveSEHSaveR19R20X(SMLoc L);
210 bool parseDirectiveSEHSaveFPLR(SMLoc L);
211 bool parseDirectiveSEHSaveFPLRX(SMLoc L);
212 bool parseDirectiveSEHSaveReg(SMLoc L);
213 bool parseDirectiveSEHSaveRegX(SMLoc L);
214 bool parseDirectiveSEHSaveRegP(SMLoc L);
215 bool parseDirectiveSEHSaveRegPX(SMLoc L);
216 bool parseDirectiveSEHSaveLRPair(SMLoc L);
217 bool parseDirectiveSEHSaveFReg(SMLoc L);
218 bool parseDirectiveSEHSaveFRegX(SMLoc L);
219 bool parseDirectiveSEHSaveFRegP(SMLoc L);
220 bool parseDirectiveSEHSaveFRegPX(SMLoc L);
221 bool parseDirectiveSEHSetFP(SMLoc L);
222 bool parseDirectiveSEHAddFP(SMLoc L);
223 bool parseDirectiveSEHNop(SMLoc L);
224 bool parseDirectiveSEHSaveNext(SMLoc L);
225 bool parseDirectiveSEHEpilogStart(SMLoc L);
226 bool parseDirectiveSEHEpilogEnd(SMLoc L);
227 bool parseDirectiveSEHTrapFrame(SMLoc L);
228 bool parseDirectiveSEHMachineFrame(SMLoc L);
229 bool parseDirectiveSEHContext(SMLoc L);
230 bool parseDirectiveSEHECContext(SMLoc L);
231 bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
232 bool parseDirectiveSEHPACSignLR(SMLoc L);
233 bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);
234 bool parseDirectiveSEHAllocZ(SMLoc L);
235 bool parseDirectiveSEHSaveZReg(SMLoc L);
236 bool parseDirectiveSEHSavePReg(SMLoc L);
237 bool parseDirectiveAeabiSubSectionHeader(SMLoc L);
238 bool parseDirectiveAeabiAArch64Attr(SMLoc L);
239
240 bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
241 SmallVectorImpl<SMLoc> &Loc);
242 unsigned getNumRegsForRegKind(RegKind K);
243 bool matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
244 OperandVector &Operands, MCStreamer &Out,
245 uint64_t &ErrorInfo,
246 bool MatchingInlineAsm) override;
247 /// @name Auto-generated Match Functions
248 /// {
249
250#define GET_ASSEMBLER_HEADER
251#include "AArch64GenAsmMatcher.inc"
252
253 /// }
254
255 ParseStatus tryParseScalarRegister(MCRegister &Reg);
256 ParseStatus tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
257 RegKind MatchKind);
258 ParseStatus tryParseMatrixRegister(OperandVector &Operands);
259 ParseStatus tryParseSVCR(OperandVector &Operands);
260 ParseStatus tryParseOptionalShiftExtend(OperandVector &Operands);
261 ParseStatus tryParseBarrierOperand(OperandVector &Operands);
262 ParseStatus tryParseBarriernXSOperand(OperandVector &Operands);
263 ParseStatus tryParseSysReg(OperandVector &Operands);
264 ParseStatus tryParseSysCROperand(OperandVector &Operands);
265 template <bool IsSVEPrefetch = false>
266 ParseStatus tryParsePrefetch(OperandVector &Operands);
267 ParseStatus tryParseRPRFMOperand(OperandVector &Operands);
268 ParseStatus tryParsePSBHint(OperandVector &Operands);
269 ParseStatus tryParseBTIHint(OperandVector &Operands);
270 ParseStatus tryParseCMHPriorityHint(OperandVector &Operands);
271 ParseStatus tryParseTIndexHint(OperandVector &Operands);
272 ParseStatus tryParseAdrpLabel(OperandVector &Operands);
273 ParseStatus tryParseAdrLabel(OperandVector &Operands);
274 template <bool AddFPZeroAsLiteral>
275 ParseStatus tryParseFPImm(OperandVector &Operands);
276 ParseStatus tryParseImmWithOptionalShift(OperandVector &Operands);
277 ParseStatus tryParseGPR64sp0Operand(OperandVector &Operands);
278 bool tryParseNeonVectorRegister(OperandVector &Operands);
279 ParseStatus tryParseVectorIndex(OperandVector &Operands);
280 ParseStatus tryParseGPRSeqPair(OperandVector &Operands);
281 ParseStatus tryParseSyspXzrPair(OperandVector &Operands);
282 template <bool ParseShiftExtend,
283 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
284 ParseStatus tryParseGPROperand(OperandVector &Operands);
285 ParseStatus tryParseZTOperand(OperandVector &Operands);
286 template <bool ParseShiftExtend, bool ParseSuffix>
287 ParseStatus tryParseSVEDataVector(OperandVector &Operands);
288 template <RegKind RK>
289 ParseStatus tryParseSVEPredicateVector(OperandVector &Operands);
290 ParseStatus
291 tryParseSVEPredicateOrPredicateAsCounterVector(OperandVector &Operands);
292 template <RegKind VectorKind>
293 ParseStatus tryParseVectorList(OperandVector &Operands,
294 bool ExpectMatch = false);
295 ParseStatus tryParseMatrixTileList(OperandVector &Operands);
296 ParseStatus tryParseSVEPattern(OperandVector &Operands);
297 ParseStatus tryParseSVEVecLenSpecifier(OperandVector &Operands);
298 ParseStatus tryParseGPR64x8(OperandVector &Operands);
299 ParseStatus tryParseImmRange(OperandVector &Operands);
300 template <int> ParseStatus tryParseAdjImm0_63(OperandVector &Operands);
301 ParseStatus tryParsePHintInstOperand(OperandVector &Operands);
302
303public:
304 enum AArch64MatchResultTy {
305 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
306#define GET_OPERAND_DIAGNOSTIC_TYPES
307#include "AArch64GenAsmMatcher.inc"
308 };
309 bool IsILP32;
310 bool IsWindowsArm64EC;
311
312 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
313 const MCInstrInfo &MII, const MCTargetOptions &Options)
314 : MCTargetAsmParser(Options, STI, MII) {
315 IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
316 IsWindowsArm64EC = STI.getTargetTriple().isWindowsArm64EC();
317 MCAsmParserExtension::Initialize(Parser);
318 MCStreamer &S = getParser().getStreamer();
319 if (S.getTargetStreamer() == nullptr)
320 new AArch64TargetStreamer(S);
321
322 // Alias .hword/.word/.[dx]word to the target-independent
323 // .2byte/.4byte/.8byte directives as they have the same form and
324 // semantics:
325 /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
326 Parser.addAliasForDirective(Directive: ".hword", Alias: ".2byte");
327 Parser.addAliasForDirective(Directive: ".word", Alias: ".4byte");
328 Parser.addAliasForDirective(Directive: ".dword", Alias: ".8byte");
329 Parser.addAliasForDirective(Directive: ".xword", Alias: ".8byte");
330
331 // Initialize the set of available features.
332 setAvailableFeatures(ComputeAvailableFeatures(FB: getSTI().getFeatureBits()));
333 }
334
335 bool areEqualRegs(const MCParsedAsmOperand &Op1,
336 const MCParsedAsmOperand &Op2) const override;
337 bool parseInstruction(ParseInstructionInfo &Info, StringRef Name,
338 SMLoc NameLoc, OperandVector &Operands) override;
339 bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
340 ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
341 SMLoc &EndLoc) override;
342 bool ParseDirective(AsmToken DirectiveID) override;
343 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
344 unsigned Kind) override;
345
346 static bool classifySymbolRef(const MCExpr *Expr, AArch64::Specifier &ELFSpec,
347 AArch64::Specifier &DarwinSpec,
348 int64_t &Addend);
349};
350
351/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
352/// instruction.
353class AArch64Operand : public MCParsedAsmOperand {
354private:
  // Discriminator for the payload union below: identifies which union
  // member is active for this operand.
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_ImmRange,
    k_CondCode,
    k_Register,
    k_MatrixRegister,
    k_MatrixTileList,
    k_SVCR,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier,
    k_PSBHint,
    k_PHint,
    k_BTIHint,
    k_CMHPriorityHint,
    k_TIndexHint,
  } Kind;
379
  // Source range covered by this operand.
  SMLoc StartLoc, EndLoc;

  // Plain token operand (mnemonic pieces, keywords).
  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  // Separate shift/extend operand.
  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount;
  };

  struct RegOp {
    MCRegister Reg;
    RegKind Kind;
    int ElementWidth;

    // The register may be allowed as a different register class,
    // e.g. for GPR64as32 or GPR32as64.
    RegConstraintEqualityTy EqualityTy;

    // In some cases the shift/extend needs to be explicitly parsed together
    // with the register, rather than as a separate operand. This is needed
    // for addressing modes where the instruction as a whole dictates the
    // scaling/extend, rather than specific bits in the instruction.
    // By parsing them as a single operand, we avoid the need to pass an
    // extra operand in all CodeGen patterns (because all operands need to
    // have an associated value), and we avoid the need to update TableGen to
    // accept operands that have no associated bits in the instruction.
    //
    // An added benefit of parsing them together is that the assembler
    // can give a sensible diagnostic if the scaling is not correct.
    //
    // The default is 'lsl #0' (HasExplicitAmount = false) if no
    // ShiftExtend is specified.
    ShiftExtendOp ShiftExtend;
  };

  struct MatrixRegOp {
    MCRegister Reg;
    unsigned ElementWidth;
    MatrixKind Kind;
  };

  // Bitmask of matrix tiles named in a tile list.
  struct MatrixTileListOp {
    unsigned RegMask = 0;
  };

  // Vector register list: first register plus count/stride/element shape.
  struct VectorListOp {
    MCRegister Reg;
    unsigned Count;
    unsigned Stride;
    unsigned NumElements;
    unsigned ElementWidth;
    RegKind RegisterKind;
  };

  struct VectorIndexOp {
    int Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  // Immediate with an explicit left-shift amount.
  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  // Inclusive immediate range, spelled first:last.
  struct ImmRangeOp {
    unsigned First;
    unsigned Last;
  };

  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };

  struct FPImmOp {
    uint64_t Val; // APFloat value bitcasted to uint64_t.
    bool IsExact; // describes whether parsed value was exact.
  };

  struct BarrierOp {
    const char *Data;
    unsigned Length;
    unsigned Val; // Not the enum since not all values have names.
    bool HasnXSModifier;
  };

  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint32_t MRSReg;
    uint32_t MSRReg;
    uint32_t PStateField;
  };

  struct SysCRImmOp {
    unsigned Val;
  };

  // The named-hint payloads below share a layout: the spelled name
  // (Data/Length) plus its numeric encoding (Val).
  struct PrefetchOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct PSBHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };
  struct PHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };
  struct BTIHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };
  struct CMHPriorityHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };
  struct TIndexHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct SVCROp {
    const char *Data;
    unsigned Length;
    unsigned PStateField;
  };

  // Payload storage: exactly one member is active, selected by Kind.
  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct MatrixRegOp MatrixReg;
    struct MatrixTileListOp MatrixTileList;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct ImmRangeOp ImmRange;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct PHintOp PHint;
    struct BTIHintOp BTIHint;
    struct CMHPriorityHintOp CMHPriorityHint;
    struct TIndexHintOp TIndexHint;
    struct ShiftExtendOp ShiftExtend;
    struct SVCROp SVCR;
  };

  // Keep the MCContext around as the MCExprs may need manipulated during
  // the add<>Operands() calls.
  MCContext &Ctx;
552
553public:
  // Construct an operand of kind K; the caller fills in the matching
  // union payload afterwards.
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}

  // Copy constructor: copies only the union member selected by o.Kind,
  // since reading any other member of the union would be invalid.
  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_ImmRange:
      ImmRange = o.ImmRange;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_MatrixRegister:
      MatrixReg = o.MatrixReg;
      break;
    case k_MatrixTileList:
      MatrixTileList = o.MatrixTileList;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_PHint:
      PHint = o.PHint;
      break;
    case k_BTIHint:
      BTIHint = o.BTIHint;
      break;
    case k_CMHPriorityHint:
      CMHPriorityHint = o.CMHPriorityHint;
      break;
    case k_TIndexHint:
      TIndexHint = o.TIndexHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    case k_SVCR:
      SVCR = o.SVCR;
      break;
    }
  }
629
  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  // Each accessor below asserts that the union member it reads is the one
  // selected by Kind; callers must check the operand's kind first.

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  // True if the token is a suffix spelled as part of the mnemonic.
  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  unsigned getFirstImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.First;
  }

  unsigned getLastImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.Last;
  }

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  // Reconstructs the APFloat from the stored 64-bit representation.
  APFloat getFPImm() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
  }

  bool getFPImmIsExact() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }

  bool getBarriernXSModifier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.HasnXSModifier;
  }

  MCRegister getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.Reg;
  }

  MCRegister getMatrixReg() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.Reg;
  }

  unsigned getMatrixElementWidth() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.ElementWidth;
  }

  MatrixKind getMatrixKind() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.Kind;
  }

  unsigned getMatrixTileListRegMask() const {
    assert(isMatrixTileList() && "Invalid access!");
    return MatrixTileList.RegMask;
  }

  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;
  }

  MCRegister getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Reg;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorListStride() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Stride;
  }

  int getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return PSBHint.Val;
  }

  unsigned getPHint() const {
    assert(Kind == k_PHint && "Invalid access!");
    return PHint.Val;
  }

  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);
  }

  StringRef getPHintName() const {
    assert(Kind == k_PHint && "Invalid access!");
    return StringRef(PHint.Data, PHint.Length);
  }

  unsigned getBTIHint() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return BTIHint.Val;
  }

  StringRef getBTIHintName() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return StringRef(BTIHint.Data, BTIHint.Length);
  }

  unsigned getCMHPriorityHint() const {
    assert(Kind == k_CMHPriorityHint && "Invalid access!");
    return CMHPriorityHint.Val;
  }

  StringRef getCMHPriorityHintName() const {
    assert(Kind == k_CMHPriorityHint && "Invalid access!");
    return StringRef(CMHPriorityHint.Data, CMHPriorityHint.Length);
  }

  unsigned getTIndexHint() const {
    assert(Kind == k_TIndexHint && "Invalid access!");
    return TIndexHint.Val;
  }

  StringRef getTIndexHintName() const {
    assert(Kind == k_TIndexHint && "Invalid access!");
    return StringRef(TIndexHint.Data, TIndexHint.Length);
  }

  StringRef getSVCR() const {
    assert(Kind == k_SVCR && "Invalid access!");
    return StringRef(SVCR.Data, SVCR.Length);
  }

  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return StringRef(Prefetch.Data, Prefetch.Length);
  }

  // The shift/extend accessors work for both a standalone shift/extend
  // operand and one attached to a register operand.
  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Type;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Type;
    llvm_unreachable("Invalid access!");
  }

  unsigned getShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Amount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Amount;
    llvm_unreachable("Invalid access!");
  }

  bool hasShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.HasExplicitAmount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.HasExplicitAmount;
    llvm_unreachable("Invalid access!");
  }

  bool isImm() const override { return Kind == k_Immediate; }
  bool isMem() const override { return false; }
851
852 bool isUImm6() const {
853 if (!isImm())
854 return false;
855 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
856 if (!MCE)
857 return false;
858 int64_t Val = MCE->getValue();
859 return (Val >= 0 && Val < 64);
860 }
861
  // Signed immediate of Width bits (unscaled).
  template <int Width> bool isSImm() const {
    return bool(isSImmScaled<Width, 1>());
  }

  // Signed immediate of Bits bits, a multiple of Scale.
  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
    return isImmScaled<Bits, Scale>(true);
  }

  // Unsigned immediate of Bits bits, a multiple of Scale. When IsRange is
  // set the operand must be an immediate range whose width matches Offset.
  template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
  DiagnosticPredicate isUImmScaled() const {
    if (IsRange && isImmRange() &&
        (getLastImmVal() != getFirstImmVal() + Offset))
      return DiagnosticPredicate::NoMatch;

    return isImmScaled<Bits, Scale, IsRange>(false);
  }
878
879 template <int Bits, int Scale, bool IsRange = false>
880 DiagnosticPredicate isImmScaled(bool Signed) const {
881 if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
882 (isImmRange() && !IsRange))
883 return DiagnosticPredicate::NoMatch;
884
885 int64_t Val;
886 if (isImmRange())
887 Val = getFirstImmVal();
888 else {
889 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
890 if (!MCE)
891 return DiagnosticPredicate::NoMatch;
892 Val = MCE->getValue();
893 }
894
895 int64_t MinVal, MaxVal;
896 if (Signed) {
897 int64_t Shift = Bits - 1;
898 MinVal = (int64_t(1) << Shift) * -Scale;
899 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
900 } else {
901 MinVal = 0;
902 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
903 }
904
905 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
906 return DiagnosticPredicate::Match;
907
908 return DiagnosticPredicate::NearMatch;
909 }
910
911 DiagnosticPredicate isSVEPattern() const {
912 if (!isImm())
913 return DiagnosticPredicate::NoMatch;
914 auto *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
915 if (!MCE)
916 return DiagnosticPredicate::NoMatch;
917 int64_t Val = MCE->getValue();
918 if (Val >= 0 && Val < 32)
919 return DiagnosticPredicate::Match;
920 return DiagnosticPredicate::NearMatch;
921 }
922
923 DiagnosticPredicate isSVEVecLenSpecifier() const {
924 if (!isImm())
925 return DiagnosticPredicate::NoMatch;
926 auto *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
927 if (!MCE)
928 return DiagnosticPredicate::NoMatch;
929 int64_t Val = MCE->getValue();
930 if (Val >= 0 && Val <= 1)
931 return DiagnosticPredicate::Match;
932 return DiagnosticPredicate::NearMatch;
933 }
934
935 bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
936 AArch64::Specifier ELFSpec;
937 AArch64::Specifier DarwinSpec;
938 int64_t Addend;
939 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFSpec, DarwinSpec,
940 Addend)) {
941 // If we don't understand the expression, assume the best and
942 // let the fixup and relocation code deal with it.
943 return true;
944 }
945
946 if (DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
947 llvm::is_contained(
948 Set: {AArch64::S_LO12, AArch64::S_GOT_LO12, AArch64::S_GOT_AUTH_LO12,
949 AArch64::S_DTPREL_LO12, AArch64::S_DTPREL_LO12_NC,
950 AArch64::S_TPREL_LO12, AArch64::S_TPREL_LO12_NC,
951 AArch64::S_GOTTPREL_LO12_NC, AArch64::S_TLSDESC_LO12,
952 AArch64::S_TLSDESC_AUTH_LO12, AArch64::S_SECREL_LO12,
953 AArch64::S_SECREL_HI12, AArch64::S_GOT_PAGE_LO15},
954 Element: ELFSpec)) {
955 // Note that we don't range-check the addend. It's adjusted modulo page
956 // size when converted, so there is no "out of range" condition when using
957 // @pageoff.
958 return true;
959 } else if (DarwinSpec == AArch64::S_MACHO_GOTPAGEOFF ||
960 DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF) {
961 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
962 return Addend == 0;
963 }
964
965 return false;
966 }
967
968 template <int Scale> bool isUImm12Offset() const {
969 if (!isImm())
970 return false;
971
972 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
973 if (!MCE)
974 return isSymbolicUImm12Offset(Expr: getImm());
975
976 int64_t Val = MCE->getValue();
977 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
978 }
979
980 template <int N, int M>
981 bool isImmInRange() const {
982 if (!isImm())
983 return false;
984 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
985 if (!MCE)
986 return false;
987 int64_t Val = MCE->getValue();
988 return (Val >= N && Val <= M);
989 }
990
991 // NOTE: Also used for isLogicalImmNot as anything that can be represented as
992 // a logical immediate can always be represented when inverted.
993 template <typename T>
994 bool isLogicalImm() const {
995 if (!isImm())
996 return false;
997 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
998 if (!MCE)
999 return false;
1000
1001 int64_t Val = MCE->getValue();
1002 // Avoid left shift by 64 directly.
1003 uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
1004 // Allow all-0 or all-1 in top bits to permit bitwise NOT.
1005 if ((Val & Upper) && (Val & Upper) != Upper)
1006 return false;
1007
1008 return AArch64_AM::isLogicalImmediate(imm: Val & ~Upper, regSize: sizeof(T) * 8);
1009 }
1010
  // Kind predicates for the shifted-immediate and immediate-range payloads.
  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  bool isImmRange() const { return Kind == k_ImmRange; }
1014
/// Returns the immediate value as a pair of (imm, shift) if the immediate is
/// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
/// immediate that can be shifted by 'Shift'.
template <unsigned Width>
std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
  // Explicitly shifted immediate whose shift already matches Width.
  if (isShiftedImm() && Width == getShiftedImmShift())
    if (auto *CE = dyn_cast<MCConstantExpr>(Val: getShiftedImmVal()))
      return std::make_pair(x: CE->getValue(), y: Width);

  if (isImm())
    if (auto *CE = dyn_cast<MCConstantExpr>(Val: getImm())) {
      int64_t Val = CE->getValue();
      // A non-zero value with its low Width bits clear can be represented
      // shifted; otherwise report it unshifted.
      if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
        return std::make_pair(x: Val >> Width, y: Width);
      else
        return std::make_pair(x&: Val, y: 0u);
    }

  // Not a constant immediate in either form.
  return {};
}
1035
/// True if the operand can be encoded as an ADD/SUB immediate: a 12-bit
/// unsigned value optionally shifted left by 12, or a symbol reference whose
/// relocation produces such a value.
bool isAddSubImm() const {
  if (!isShiftedImm() && !isImm())
    return false;

  const MCExpr *Expr;

  // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
  if (isShiftedImm()) {
    unsigned Shift = ShiftedImm.ShiftAmount;
    Expr = ShiftedImm.Val;
    if (Shift != 0 && Shift != 12)
      return false;
  } else {
    Expr = getImm();
  }

  AArch64::Specifier ELFSpec;
  AArch64::Specifier DarwinSpec;
  int64_t Addend;
  if (AArch64AsmParser::classifySymbolRef(Expr, ELFSpec, DarwinSpec,
                                          Addend)) {
    // Symbolic operands match only with specifiers that resolve to a 12-bit
    // low-offset / page-offset style relocation.
    return DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
           DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF ||
           (DarwinSpec == AArch64::S_MACHO_GOTPAGEOFF && Addend == 0) ||
           llvm::is_contained(
               Set: {AArch64::S_LO12, AArch64::S_GOT_AUTH_LO12,
                AArch64::S_DTPREL_HI12, AArch64::S_DTPREL_LO12,
                AArch64::S_DTPREL_LO12_NC, AArch64::S_TPREL_HI12,
                AArch64::S_TPREL_LO12, AArch64::S_TPREL_LO12_NC,
                AArch64::S_TLSDESC_LO12, AArch64::S_TLSDESC_AUTH_LO12,
                AArch64::S_SECREL_HI12, AArch64::S_SECREL_LO12},
               Element: ELFSpec);
  }

  // If it's a constant, it should be a real immediate in range.
  if (auto ShiftedVal = getShiftedVal<12>())
    return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

  // If it's an expression, we hope for the best and let the fixup/relocation
  // code deal with it.
  return true;
}
1078
/// True if the operand is a concrete negative immediate whose magnitude fits
/// the ADD/SUB 12-bit encoding (so the instruction can be negated).
bool isAddSubImmNeg() const {
  if (!isShiftedImm() && !isImm())
    return false;

  // Otherwise it should be a real negative immediate in range.
  if (auto ShiftedVal = getShiftedVal<12>())
    return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;

  return false;
}
1089
// Signed value in the range -128 to +127. For element widths of
// 16 bits or higher it may also be a signed multiple of 256 in the
// range -32768 to +32512.
// For element-width of 8 bits a range of -128 to 255 is accepted,
// since a copy of a byte can be either signed/unsigned.
template <typename T>
DiagnosticPredicate isSVECpyImm() const {
  if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(Val: getImm())))
    return DiagnosticPredicate::NoMatch;

  // Byte elements never take a shifted immediate (no #imm, lsl #8 form).
  bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                std::is_same<int8_t, T>::value;
  if (auto ShiftedImm = getShiftedVal<8>())
    if (!(IsByte && ShiftedImm->second) &&
        AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                   << ShiftedImm->second))
      return DiagnosticPredicate::Match;

  // Right operand kind but out-of-range value: report a near-match so the
  // user gets a range diagnostic.
  return DiagnosticPredicate::NearMatch;
}
1110
// Unsigned value in the range 0 to 255. For element widths of
// 16 bits or higher it may also be a signed multiple of 256 in the
// range 0 to 65280.
template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
  if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(Val: getImm())))
    return DiagnosticPredicate::NoMatch;

  // Byte elements never take a shifted immediate (no #imm, lsl #8 form).
  bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                std::is_same<int8_t, T>::value;
  if (auto ShiftedImm = getShiftedVal<8>())
    if (!(IsByte && ShiftedImm->second) &&
        AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
                                      << ShiftedImm->second))
      return DiagnosticPredicate::Match;

  // Right operand kind but out-of-range value.
  return DiagnosticPredicate::NearMatch;
}
1128
1129 template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
1130 if (isLogicalImm<T>() && !isSVECpyImm<T>())
1131 return DiagnosticPredicate::Match;
1132 return DiagnosticPredicate::NoMatch;
1133 }
1134
/// True if the operand is a condition code (eq, ne, ...).
bool isCondCode() const { return Kind == k_CondCode; }

/// True if the operand is a constant encodable as an AdvSIMD modified
/// immediate of type 10 (64-bit per-byte replicated mask).
bool isSIMDImmType10() const {
  if (!isImm())
    return false;
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
  if (!MCE)
    return false;
  return AArch64_AM::isAdvSIMDModImmType10(Imm: MCE->getValue());
}
1145
/// True if the operand can be a branch target with an N-bit signed,
/// 4-byte-aligned displacement. Non-constant expressions are accepted and
/// left to fixup/relocation handling.
template<int N>
bool isBranchTarget() const {
  if (!isImm())
    return false;
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
  if (!MCE)
    return true;
  int64_t Val = MCE->getValue();
  // Branch targets are word (4-byte) aligned.
  if (Val & 0x3)
    return false;
  assert(N > 0 && "Branch target immediate cannot be 0 bits!");
  // Signed N-bit range, scaled by 4.
  return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
}
1159
/// True if the operand is a symbol reference whose ELF specifier is one of
/// AllowedModifiers, suitable for a MOVZ/MOVK/MOVN. Darwin specifiers are
/// never accepted here.
bool isMovWSymbol(ArrayRef<AArch64::Specifier> AllowedModifiers) const {
  if (!isImm())
    return false;

  AArch64::Specifier ELFSpec;
  AArch64::Specifier DarwinSpec;
  int64_t Addend;
  if (!AArch64AsmParser::classifySymbolRef(Expr: getImm(), ELFSpec, DarwinSpec,
                                           Addend)) {
    return false;
  }
  if (DarwinSpec != AArch64::S_None)
    return false;

  return llvm::is_contained(Range&: AllowedModifiers, Element: ELFSpec);
}
1176
/// MOVW symbol with a :abs_g3:/:prel_g3: style specifier (bits 63:48).
bool isMovWSymbolG3() const {
  return isMovWSymbol(AllowedModifiers: {AArch64::S_ABS_G3, AArch64::S_PREL_G3});
}

/// MOVW symbol with a g2-class specifier (bits 47:32).
bool isMovWSymbolG2() const {
  return isMovWSymbol(AllowedModifiers: {AArch64::S_ABS_G2, AArch64::S_ABS_G2_S,
                                         AArch64::S_ABS_G2_NC, AArch64::S_PREL_G2,
                                         AArch64::S_PREL_G2_NC, AArch64::S_TPREL_G2,
                                         AArch64::S_DTPREL_G2});
}

/// MOVW symbol with a g1-class specifier (bits 31:16).
bool isMovWSymbolG1() const {
  return isMovWSymbol(AllowedModifiers: {AArch64::S_ABS_G1, AArch64::S_ABS_G1_S,
                                         AArch64::S_ABS_G1_NC, AArch64::S_PREL_G1,
                                         AArch64::S_PREL_G1_NC, AArch64::S_GOTTPREL_G1,
                                         AArch64::S_TPREL_G1, AArch64::S_TPREL_G1_NC,
                                         AArch64::S_DTPREL_G1, AArch64::S_DTPREL_G1_NC});
}

/// MOVW symbol with a g0-class specifier (bits 15:0).
bool isMovWSymbolG0() const {
  return isMovWSymbol(AllowedModifiers: {AArch64::S_ABS_G0, AArch64::S_ABS_G0_S,
                                         AArch64::S_ABS_G0_NC, AArch64::S_PREL_G0,
                                         AArch64::S_PREL_G0_NC, AArch64::S_GOTTPREL_G0_NC,
                                         AArch64::S_TPREL_G0, AArch64::S_TPREL_G0_NC,
                                         AArch64::S_DTPREL_G0, AArch64::S_DTPREL_G0_NC});
}
1203
/// True if the operand can be encoded as a MOVZ alias (e.g. "mov") with the
/// given register width and shift.
template<int RegWidth, int Shift>
bool isMOVZMovAlias() const {
  if (!isImm()) return false;

  const MCExpr *E = getImm();
  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: E)) {
    uint64_t Value = CE->getValue();

    return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
  }
  // Only supports the case of Shift being 0 if an expression is used as an
  // operand
  return !Shift && E;
}
1218
/// True if the operand can be encoded as a MOVN alias with the given
/// register width and shift. Unlike MOVZ, only constants are accepted.
template<int RegWidth, int Shift>
bool isMOVNMovAlias() const {
  if (!isImm()) return false;

  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
  if (!CE) return false;
  uint64_t Value = CE->getValue();

  return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
}
1229
/// True if the operand is a floating-point immediate encodable in the 8-bit
/// FMOV immediate field (getFP64Imm returns -1 for unencodable values).
bool isFPImm() const {
  return Kind == k_FPImm &&
         AArch64_AM::getFP64Imm(Imm: getFPImm().bitcastToAPInt()) != -1;
}

/// Barrier operand without the nXS modifier (DSB/DMB/ISB).
bool isBarrier() const {
  return Kind == k_Barrier && !getBarriernXSModifier();
}
/// Barrier operand with the nXS modifier (DSB ... nXS).
bool isBarriernXS() const {
  return Kind == k_Barrier && getBarriernXSModifier();
}
/// True if the operand names a system register.
bool isSysReg() const { return Kind == k_SysReg; }

/// System register readable via MRS (-1U marks "not valid for MRS").
bool isMRSSystemRegister() const {
  if (!isSysReg()) return false;

  return SysReg.MRSReg != -1U;
}

/// System register writable via MSR (-1U marks "not valid for MSR").
bool isMSRSystemRegister() const {
  if (!isSysReg()) return false;
  return SysReg.MSRReg != -1U;
}
1253
/// PState field written via MSR with a 1-bit immediate (0-1).
bool isSystemPStateFieldWithImm0_1() const {
  if (!isSysReg()) return false;
  return AArch64PState::lookupPStateImm0_1ByEncoding(Encoding: SysReg.PStateField);
}

/// PState field written via MSR with a 4-bit immediate (0-15).
bool isSystemPStateFieldWithImm0_15() const {
  if (!isSysReg())
    return false;
  return AArch64PState::lookupPStateImm0_15ByEncoding(Encoding: SysReg.PStateField);
}

/// True if the operand is a valid SME SVCR field (-1U marks invalid).
bool isSVCR() const {
  if (Kind != k_SVCR)
    return false;
  return SVCR.PStateField != -1U;
}
1270
/// Generic register predicate required by MCParsedAsmOperand.
bool isReg() const override {
  return Kind == k_Register;
}

/// True if the operand is a register list (e.g. { v0.8b, v1.8b }).
bool isVectorList() const { return Kind == k_VectorList; }

/// Scalar (GPR/FPR) register.
bool isScalarReg() const {
  return Kind == k_Register && Reg.Kind == RegKind::Scalar;
}

/// NEON vector register (Vn).
bool isNeonVectorReg() const {
  return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
}

/// NEON vector register restricted to the "lo" halves of the FPR64/FPR128
/// register files (required by some by-element instructions).
bool isNeonVectorRegLo() const {
  return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
         (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
              Reg: Reg.Reg) ||
          AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
              Reg: Reg.Reg));
}

/// NEON vector register restricted to V0-V7.
bool isNeonVectorReg0to7() const {
  return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
         (AArch64MCRegisterClasses[AArch64::FPR128_0to7RegClassID].contains(
             Reg: Reg.Reg));
}

/// SME matrix (ZA) register operand.
bool isMatrix() const { return Kind == k_MatrixRegister; }
/// SME matrix tile list operand.
bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1301
/// True if the operand is an SVE predicate-as-counter register (PNn) that is
/// a member of the given register class.
template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
  // Map the requested class to the register kind it implies; anything else
  // is a programming error at the instantiation site.
  RegKind RK;
  switch (Class) {
  case AArch64::PPRRegClassID:
  case AArch64::PPR_3bRegClassID:
  case AArch64::PPR_p8to15RegClassID:
  case AArch64::PNRRegClassID:
  case AArch64::PNR_p8to15RegClassID:
  case AArch64::PPRorPNRRegClassID:
    RK = RegKind::SVEPredicateAsCounter;
    break;
  default:
    llvm_unreachable("Unsupported register class");
  }

  return (Kind == k_Register && Reg.Kind == RK) &&
         AArch64MCRegisterClasses[Class].contains(Reg: getReg());
}
1320
/// True if the operand is an SVE data (Zn) or predicate (Pn) register that is
/// a member of the given register class.
template <unsigned Class> bool isSVEVectorReg() const {
  // Derive the expected register kind from the class: Z-register classes
  // imply data vectors, P-register classes imply predicate vectors.
  RegKind RK;
  switch (Class) {
  case AArch64::ZPRRegClassID:
  case AArch64::ZPR_3bRegClassID:
  case AArch64::ZPR_4bRegClassID:
  case AArch64::ZPRMul2_LoRegClassID:
  case AArch64::ZPRMul2_HiRegClassID:
  case AArch64::ZPR_KRegClassID:
    RK = RegKind::SVEDataVector;
    break;
  case AArch64::PPRRegClassID:
  case AArch64::PPR_3bRegClassID:
  case AArch64::PPR_p8to15RegClassID:
  case AArch64::PNRRegClassID:
  case AArch64::PNR_p8to15RegClassID:
  case AArch64::PPRorPNRRegClassID:
    RK = RegKind::SVEPredicateVector;
    break;
  default:
    llvm_unreachable("Unsupported register class");
  }

  return (Kind == k_Register && Reg.Kind == RK) &&
         AArch64MCRegisterClasses[Class].contains(Reg: getReg());
}
1347
/// Scalar FP register used where a Z register is expected (FPR-as-ZPR
/// aliasing in SVE assembly).
template <unsigned Class> bool isFPRasZPR() const {
  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
         AArch64MCRegisterClasses[Class].contains(Reg: getReg());
}

/// SVE predicate vector register with the exact element width; wrong width
/// or class yields a near-match for better diagnostics.
template <int ElementWidth, unsigned Class>
DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
  if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
    return DiagnosticPredicate::NoMatch;

  if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
    return DiagnosticPredicate::Match;

  return DiagnosticPredicate::NearMatch;
}
1363
/// Accepts either a predicate vector (Pn) or a predicate-as-counter (PNn)
/// register of the exact element width and class.
template <int ElementWidth, unsigned Class>
DiagnosticPredicate isSVEPredicateOrPredicateAsCounterRegOfWidth() const {
  if (Kind != k_Register || (Reg.Kind != RegKind::SVEPredicateAsCounter &&
                             Reg.Kind != RegKind::SVEPredicateVector))
    return DiagnosticPredicate::NoMatch;

  if ((isSVEPredicateAsCounterReg<Class>() ||
       isSVEPredicateVectorRegOfWidth<ElementWidth, Class>()) &&
      Reg.ElementWidth == ElementWidth)
    return DiagnosticPredicate::Match;

  return DiagnosticPredicate::NearMatch;
}
1377
/// SVE predicate-as-counter register with the exact element width; wrong
/// width or class yields a near-match for better diagnostics.
template <int ElementWidth, unsigned Class>
DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
  if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
    return DiagnosticPredicate::NoMatch;

  if (isSVEPredicateAsCounterReg<Class>() && (Reg.ElementWidth == ElementWidth))
    return DiagnosticPredicate::Match;

  return DiagnosticPredicate::NearMatch;
}
1388
/// SVE data vector (Zn) register with the exact element width; wrong width
/// or class yields a near-match for better diagnostics.
template <int ElementWidth, unsigned Class>
DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
  if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
    return DiagnosticPredicate::NoMatch;

  if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
    return DiagnosticPredicate::Match;

  return DiagnosticPredicate::NearMatch;
}
1399
/// SVE data vector register carrying a shift/extend that must match the
/// expected extend type and scaling for the access width.
template <int ElementWidth, unsigned Class,
          AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
          bool ShiftWidthAlwaysSame>
DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
  auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
  if (!VectorMatch.isMatch())
    return DiagnosticPredicate::NoMatch;

  // Give a more specific diagnostic when the user has explicitly typed in
  // a shift-amount that does not match what is expected, but for which
  // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
  bool MatchShift = getShiftExtendAmount() == Log2_32(Value: ShiftWidth / 8);
  if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                      ShiftExtendTy == AArch64_AM::SXTW) &&
      !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
    return DiagnosticPredicate::NoMatch;

  if (MatchShift && ShiftExtendTy == getShiftExtendType())
    return DiagnosticPredicate::Match;

  return DiagnosticPredicate::NearMatch;
}
1422
/// 64-bit GPR written where a 32-bit register is encoded (Wn alias of Xn).
bool isGPR32as64() const {
  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
         AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg: Reg.Reg);
}

/// 32-bit GPR written where a 64-bit register is encoded (Xn alias of Wn).
bool isGPR64as32() const {
  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
         AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg: Reg.Reg);
}

/// Register from the GPR64x8 tuple class (used by e.g. SYSP/RCWCAS).
bool isGPR64x8() const {
  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
         AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
             Reg: Reg.Reg);
}

/// Even/odd 32-bit register pair (CASP-style operands).
bool isWSeqPair() const {
  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
         AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
             Reg: Reg.Reg);
}

/// Even/odd 64-bit register pair (CASP-style operands).
bool isXSeqPair() const {
  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
         AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
             Reg: Reg.Reg);
}

/// XZR used as the register-pair operand of SYSP.
bool isSyspXzrPair() const {
  return isGPR64<AArch64::GPR64RegClassID>() && Reg.Reg == AArch64::XZR;
}
1454
/// Rotation immediate for complex-number instructions: a constant that is
/// Remainder modulo Angle and at most 270 (e.g. 90/180/270 for FCADD,
/// 0/90/180/270 for FCMLA).
template<int64_t Angle, int64_t Remainder>
DiagnosticPredicate isComplexRotation() const {
  if (!isImm())
    return DiagnosticPredicate::NoMatch;

  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
  if (!CE)
    return DiagnosticPredicate::NoMatch;
  uint64_t Value = CE->getValue();

  if (Value % Angle == Remainder && Value <= 270)
    return DiagnosticPredicate::Match;
  // An immediate of the wrong value still near-matches so the user gets a
  // rotation-specific diagnostic.
  return DiagnosticPredicate::NearMatch;
}
1469
/// Scalar 64-bit GPR belonging to the given register class.
template <unsigned RegClassID> bool isGPR64() const {
  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
         AArch64MCRegisterClasses[RegClassID].contains(Reg: getReg());
}

/// 64-bit GPR with an LSL shift scaled for an ExtWidth-bit access
/// (shift amount must equal log2(ExtWidth / 8)).
template <unsigned RegClassID, int ExtWidth>
DiagnosticPredicate isGPR64WithShiftExtend() const {
  if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
    return DiagnosticPredicate::NoMatch;

  if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
      getShiftExtendAmount() == Log2_32(Value: ExtWidth / 8))
    return DiagnosticPredicate::Match;
  return DiagnosticPredicate::NearMatch;
}
1485
/// Is this a vector list with the type implicit (presumably attached to the
/// instruction itself)?
template <RegKind VectorKind, unsigned NumRegs, bool IsConsecutive = false>
bool isImplicitlyTypedVectorList() const {
  // NumElements == 0 marks a list written without an element type suffix.
  return Kind == k_VectorList && VectorList.Count == NumRegs &&
         VectorList.NumElements == 0 &&
         VectorList.RegisterKind == VectorKind &&
         (!IsConsecutive || (VectorList.Stride == 1));
}
1495
1496 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1497 unsigned ElementWidth, unsigned Stride = 1>
1498 bool isTypedVectorList() const {
1499 if (Kind != k_VectorList)
1500 return false;
1501 if (VectorList.Count != NumRegs)
1502 return false;
1503 if (VectorList.RegisterKind != VectorKind)
1504 return false;
1505 if (VectorList.ElementWidth != ElementWidth)
1506 return false;
1507 if (VectorList.Stride != Stride)
1508 return false;
1509 return VectorList.NumElements == NumElements;
1510 }
1511
/// Typed vector list whose first register must also belong to RegClass
/// (used for lists constrained to e.g. multiples of 2/4).
template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
          unsigned ElementWidth, unsigned RegClass>
DiagnosticPredicate isTypedVectorListMultiple() const {
  bool Res =
      isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
  if (!Res)
    return DiagnosticPredicate::NoMatch;
  if (!AArch64MCRegisterClasses[RegClass].contains(Reg: VectorList.Reg))
    return DiagnosticPredicate::NearMatch;
  return DiagnosticPredicate::Match;
}

/// Strided (non-consecutive) vector list for SME2: the first register must
/// lie in the low `Stride` registers of either the Z0- or Z16-based half.
template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
          unsigned ElementWidth>
DiagnosticPredicate isTypedVectorListStrided() const {
  bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
                               ElementWidth, Stride>();
  if (!Res)
    return DiagnosticPredicate::NoMatch;
  if ((VectorList.Reg < (AArch64::Z0 + Stride)) ||
      ((VectorList.Reg >= AArch64::Z16) &&
       (VectorList.Reg < (AArch64::Z16 + Stride))))
    return DiagnosticPredicate::Match;
  return DiagnosticPredicate::NoMatch;
}
1537
1538 template <int Min, int Max>
1539 DiagnosticPredicate isVectorIndex() const {
1540 if (Kind != k_VectorIndex)
1541 return DiagnosticPredicate::NoMatch;
1542 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1543 return DiagnosticPredicate::Match;
1544 return DiagnosticPredicate::NearMatch;
1545 }
1546
/// Plain token operand (mnemonic suffixes, punctuation).
bool isToken() const override { return Kind == k_Token; }

/// Token operand with exactly the given spelling.
bool isTokenEqual(StringRef Str) const {
  return Kind == k_Token && getToken() == Str;
}
/// System control register operand (Cn).
bool isSysCR() const { return Kind == k_SysCR; }
/// PRFM/PRFUM prefetch operation operand.
bool isPrefetch() const { return Kind == k_Prefetch; }
/// PSB hint operand.
bool isPSBHint() const { return Kind == k_PSBHint; }
/// PRFM hint operand (FEAT_PRFM_SLC-style named hints).
bool isPHint() const { return Kind == k_PHint; }
/// BTI hint operand.
bool isBTIHint() const { return Kind == k_BTIHint; }
/// CMH priority hint operand.
bool isCMHPriorityHint() const { return Kind == k_CMHPriorityHint; }
/// TIndex hint operand.
bool isTIndexHint() const { return Kind == k_TIndexHint; }
/// Any parsed shift or extend modifier.
bool isShiftExtend() const { return Kind == k_ShiftExtend; }
/// Shift modifier proper: LSL/LSR/ASR/ROR/MSL (extends excluded).
bool isShifter() const {
  if (!isShiftExtend())
    return false;

  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
          ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
          ST == AArch64_AM::MSL);
}
1569
/// True if the operand is exactly the FP constant identified by ImmEnum in
/// the exact-FP-immediate table (e.g. #0.5, #1.0 for FCVT* variants).
template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
  if (Kind != k_FPImm)
    return DiagnosticPredicate::NoMatch;

  // Only immediates that parsed losslessly can compare bitwise-equal.
  if (getFPImmIsExact()) {
    // Lookup the immediate from table of supported immediates.
    auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(Enum: ImmEnum);
    assert(Desc && "Unknown enum value");

    // Calculate its FP value.
    APFloat RealVal(APFloat::IEEEdouble());
    auto StatusOrErr =
        RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
    if (errorToBool(Err: StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
      llvm_unreachable("FP immediate is not exact");

    if (getFPImm().bitwiseIsEqual(RHS: RealVal))
      return DiagnosticPredicate::Match;
  }

  return DiagnosticPredicate::NearMatch;
}

/// Matches if the operand equals either of the two tabled FP constants.
template <unsigned ImmA, unsigned ImmB>
DiagnosticPredicate isExactFPImm() const {
  DiagnosticPredicate Res = DiagnosticPredicate::NoMatch;
  if ((Res = isExactFPImm<ImmA>()))
    return DiagnosticPredicate::Match;
  if ((Res = isExactFPImm<ImmB>()))
    return DiagnosticPredicate::Match;
  // Res carries the stronger of NoMatch/NearMatch from the last check.
  return Res;
}
1602
/// Extend modifier (UXTB..SXTX or LSL) with an amount of at most 4, as
/// allowed by extended-register address/ALU forms.
bool isExtend() const {
  if (!isShiftExtend())
    return false;

  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
  return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
          ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
          ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
          ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
          ET == AArch64_AM::LSL) &&
         getShiftExtendAmount() <= 4;
}

/// Extend usable on a 64-bit instruction whose source is a W register.
bool isExtend64() const {
  if (!isExtend())
    return false;
  // Make sure the extend expects a 32-bit source register.
  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
  return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
         ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
         ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
}

/// 64-bit-source extend: UXTX/SXTX/LSL with amount <= 4.
bool isExtendLSL64() const {
  if (!isExtend())
    return false;
  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
  return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
          ET == AArch64_AM::LSL) &&
         getShiftExtendAmount() <= 4;
}
1634
1635 bool isLSLImm3Shift() const {
1636 if (!isShiftExtend())
1637 return false;
1638 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1639 return ET == AArch64_AM::LSL && getShiftExtendAmount() <= 7;
1640 }
1641
/// Extend valid for an X-register memory index of a Width-bit access:
/// LSL/SXTX, with the amount either 0 or scaled to the access size.
template<int Width> bool isMemXExtend() const {
  if (!isExtend())
    return false;
  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
  return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
         (getShiftExtendAmount() == Log2_32(Value: Width / 8) ||
          getShiftExtendAmount() == 0);
}

/// Extend valid for a W-register memory index of a Width-bit access:
/// UXTW/SXTW, with the amount either 0 or scaled to the access size.
template<int Width> bool isMemWExtend() const {
  if (!isExtend())
    return false;
  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
  return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
         (getShiftExtendAmount() == Log2_32(Value: Width / 8) ||
          getShiftExtendAmount() == 0);
}
1659
/// Shifter valid for arithmetic (ADD/SUB/CMP) operands, with the amount
/// strictly less than the register width.
template <unsigned width>
bool isArithmeticShifter() const {
  if (!isShifter())
    return false;

  // An arithmetic shifter is LSL, LSR, or ASR.
  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
          ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
}

/// Shifter valid for logical (AND/ORR/EOR) operands: additionally
/// permits ROR.
template <unsigned width>
bool isLogicalShifter() const {
  if (!isShifter())
    return false;

  // A logical shifter is LSL, LSR, ASR or ROR.
  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
          ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
         getShiftExtendAmount() < width;
}
1682
/// Shifter valid for a 32-bit MOVZ/MOVN/MOVK immediate.
bool isMovImm32Shifter() const {
  if (!isShifter())
    return false;

  // A 32-bit MOVi shifter is LSL of 0 or 16.
  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
  if (ST != AArch64_AM::LSL)
    return false;
  uint64_t Val = getShiftExtendAmount();
  return (Val == 0 || Val == 16);
}

/// Shifter valid for a 64-bit MOVZ/MOVN/MOVK immediate.
bool isMovImm64Shifter() const {
  if (!isShifter())
    return false;

  // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
  if (ST != AArch64_AM::LSL)
    return false;
  uint64_t Val = getShiftExtendAmount();
  return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
}
1706
/// Shifter valid for vector logical-immediate forms (per 32-bit element).
bool isLogicalVecShifter() const {
  if (!isShifter())
    return false;

  // A logical vector shifter is a left shift by 0, 8, 16, or 24.
  unsigned Shift = getShiftExtendAmount();
  return getShiftExtendType() == AArch64_AM::LSL &&
         (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
}

/// Shifter valid for vector logical-immediate forms per 16-bit element.
bool isLogicalVecHalfWordShifter() const {
  if (!isLogicalVecShifter())
    return false;

  // A halfword logical vector shifter is a left shift by 0 or 8.
  unsigned Shift = getShiftExtendAmount();
  return getShiftExtendType() == AArch64_AM::LSL &&
         (Shift == 0 || Shift == 8);
}

/// Shifter valid for MOVI/MVNI "shifting ones" forms.
bool isMoveVecShifter() const {
  if (!isShiftExtend())
    return false;

  // A move vector shifter is an MSL shift by 8 or 16.
  unsigned Shift = getShiftExtendAmount();
  return getShiftExtendType() == AArch64_AM::MSL &&
         (Shift == 8 || Shift == 16);
}
1736
// Fallback unscaled operands are for aliases of LDR/STR that fall back
// to LDUR/STUR when the offset is not legal for the former but is for
// the latter. As such, in addition to checking for being a legal unscaled
// address, also check that it is not a legal scaled address. This avoids
// ambiguity in the matcher.
template<int Width>
bool isSImm9OffsetFB() const {
  return isSImm<9>() && !isUImm12Offset<Width / 8>();
}
1746
/// ADRP label: a page-aligned displacement within +/-4GiB (21-bit signed
/// page count). Non-constant expressions pass through to relocation.
bool isAdrpLabel() const {
  // Validation was handled during parsing, so we just verify that
  // something didn't go haywire.
  if (!isImm())
    return false;

  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Imm.Val)) {
    int64_t Val = CE->getValue();
    // 21-bit signed page index, each page being 4096 bytes.
    int64_t Min = - (4096 * (1LL << (21 - 1)));
    int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
    return (Val % 4096) == 0 && Val >= Min && Val <= Max;
  }

  return true;
}

/// ADR label: a byte displacement within the 21-bit signed range.
/// Non-constant expressions pass through to relocation.
bool isAdrLabel() const {
  // Validation was handled during parsing, so we just verify that
  // something didn't go haywire.
  if (!isImm())
    return false;

  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Imm.Val)) {
    int64_t Val = CE->getValue();
    int64_t Min = - (1LL << (21 - 1));
    int64_t Max = ((1LL << (21 - 1)) - 1);
    return Val >= Min && Val <= Max;
  }

  return true;
}
1778
/// SME matrix register of the exact kind, element size, and register class;
/// a matrix operand failing any property is a near-match for diagnostics.
template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
DiagnosticPredicate isMatrixRegOperand() const {
  if (!isMatrix())
    return DiagnosticPredicate::NoMatch;
  if (getMatrixKind() != Kind ||
      !AArch64MCRegisterClasses[RegClass].contains(Reg: getMatrixReg()) ||
      EltSize != getMatrixElementWidth())
    return DiagnosticPredicate::NearMatch;
  return DiagnosticPredicate::Match;
}
1789
bool isPAuthPCRelLabel16Operand() const {
  // PAuth PCRel16 operands are similar to regular branch targets, but only
  // negative values are allowed for concrete immediates as signing instr
  // should be in a lower address.
  if (!isImm())
    return false;
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
  if (!MCE)
    // Non-constant expressions are left to fixup/relocation handling.
    return true;
  int64_t Val = MCE->getValue();
  // Must be word (4-byte) aligned.
  if (Val & 0b11)
    return false;
  // Non-positive and within the signed 16-bit range scaled by 4.
  return (Val <= 0) && (Val > -(1 << 18));
}
1804
/// Append Expr to Inst, folding constants to plain immediate operands.
/// A null Expr is treated as the immediate 0.
void addExpr(MCInst &Inst, const MCExpr *Expr) const {
  // Add as immediates when possible. Null MCExpr = 0.
  if (!Expr)
    Inst.addOperand(Op: MCOperand::createImm(Val: 0));
  else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Expr))
    Inst.addOperand(Op: MCOperand::createImm(Val: CE->getValue()));
  else
    Inst.addOperand(Op: MCOperand::createExpr(Val: Expr));
}
1814
/// Append the parsed register to Inst.
void addRegOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(Op: MCOperand::createReg(Reg: getReg()));
}

/// Append the parsed SME matrix register to Inst.
void addMatrixOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(Op: MCOperand::createReg(Reg: getMatrixReg()));
}
1824
/// Append the W register with the same encoding as the parsed X register
/// (the two register files share hardware encodings).
void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  assert(
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

  const MCRegisterInfo *RI = Ctx.getRegisterInfo();
  MCRegister Reg = RI->getRegClass(i: AArch64::GPR32RegClassID)
                       .getRegister(i: RI->getEncodingValue(Reg: getReg()));

  Inst.addOperand(Op: MCOperand::createReg(Reg));
}

/// Append the X register with the same encoding as the parsed W register.
void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  assert(
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));

  const MCRegisterInfo *RI = Ctx.getRegisterInfo();
  MCRegister Reg = RI->getRegClass(i: AArch64::GPR64RegClassID)
                       .getRegister(i: RI->getEncodingValue(Reg: getReg()));

  Inst.addOperand(Op: MCOperand::createReg(Reg));
}
1848
/// Append the Z register aliasing the parsed FP register of the given
/// width (Bn/Hn/Sn/Dn/Qn all overlay Zn with the same index).
template <int Width>
void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
  unsigned Base;
  switch (Width) {
  case 8: Base = AArch64::B0; break;
  case 16: Base = AArch64::H0; break;
  case 32: Base = AArch64::S0; break;
  case 64: Base = AArch64::D0; break;
  case 128: Base = AArch64::Q0; break;
  default:
    llvm_unreachable("Unsupported width");
  }
  // Same register index, Z register file.
  Inst.addOperand(Op: MCOperand::createReg(Reg: AArch64::Z0 + getReg() - Base));
}
1863
/// Append the register, mapping a predicate-as-counter (PNn) to the
/// corresponding predicate (Pn) when necessary.
void addPPRorPNRRegOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  MCRegister Reg = getReg();
  // Normalise to PPR
  if (Reg >= AArch64::PN0 && Reg <= AArch64::PN15)
    Reg = Reg - AArch64::PN0 + AArch64::P0;
  Inst.addOperand(Op: MCOperand::createReg(Reg));
}

/// Append the Pn register corresponding to the parsed PNn register.
void addPNRasPPRRegOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(
      Op: MCOperand::createReg(Reg: (getReg() - AArch64::PN0) + AArch64::P0));
}
1878
/// Append the D register overlaying the low half of the parsed Q register.
void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  assert(
      AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
  Inst.addOperand(Op: MCOperand::createReg(Reg: AArch64::D0 + getReg() - AArch64::Q0));
}

/// Append the parsed Q register unchanged.
void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  assert(
      AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
  Inst.addOperand(Op: MCOperand::createReg(Reg: getReg()));
}

/// Append a register already validated by isNeonVectorRegLo().
void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(Op: MCOperand::createReg(Reg: getReg()));
}

/// Append a register already validated by isNeonVectorReg0to7().
void addVectorReg0to7Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(Op: MCOperand::createReg(Reg: getReg()));
}
1902
/// Selects the register-tuple family used when emitting a vector list.
enum VecListIndexType {
  VecListIdx_DReg = 0,
  VecListIdx_QReg = 1,
  VecListIdx_ZReg = 2,
  VecListIdx_PReg = 3,
};

/// Append the tuple register for a consecutive vector list: the list's
/// first register is rebased from the parse-time base (index 0 of the
/// table row) onto the NumRegs-wide tuple class.
template <VecListIndexType RegTy, unsigned NumRegs,
          bool IsConsecutive = false>
void addVectorListOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  assert((!IsConsecutive || (getVectorListStride() == 1)) &&
         "Expected consecutive registers");
  // Row per VecListIndexType; column 0 is the base register the parsed
  // start is relative to, columns 1..4 are the 1..4-register tuple bases.
  static const unsigned FirstRegs[][5] = {
    /* DReg */ { AArch64::Q0,
                 AArch64::D0, AArch64::D0_D1,
                 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
    /* QReg */ { AArch64::Q0,
                 AArch64::Q0, AArch64::Q0_Q1,
                 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
    /* ZReg */ { AArch64::Z0,
                 AArch64::Z0, AArch64::Z0_Z1,
                 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
    /* PReg */ { AArch64::P0,
                 AArch64::P0, AArch64::P0_P1 }
  };

  assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
         " NumRegs must be <= 4 for ZRegs");

  assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
         " NumRegs must be <= 2 for PRegs");

  unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
  Inst.addOperand(Op: MCOperand::createReg(Reg: FirstReg + getVectorListStart() -
                                        FirstRegs[(unsigned)RegTy][0]));
}
1940
  // Emit an SME2 strided Z-register list operand. Strided lists come in two
  // hardware-defined shapes: 2-register lists with stride 8 (Zn, Zn+8) and
  // 4-register lists with stride 4 (Zn, Zn+4, Zn+8, Zn+12), each split into
  // a low half (start < Z16) and a high half (start >= Z16).
  template <unsigned NumRegs>
  void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4");

    switch (NumRegs) {
    case 2:
      if (getVectorListStart() < AArch64::Z16) {
        // Low half: start must be Z0..Z7.
        assert((getVectorListStart() < AArch64::Z8) &&
               (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
        Inst.addOperand(Op: MCOperand::createReg(
            Reg: AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
      } else {
        // High half: start must be Z16..Z23.
        assert((getVectorListStart() < AArch64::Z24) &&
               (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
        Inst.addOperand(Op: MCOperand::createReg(
            Reg: AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
      }
      break;
    case 4:
      if (getVectorListStart() < AArch64::Z16) {
        // Low half: start must be Z0..Z3.
        assert((getVectorListStart() < AArch64::Z4) &&
               (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
        Inst.addOperand(Op: MCOperand::createReg(
            Reg: AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
      } else {
        // High half: start must be Z16..Z19.
        assert((getVectorListStart() < AArch64::Z20) &&
               (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
        Inst.addOperand(Op: MCOperand::createReg(
            Reg: AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
      }
      break;
    default:
      llvm_unreachable("Unsupported number of registers for strided vec list");
    }
  }
1977
1978 void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
1979 assert(N == 1 && "Invalid number of operands!");
1980 unsigned RegMask = getMatrixTileListRegMask();
1981 assert(RegMask <= 0xFF && "Invalid mask!");
1982 Inst.addOperand(Op: MCOperand::createImm(Val: RegMask));
1983 }
1984
1985 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1986 assert(N == 1 && "Invalid number of operands!");
1987 Inst.addOperand(Op: MCOperand::createImm(Val: getVectorIndex()));
1988 }
1989
  // Emit a two-valued FP immediate (e.g. #0.5 / #1.0 in FCADD-style insts)
  // as a single bit: 0 selects the ImmIs0 value, 1 the ImmIs1 value.
  template <unsigned ImmIs0, unsigned ImmIs1>
  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
    Inst.addOperand(Op: MCOperand::createImm(Val: bool(isExactFPImm<ImmIs1>())));
  }
1996
1997 void addImmOperands(MCInst &Inst, unsigned N) const {
1998 assert(N == 1 && "Invalid number of operands!");
1999 // If this is a pageoff symrefexpr with an addend, adjust the addend
2000 // to be only the page-offset portion. Otherwise, just add the expr
2001 // as-is.
2002 addExpr(Inst, Expr: getImm());
2003 }
2004
  // Emit an (immediate, shift) operand pair. Prefer the canonicalized
  // shifted form when the value fits; fall back to an explicitly parsed
  // shifted immediate, then to a plain immediate with shift 0.
  template <int Shift>
  void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(Op: MCOperand::createImm(Val: ShiftedVal->first));
      Inst.addOperand(Op: MCOperand::createImm(Val: ShiftedVal->second));
    } else if (isShiftedImm()) {
      addExpr(Inst, Expr: getShiftedImmVal());
      Inst.addOperand(Op: MCOperand::createImm(Val: getShiftedImmShift()));
    } else {
      addExpr(Inst, Expr: getImm());
      Inst.addOperand(Op: MCOperand::createImm(Val: 0));
    }
  }
2019
2020 template <int Shift>
2021 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
2022 assert(N == 2 && "Invalid number of operands!");
2023 if (auto ShiftedVal = getShiftedVal<Shift>()) {
2024 Inst.addOperand(Op: MCOperand::createImm(Val: -ShiftedVal->first));
2025 Inst.addOperand(Op: MCOperand::createImm(Val: ShiftedVal->second));
2026 } else
2027 llvm_unreachable("Not a shifted negative immediate");
2028 }
2029
2030 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
2031 assert(N == 1 && "Invalid number of operands!");
2032 Inst.addOperand(Op: MCOperand::createImm(Val: getCondCode()));
2033 }
2034
2035 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
2036 assert(N == 1 && "Invalid number of operands!");
2037 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
2038 if (!MCE)
2039 addExpr(Inst, Expr: getImm());
2040 else
2041 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 12));
2042 }
2043
  // ADR labels are byte offsets, so no scaling is needed; plain immediate
  // handling suffices.
  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }
2047
2048 template<int Scale>
2049 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2050 assert(N == 1 && "Invalid number of operands!");
2051 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
2052
2053 if (!MCE) {
2054 Inst.addOperand(Op: MCOperand::createExpr(Val: getImm()));
2055 return;
2056 }
2057 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() / Scale));
2058 }
2059
2060 void addUImm6Operands(MCInst &Inst, unsigned N) const {
2061 assert(N == 1 && "Invalid number of operands!");
2062 const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
2063 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue()));
2064 }
2065
2066 template <int Scale>
2067 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
2068 assert(N == 1 && "Invalid number of operands!");
2069 const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
2070 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() / Scale));
2071 }
2072
2073 template <int Scale>
2074 void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
2075 assert(N == 1 && "Invalid number of operands!");
2076 Inst.addOperand(Op: MCOperand::createImm(Val: getFirstImmVal() / Scale));
2077 }
2078
2079 template <typename T>
2080 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
2081 assert(N == 1 && "Invalid number of operands!");
2082 const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
2083 std::make_unsigned_t<T> Val = MCE->getValue();
2084 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(imm: Val, regSize: sizeof(T) * 8);
2085 Inst.addOperand(Op: MCOperand::createImm(Val: encoding));
2086 }
2087
2088 template <typename T>
2089 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
2090 assert(N == 1 && "Invalid number of operands!");
2091 const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
2092 std::make_unsigned_t<T> Val = ~MCE->getValue();
2093 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(imm: Val, regSize: sizeof(T) * 8);
2094 Inst.addOperand(Op: MCOperand::createImm(Val: encoding));
2095 }
2096
2097 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
2098 assert(N == 1 && "Invalid number of operands!");
2099 const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
2100 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(Imm: MCE->getValue());
2101 Inst.addOperand(Op: MCOperand::createImm(Val: encoding));
2102 }
2103
2104 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
2105 // Branch operands don't encode the low bits, so shift them off
2106 // here. If it's a label, however, just put it on directly as there's
2107 // not enough information now to do anything.
2108 assert(N == 1 && "Invalid number of operands!");
2109 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
2110 if (!MCE) {
2111 addExpr(Inst, Expr: getImm());
2112 return;
2113 }
2114 assert(MCE && "Invalid constant immediate operand!");
2115 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 2));
2116 }
2117
2118 void addPAuthPCRelLabel16Operands(MCInst &Inst, unsigned N) const {
2119 // PC-relative operands don't encode the low bits, so shift them off
2120 // here. If it's a label, however, just put it on directly as there's
2121 // not enough information now to do anything.
2122 assert(N == 1 && "Invalid number of operands!");
2123 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
2124 if (!MCE) {
2125 addExpr(Inst, Expr: getImm());
2126 return;
2127 }
2128 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 2));
2129 }
2130
2131 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
2132 // Branch operands don't encode the low bits, so shift them off
2133 // here. If it's a label, however, just put it on directly as there's
2134 // not enough information now to do anything.
2135 assert(N == 1 && "Invalid number of operands!");
2136 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
2137 if (!MCE) {
2138 addExpr(Inst, Expr: getImm());
2139 return;
2140 }
2141 assert(MCE && "Invalid constant immediate operand!");
2142 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 2));
2143 }
2144
2145 void addPCRelLabel9Operands(MCInst &Inst, unsigned N) const {
2146 // Branch operands don't encode the low bits, so shift them off
2147 // here. If it's a label, however, just put it on directly as there's
2148 // not enough information now to do anything.
2149 assert(N == 1 && "Invalid number of operands!");
2150 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
2151 if (!MCE) {
2152 addExpr(Inst, Expr: getImm());
2153 return;
2154 }
2155 assert(MCE && "Invalid constant immediate operand!");
2156 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 2));
2157 }
2158
2159 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
2160 // Branch operands don't encode the low bits, so shift them off
2161 // here. If it's a label, however, just put it on directly as there's
2162 // not enough information now to do anything.
2163 assert(N == 1 && "Invalid number of operands!");
2164 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
2165 if (!MCE) {
2166 addExpr(Inst, Expr: getImm());
2167 return;
2168 }
2169 assert(MCE && "Invalid constant immediate operand!");
2170 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 2));
2171 }
2172
2173 void addFPImmOperands(MCInst &Inst, unsigned N) const {
2174 assert(N == 1 && "Invalid number of operands!");
2175 Inst.addOperand(Op: MCOperand::createImm(
2176 Val: AArch64_AM::getFP64Imm(Imm: getFPImm().bitcastToAPInt())));
2177 }
2178
2179 void addBarrierOperands(MCInst &Inst, unsigned N) const {
2180 assert(N == 1 && "Invalid number of operands!");
2181 Inst.addOperand(Op: MCOperand::createImm(Val: getBarrier()));
2182 }
2183
2184 void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
2185 assert(N == 1 && "Invalid number of operands!");
2186 Inst.addOperand(Op: MCOperand::createImm(Val: getBarrier()));
2187 }
2188
2189 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2190 assert(N == 1 && "Invalid number of operands!");
2191
2192 Inst.addOperand(Op: MCOperand::createImm(Val: SysReg.MRSReg));
2193 }
2194
2195 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2196 assert(N == 1 && "Invalid number of operands!");
2197
2198 Inst.addOperand(Op: MCOperand::createImm(Val: SysReg.MSRReg));
2199 }
2200
2201 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
2202 assert(N == 1 && "Invalid number of operands!");
2203
2204 Inst.addOperand(Op: MCOperand::createImm(Val: SysReg.PStateField));
2205 }
2206
2207 void addSVCROperands(MCInst &Inst, unsigned N) const {
2208 assert(N == 1 && "Invalid number of operands!");
2209
2210 Inst.addOperand(Op: MCOperand::createImm(Val: SVCR.PStateField));
2211 }
2212
2213 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
2214 assert(N == 1 && "Invalid number of operands!");
2215
2216 Inst.addOperand(Op: MCOperand::createImm(Val: SysReg.PStateField));
2217 }
2218
2219 void addSysCROperands(MCInst &Inst, unsigned N) const {
2220 assert(N == 1 && "Invalid number of operands!");
2221 Inst.addOperand(Op: MCOperand::createImm(Val: getSysCR()));
2222 }
2223
2224 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
2225 assert(N == 1 && "Invalid number of operands!");
2226 Inst.addOperand(Op: MCOperand::createImm(Val: getPrefetch()));
2227 }
2228
2229 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
2230 assert(N == 1 && "Invalid number of operands!");
2231 Inst.addOperand(Op: MCOperand::createImm(Val: getPSBHint()));
2232 }
2233
2234 void addPHintOperands(MCInst &Inst, unsigned N) const {
2235 assert(N == 1 && "Invalid number of operands!");
2236 Inst.addOperand(Op: MCOperand::createImm(Val: getPHint()));
2237 }
2238
2239 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
2240 assert(N == 1 && "Invalid number of operands!");
2241 Inst.addOperand(Op: MCOperand::createImm(Val: getBTIHint()));
2242 }
2243
2244 void addCMHPriorityHintOperands(MCInst &Inst, unsigned N) const {
2245 assert(N == 1 && "Invalid number of operands!");
2246 Inst.addOperand(Op: MCOperand::createImm(Val: getCMHPriorityHint()));
2247 }
2248
2249 void addTIndexHintOperands(MCInst &Inst, unsigned N) const {
2250 assert(N == 1 && "Invalid number of operands!");
2251 Inst.addOperand(Op: MCOperand::createImm(Val: getTIndexHint()));
2252 }
2253
2254 void addShifterOperands(MCInst &Inst, unsigned N) const {
2255 assert(N == 1 && "Invalid number of operands!");
2256 unsigned Imm =
2257 AArch64_AM::getShifterImm(ST: getShiftExtendType(), Imm: getShiftExtendAmount());
2258 Inst.addOperand(Op: MCOperand::createImm(Val: Imm));
2259 }
2260
2261 void addLSLImm3ShifterOperands(MCInst &Inst, unsigned N) const {
2262 assert(N == 1 && "Invalid number of operands!");
2263 unsigned Imm = getShiftExtendAmount();
2264 Inst.addOperand(Op: MCOperand::createImm(Val: Imm));
2265 }
2266
  // SYSP's register pair alias: the operand must be the 64-bit zero
  // register, which is added (once) as the pair operand.
  void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    // NOTE(review): a non-scalar register silently adds no operand here —
    // presumably the matcher has already rejected that case; confirm.
    if (!isScalarReg())
      return;

    // Normalize through the GPR64 class so W/X sub/super-register forms
    // with the same encoding compare equal to XZR.
    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    MCRegister Reg = RI->getRegClass(i: AArch64::GPR64RegClassID)
                         .getRegister(i: RI->getEncodingValue(Reg: getReg()));
    if (Reg != AArch64::XZR)
      llvm_unreachable("wrong register");

    Inst.addOperand(Op: MCOperand::createReg(Reg: AArch64::XZR));
  }
2281
2282 void addExtendOperands(MCInst &Inst, unsigned N) const {
2283 assert(N == 1 && "Invalid number of operands!");
2284 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2285 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
2286 unsigned Imm = AArch64_AM::getArithExtendImm(ET, Imm: getShiftExtendAmount());
2287 Inst.addOperand(Op: MCOperand::createImm(Val: Imm));
2288 }
2289
2290 void addExtend64Operands(MCInst &Inst, unsigned N) const {
2291 assert(N == 1 && "Invalid number of operands!");
2292 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2293 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
2294 unsigned Imm = AArch64_AM::getArithExtendImm(ET, Imm: getShiftExtendAmount());
2295 Inst.addOperand(Op: MCOperand::createImm(Val: Imm));
2296 }
2297
2298 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
2299 assert(N == 2 && "Invalid number of operands!");
2300 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2301 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2302 Inst.addOperand(Op: MCOperand::createImm(Val: IsSigned));
2303 Inst.addOperand(Op: MCOperand::createImm(Val: getShiftExtendAmount() != 0));
2304 }
2305
2306 // For 8-bit load/store instructions with a register offset, both the
2307 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
2308 // they're disambiguated by whether the shift was explicit or implicit rather
2309 // than its size.
2310 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
2311 assert(N == 2 && "Invalid number of operands!");
2312 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2313 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2314 Inst.addOperand(Op: MCOperand::createImm(Val: IsSigned));
2315 Inst.addOperand(Op: MCOperand::createImm(Val: hasShiftExtendAmount()));
2316 }
2317
2318 template<int Shift>
2319 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
2320 assert(N == 1 && "Invalid number of operands!");
2321
2322 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
2323 if (CE) {
2324 uint64_t Value = CE->getValue();
2325 Inst.addOperand(Op: MCOperand::createImm(Val: (Value >> Shift) & 0xffff));
2326 } else {
2327 addExpr(Inst, Expr: getImm());
2328 }
2329 }
2330
2331 template<int Shift>
2332 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
2333 assert(N == 1 && "Invalid number of operands!");
2334
2335 const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
2336 uint64_t Value = CE->getValue();
2337 Inst.addOperand(Op: MCOperand::createImm(Val: (~Value >> Shift) & 0xffff));
2338 }
2339
2340 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
2341 assert(N == 1 && "Invalid number of operands!");
2342 const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
2343 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() / 90));
2344 }
2345
2346 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
2347 assert(N == 1 && "Invalid number of operands!");
2348 const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
2349 Inst.addOperand(Op: MCOperand::createImm(Val: (MCE->getValue() - 90) / 180));
2350 }
2351
  // Debug pretty-printer; defined out-of-line after the class.
  void print(raw_ostream &OS, const MCAsmInfo &MAI) const override;
2353
2354 static std::unique_ptr<AArch64Operand>
2355 CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
2356 auto Op = std::make_unique<AArch64Operand>(args: k_Token, args&: Ctx);
2357 Op->Tok.Data = Str.data();
2358 Op->Tok.Length = Str.size();
2359 Op->Tok.IsSuffix = IsSuffix;
2360 Op->StartLoc = S;
2361 Op->EndLoc = S;
2362 return Op;
2363 }
2364
  // Build a register operand. EqTy controls how the register is compared
  // against a constraint; the trailing defaults describe an optional
  // shift/extend modifier attached to the register (amount 0, implicit,
  // plain LSL unless specified).
  static std::unique_ptr<AArch64Operand>
  CreateReg(MCRegister Reg, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
            AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
            unsigned ShiftAmount = 0, unsigned HasExplicitAmount = false) {
    auto Op = std::make_unique<AArch64Operand>(args: k_Register, args&: Ctx);
    Op->Reg.Reg = Reg;
    Op->Reg.Kind = Kind;
    // ElementWidth stays 0 for scalar registers; CreateVectorReg overwrites.
    Op->Reg.ElementWidth = 0;
    Op->Reg.EqualityTy = EqTy;
    Op->Reg.ShiftExtend.Type = ExtTy;
    Op->Reg.ShiftExtend.Amount = ShiftAmount;
    Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2382
  // Build a vector register operand: a plain register operand (EqualsReg
  // constraint) with the element width recorded on top.
  static std::unique_ptr<AArch64Operand> CreateVectorReg(
      MCRegister Reg, RegKind Kind, unsigned ElementWidth, SMLoc S, SMLoc E,
      MCContext &Ctx, AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
      unsigned ShiftAmount = 0, unsigned HasExplicitAmount = false) {
    assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
            Kind == RegKind::SVEPredicateVector ||
            Kind == RegKind::SVEPredicateAsCounter) &&
           "Invalid vector kind");
    auto Op = CreateReg(Reg, Kind, S, E, Ctx, EqTy: EqualsReg, ExtTy, ShiftAmount,
                        HasExplicitAmount);
    Op->Reg.ElementWidth = ElementWidth;
    return Op;
  }
2396
2397 static std::unique_ptr<AArch64Operand>
2398 CreateVectorList(MCRegister Reg, unsigned Count, unsigned Stride,
2399 unsigned NumElements, unsigned ElementWidth,
2400 RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
2401 auto Op = std::make_unique<AArch64Operand>(args: k_VectorList, args&: Ctx);
2402 Op->VectorList.Reg = Reg;
2403 Op->VectorList.Count = Count;
2404 Op->VectorList.Stride = Stride;
2405 Op->VectorList.NumElements = NumElements;
2406 Op->VectorList.ElementWidth = ElementWidth;
2407 Op->VectorList.RegisterKind = RegisterKind;
2408 Op->StartLoc = S;
2409 Op->EndLoc = E;
2410 return Op;
2411 }
2412
2413 static std::unique_ptr<AArch64Operand>
2414 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2415 auto Op = std::make_unique<AArch64Operand>(args: k_VectorIndex, args&: Ctx);
2416 Op->VectorIndex.Val = Idx;
2417 Op->StartLoc = S;
2418 Op->EndLoc = E;
2419 return Op;
2420 }
2421
2422 static std::unique_ptr<AArch64Operand>
2423 CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2424 auto Op = std::make_unique<AArch64Operand>(args: k_MatrixTileList, args&: Ctx);
2425 Op->MatrixTileList.RegMask = RegMask;
2426 Op->StartLoc = S;
2427 Op->EndLoc = E;
2428 return Op;
2429 }
2430
2431 static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2432 const unsigned ElementWidth) {
2433 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2434 RegMap = {
2435 {{0, AArch64::ZAB0},
2436 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2437 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2438 {{8, AArch64::ZAB0},
2439 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2440 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2441 {{16, AArch64::ZAH0},
2442 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2443 {{16, AArch64::ZAH1},
2444 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2445 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2446 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2447 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2448 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2449 };
2450
2451 if (ElementWidth == 64)
2452 OutRegs.insert(V: Reg);
2453 else {
2454 std::vector<unsigned> Regs = RegMap[std::make_pair(x: ElementWidth, y&: Reg)];
2455 assert(!Regs.empty() && "Invalid tile or element width!");
2456 OutRegs.insert_range(R&: Regs);
2457 }
2458 }
2459
2460 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2461 SMLoc E, MCContext &Ctx) {
2462 auto Op = std::make_unique<AArch64Operand>(args: k_Immediate, args&: Ctx);
2463 Op->Imm.Val = Val;
2464 Op->StartLoc = S;
2465 Op->EndLoc = E;
2466 return Op;
2467 }
2468
2469 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2470 unsigned ShiftAmount,
2471 SMLoc S, SMLoc E,
2472 MCContext &Ctx) {
2473 auto Op = std::make_unique<AArch64Operand>(args: k_ShiftedImm, args&: Ctx);
2474 Op->ShiftedImm .Val = Val;
2475 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2476 Op->StartLoc = S;
2477 Op->EndLoc = E;
2478 return Op;
2479 }
2480
2481 static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
2482 unsigned Last, SMLoc S,
2483 SMLoc E,
2484 MCContext &Ctx) {
2485 auto Op = std::make_unique<AArch64Operand>(args: k_ImmRange, args&: Ctx);
2486 Op->ImmRange.First = First;
2487 Op->ImmRange.Last = Last;
2488 Op->EndLoc = E;
2489 return Op;
2490 }
2491
2492 static std::unique_ptr<AArch64Operand>
2493 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2494 auto Op = std::make_unique<AArch64Operand>(args: k_CondCode, args&: Ctx);
2495 Op->CondCode.Code = Code;
2496 Op->StartLoc = S;
2497 Op->EndLoc = E;
2498 return Op;
2499 }
2500
2501 static std::unique_ptr<AArch64Operand>
2502 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2503 auto Op = std::make_unique<AArch64Operand>(args: k_FPImm, args&: Ctx);
2504 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2505 Op->FPImm.IsExact = IsExact;
2506 Op->StartLoc = S;
2507 Op->EndLoc = S;
2508 return Op;
2509 }
2510
2511 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2512 StringRef Str,
2513 SMLoc S,
2514 MCContext &Ctx,
2515 bool HasnXSModifier) {
2516 auto Op = std::make_unique<AArch64Operand>(args: k_Barrier, args&: Ctx);
2517 Op->Barrier.Val = Val;
2518 Op->Barrier.Data = Str.data();
2519 Op->Barrier.Length = Str.size();
2520 Op->Barrier.HasnXSModifier = HasnXSModifier;
2521 Op->StartLoc = S;
2522 Op->EndLoc = S;
2523 return Op;
2524 }
2525
2526 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2527 uint32_t MRSReg,
2528 uint32_t MSRReg,
2529 uint32_t PStateField,
2530 MCContext &Ctx) {
2531 auto Op = std::make_unique<AArch64Operand>(args: k_SysReg, args&: Ctx);
2532 Op->SysReg.Data = Str.data();
2533 Op->SysReg.Length = Str.size();
2534 Op->SysReg.MRSReg = MRSReg;
2535 Op->SysReg.MSRReg = MSRReg;
2536 Op->SysReg.PStateField = PStateField;
2537 Op->StartLoc = S;
2538 Op->EndLoc = S;
2539 return Op;
2540 }
2541
2542 static std::unique_ptr<AArch64Operand>
2543 CreatePHintInst(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2544 auto Op = std::make_unique<AArch64Operand>(args: k_PHint, args&: Ctx);
2545 Op->PHint.Val = Val;
2546 Op->PHint.Data = Str.data();
2547 Op->PHint.Length = Str.size();
2548 Op->StartLoc = S;
2549 Op->EndLoc = S;
2550 return Op;
2551 }
2552
2553 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2554 SMLoc E, MCContext &Ctx) {
2555 auto Op = std::make_unique<AArch64Operand>(args: k_SysCR, args&: Ctx);
2556 Op->SysCRImm.Val = Val;
2557 Op->StartLoc = S;
2558 Op->EndLoc = E;
2559 return Op;
2560 }
2561
2562 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2563 StringRef Str,
2564 SMLoc S,
2565 MCContext &Ctx) {
2566 auto Op = std::make_unique<AArch64Operand>(args: k_Prefetch, args&: Ctx);
2567 Op->Prefetch.Val = Val;
2568 Op->Barrier.Data = Str.data();
2569 Op->Barrier.Length = Str.size();
2570 Op->StartLoc = S;
2571 Op->EndLoc = S;
2572 return Op;
2573 }
2574
2575 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2576 StringRef Str,
2577 SMLoc S,
2578 MCContext &Ctx) {
2579 auto Op = std::make_unique<AArch64Operand>(args: k_PSBHint, args&: Ctx);
2580 Op->PSBHint.Val = Val;
2581 Op->PSBHint.Data = Str.data();
2582 Op->PSBHint.Length = Str.size();
2583 Op->StartLoc = S;
2584 Op->EndLoc = S;
2585 return Op;
2586 }
2587
  // Build a BTI hint operand, keeping the spelled name for diagnostics.
  static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
                                                       StringRef Str,
                                                       SMLoc S,
                                                       MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(args: k_BTIHint, args&: Ctx);
    // Bit 5 is forced on — presumably to place the value in the BTI slice
    // of the hint-instruction space; confirm against the HINT encoding.
    Op->BTIHint.Val = Val | 32;
    Op->BTIHint.Data = Str.data();
    Op->BTIHint.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2600
2601 static std::unique_ptr<AArch64Operand>
2602 CreateCMHPriorityHint(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2603 auto Op = std::make_unique<AArch64Operand>(args: k_CMHPriorityHint, args&: Ctx);
2604 Op->CMHPriorityHint.Val = Val;
2605 Op->CMHPriorityHint.Data = Str.data();
2606 Op->CMHPriorityHint.Length = Str.size();
2607 Op->StartLoc = S;
2608 Op->EndLoc = S;
2609 return Op;
2610 }
2611
2612 static std::unique_ptr<AArch64Operand>
2613 CreateTIndexHint(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2614 auto Op = std::make_unique<AArch64Operand>(args: k_TIndexHint, args&: Ctx);
2615 Op->TIndexHint.Val = Val;
2616 Op->TIndexHint.Data = Str.data();
2617 Op->TIndexHint.Length = Str.size();
2618 Op->StartLoc = S;
2619 Op->EndLoc = S;
2620 return Op;
2621 }
2622
2623 static std::unique_ptr<AArch64Operand>
2624 CreateMatrixRegister(MCRegister Reg, unsigned ElementWidth, MatrixKind Kind,
2625 SMLoc S, SMLoc E, MCContext &Ctx) {
2626 auto Op = std::make_unique<AArch64Operand>(args: k_MatrixRegister, args&: Ctx);
2627 Op->MatrixReg.Reg = Reg;
2628 Op->MatrixReg.ElementWidth = ElementWidth;
2629 Op->MatrixReg.Kind = Kind;
2630 Op->StartLoc = S;
2631 Op->EndLoc = E;
2632 return Op;
2633 }
2634
2635 static std::unique_ptr<AArch64Operand>
2636 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2637 auto Op = std::make_unique<AArch64Operand>(args: k_SVCR, args&: Ctx);
2638 Op->SVCR.PStateField = PStateField;
2639 Op->SVCR.Data = Str.data();
2640 Op->SVCR.Length = Str.size();
2641 Op->StartLoc = S;
2642 Op->EndLoc = S;
2643 return Op;
2644 }
2645
2646 static std::unique_ptr<AArch64Operand>
2647 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2648 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2649 auto Op = std::make_unique<AArch64Operand>(args: k_ShiftExtend, args&: Ctx);
2650 Op->ShiftExtend.Type = ShOp;
2651 Op->ShiftExtend.Amount = Val;
2652 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2653 Op->StartLoc = S;
2654 Op->EndLoc = E;
2655 return Op;
2656 }
2657};
2658
2659} // end anonymous namespace.
2660
// Debug dump of an operand; one case per operand kind. Output format is
// informal and for diagnostics only.
void AArch64Operand::print(raw_ostream &OS, const MCAsmInfo &MAI) const {
  switch (Kind) {
  case k_FPImm:
    OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
    if (!getFPImmIsExact())
      OS << " (inexact)";
    OS << ">";
    break;
  case k_Barrier: {
    StringRef Name = getBarrierName();
    if (!Name.empty())
      OS << "<barrier " << Name << ">";
    else
      OS << "<barrier invalid #" << getBarrier() << ">";
    break;
  }
  case k_Immediate:
    MAI.printExpr(OS, *getImm());
    break;
  case k_ShiftedImm: {
    unsigned Shift = getShiftedImmShift();
    OS << "<shiftedimm ";
    MAI.printExpr(OS, *getShiftedImmVal());
    OS << ", lsl #" << AArch64_AM::getShiftValue(Imm: Shift) << ">";
    break;
  }
  case k_ImmRange: {
    OS << "<immrange ";
    OS << getFirstImmVal();
    OS << ":" << getLastImmVal() << ">";
    break;
  }
  case k_CondCode:
    OS << "<condcode " << getCondCode() << ">";
    break;
  case k_VectorList: {
    // Prints the raw register numbers of each list element.
    OS << "<vectorlist ";
    MCRegister Reg = getVectorListStart();
    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
      OS << Reg.id() + i * getVectorListStride() << " ";
    OS << ">";
    break;
  }
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  case k_SysReg:
    OS << "<sysreg: " << getSysReg() << '>';
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_SysCR:
    OS << "c" << getSysCR();
    break;
  case k_Prefetch: {
    StringRef Name = getPrefetchName();
    if (!Name.empty())
      OS << "<prfop " << Name << ">";
    else
      OS << "<prfop invalid #" << getPrefetch() << ">";
    break;
  }
  case k_PSBHint:
    OS << getPSBHintName();
    break;
  case k_PHint:
    OS << getPHintName();
    break;
  case k_BTIHint:
    OS << getBTIHintName();
    break;
  case k_CMHPriorityHint:
    OS << getCMHPriorityHintName();
    break;
  case k_TIndexHint:
    OS << getTIndexHintName();
    break;
  case k_MatrixRegister:
    OS << "<matrix " << getMatrixReg().id() << ">";
    break;
  case k_MatrixTileList: {
    // Prints the 8-bit tile mask as binary, most-significant bit first.
    OS << "<matrixlist ";
    unsigned RegMask = getMatrixTileListRegMask();
    unsigned MaxBits = 8;
    for (unsigned I = MaxBits; I > 0; --I)
      OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
    OS << '>';
    break;
  }
  case k_SVCR: {
    OS << getSVCR();
    break;
  }
  case k_Register:
    OS << "<register " << getReg().id() << ">";
    // Registers with an attached shift/extend fall through to print it.
    if (!getShiftExtendAmount() && !hasShiftExtendAmount())
      break;
    [[fallthrough]];
  case k_ShiftExtend:
    OS << "<" << AArch64_AM::getShiftExtendName(ST: getShiftExtendType()) << " #"
       << getShiftExtendAmount();
    if (!hasShiftExtendAmount())
      OS << "<imp>";
    OS << '>';
    break;
  }
}
2769
2770/// @name Auto-generated Match Functions
2771/// {
2772
2773static MCRegister MatchRegisterName(StringRef Name);
2774
2775/// }
2776
// Map a NEON vector register spelling ("v0".."v31", case-insensitive) to
// the corresponding Q register, or 0 if the name does not match exactly.
static unsigned MatchNeonVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case(S: "v0", Value: AArch64::Q0)
      .Case(S: "v1", Value: AArch64::Q1)
      .Case(S: "v2", Value: AArch64::Q2)
      .Case(S: "v3", Value: AArch64::Q3)
      .Case(S: "v4", Value: AArch64::Q4)
      .Case(S: "v5", Value: AArch64::Q5)
      .Case(S: "v6", Value: AArch64::Q6)
      .Case(S: "v7", Value: AArch64::Q7)
      .Case(S: "v8", Value: AArch64::Q8)
      .Case(S: "v9", Value: AArch64::Q9)
      .Case(S: "v10", Value: AArch64::Q10)
      .Case(S: "v11", Value: AArch64::Q11)
      .Case(S: "v12", Value: AArch64::Q12)
      .Case(S: "v13", Value: AArch64::Q13)
      .Case(S: "v14", Value: AArch64::Q14)
      .Case(S: "v15", Value: AArch64::Q15)
      .Case(S: "v16", Value: AArch64::Q16)
      .Case(S: "v17", Value: AArch64::Q17)
      .Case(S: "v18", Value: AArch64::Q18)
      .Case(S: "v19", Value: AArch64::Q19)
      .Case(S: "v20", Value: AArch64::Q20)
      .Case(S: "v21", Value: AArch64::Q21)
      .Case(S: "v22", Value: AArch64::Q22)
      .Case(S: "v23", Value: AArch64::Q23)
      .Case(S: "v24", Value: AArch64::Q24)
      .Case(S: "v25", Value: AArch64::Q25)
      .Case(S: "v26", Value: AArch64::Q26)
      .Case(S: "v27", Value: AArch64::Q27)
      .Case(S: "v28", Value: AArch64::Q28)
      .Case(S: "v29", Value: AArch64::Q29)
      .Case(S: "v30", Value: AArch64::Q30)
      .Case(S: "v31", Value: AArch64::Q31)
      .Default(Value: 0);
}
2813
2814/// Returns an optional pair of (#elements, element-width) if Suffix
2815/// is a valid vector kind. Where the number of elements in a vector
2816/// or the vector width is implicit or explicitly unknown (but still a
2817/// valid suffix kind), 0 is used.
2818static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2819 RegKind VectorKind) {
2820 std::pair<int, int> Res = {-1, -1};
2821
2822 switch (VectorKind) {
2823 case RegKind::NeonVector:
2824 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2825 .Case(S: "", Value: {0, 0})
2826 .Case(S: ".1d", Value: {1, 64})
2827 .Case(S: ".1q", Value: {1, 128})
2828 // '.2h' needed for fp16 scalar pairwise reductions
2829 .Case(S: ".2h", Value: {2, 16})
2830 .Case(S: ".2b", Value: {2, 8})
2831 .Case(S: ".2s", Value: {2, 32})
2832 .Case(S: ".2d", Value: {2, 64})
2833 // '.4b' is another special case for the ARMv8.2a dot product
2834 // operand
2835 .Case(S: ".4b", Value: {4, 8})
2836 .Case(S: ".4h", Value: {4, 16})
2837 .Case(S: ".4s", Value: {4, 32})
2838 .Case(S: ".8b", Value: {8, 8})
2839 .Case(S: ".8h", Value: {8, 16})
2840 .Case(S: ".16b", Value: {16, 8})
2841 // Accept the width neutral ones, too, for verbose syntax. If
2842 // those aren't used in the right places, the token operand won't
2843 // match so all will work out.
2844 .Case(S: ".b", Value: {0, 8})
2845 .Case(S: ".h", Value: {0, 16})
2846 .Case(S: ".s", Value: {0, 32})
2847 .Case(S: ".d", Value: {0, 64})
2848 .Default(Value: {-1, -1});
2849 break;
2850 case RegKind::SVEPredicateAsCounter:
2851 case RegKind::SVEPredicateVector:
2852 case RegKind::SVEDataVector:
2853 case RegKind::Matrix:
2854 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2855 .Case(S: "", Value: {0, 0})
2856 .Case(S: ".b", Value: {0, 8})
2857 .Case(S: ".h", Value: {0, 16})
2858 .Case(S: ".s", Value: {0, 32})
2859 .Case(S: ".d", Value: {0, 64})
2860 .Case(S: ".q", Value: {0, 128})
2861 .Default(Value: {-1, -1});
2862 break;
2863 default:
2864 llvm_unreachable("Unsupported RegKind");
2865 }
2866
2867 if (Res == std::make_pair(x: -1, y: -1))
2868 return std::nullopt;
2869
2870 return std::optional<std::pair<int, int>>(Res);
2871}
2872
2873static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2874 return parseVectorKind(Suffix, VectorKind).has_value();
2875}
2876
// Map an SVE data vector register name ("z0".."z31", case-insensitive) to
// the corresponding Z-register id, or 0 if the name is not an SVE data
// register.
static unsigned matchSVEDataVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case(S: "z0", Value: AArch64::Z0)
      .Case(S: "z1", Value: AArch64::Z1)
      .Case(S: "z2", Value: AArch64::Z2)
      .Case(S: "z3", Value: AArch64::Z3)
      .Case(S: "z4", Value: AArch64::Z4)
      .Case(S: "z5", Value: AArch64::Z5)
      .Case(S: "z6", Value: AArch64::Z6)
      .Case(S: "z7", Value: AArch64::Z7)
      .Case(S: "z8", Value: AArch64::Z8)
      .Case(S: "z9", Value: AArch64::Z9)
      .Case(S: "z10", Value: AArch64::Z10)
      .Case(S: "z11", Value: AArch64::Z11)
      .Case(S: "z12", Value: AArch64::Z12)
      .Case(S: "z13", Value: AArch64::Z13)
      .Case(S: "z14", Value: AArch64::Z14)
      .Case(S: "z15", Value: AArch64::Z15)
      .Case(S: "z16", Value: AArch64::Z16)
      .Case(S: "z17", Value: AArch64::Z17)
      .Case(S: "z18", Value: AArch64::Z18)
      .Case(S: "z19", Value: AArch64::Z19)
      .Case(S: "z20", Value: AArch64::Z20)
      .Case(S: "z21", Value: AArch64::Z21)
      .Case(S: "z22", Value: AArch64::Z22)
      .Case(S: "z23", Value: AArch64::Z23)
      .Case(S: "z24", Value: AArch64::Z24)
      .Case(S: "z25", Value: AArch64::Z25)
      .Case(S: "z26", Value: AArch64::Z26)
      .Case(S: "z27", Value: AArch64::Z27)
      .Case(S: "z28", Value: AArch64::Z28)
      .Case(S: "z29", Value: AArch64::Z29)
      .Case(S: "z30", Value: AArch64::Z30)
      .Case(S: "z31", Value: AArch64::Z31)
      .Default(Value: 0);
}
2913
// Map an SVE predicate register name ("p0".."p15", case-insensitive) to the
// corresponding P-register id, or 0 if the name is not an SVE predicate
// register.
static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case(S: "p0", Value: AArch64::P0)
      .Case(S: "p1", Value: AArch64::P1)
      .Case(S: "p2", Value: AArch64::P2)
      .Case(S: "p3", Value: AArch64::P3)
      .Case(S: "p4", Value: AArch64::P4)
      .Case(S: "p5", Value: AArch64::P5)
      .Case(S: "p6", Value: AArch64::P6)
      .Case(S: "p7", Value: AArch64::P7)
      .Case(S: "p8", Value: AArch64::P8)
      .Case(S: "p9", Value: AArch64::P9)
      .Case(S: "p10", Value: AArch64::P10)
      .Case(S: "p11", Value: AArch64::P11)
      .Case(S: "p12", Value: AArch64::P12)
      .Case(S: "p13", Value: AArch64::P13)
      .Case(S: "p14", Value: AArch64::P14)
      .Case(S: "p15", Value: AArch64::P15)
      .Default(Value: 0);
}
2934
// Map a predicate-as-counter register name ("pn0".."pn15", case-insensitive)
// to the corresponding PN-register id, or 0 if the name does not match.
static unsigned matchSVEPredicateAsCounterRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case(S: "pn0", Value: AArch64::PN0)
      .Case(S: "pn1", Value: AArch64::PN1)
      .Case(S: "pn2", Value: AArch64::PN2)
      .Case(S: "pn3", Value: AArch64::PN3)
      .Case(S: "pn4", Value: AArch64::PN4)
      .Case(S: "pn5", Value: AArch64::PN5)
      .Case(S: "pn6", Value: AArch64::PN6)
      .Case(S: "pn7", Value: AArch64::PN7)
      .Case(S: "pn8", Value: AArch64::PN8)
      .Case(S: "pn9", Value: AArch64::PN9)
      .Case(S: "pn10", Value: AArch64::PN10)
      .Case(S: "pn11", Value: AArch64::PN11)
      .Case(S: "pn12", Value: AArch64::PN12)
      .Case(S: "pn13", Value: AArch64::PN13)
      .Case(S: "pn14", Value: AArch64::PN14)
      .Case(S: "pn15", Value: AArch64::PN15)
      .Default(Value: 0);
}
2955
// Map an SME ZA tile name as it may appear inside a tile list
// ("zaN.b/h/s/d", case-insensitive) to the corresponding tile register id,
// or 0 if the name is not accepted here. Note this accepts only the plain
// tile spellings -- no 'h'/'v' slice markers and no ".q" tiles (contrast
// with matchMatrixRegName below).
static unsigned matchMatrixTileListRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case(S: "za0.d", Value: AArch64::ZAD0)
      .Case(S: "za1.d", Value: AArch64::ZAD1)
      .Case(S: "za2.d", Value: AArch64::ZAD2)
      .Case(S: "za3.d", Value: AArch64::ZAD3)
      .Case(S: "za4.d", Value: AArch64::ZAD4)
      .Case(S: "za5.d", Value: AArch64::ZAD5)
      .Case(S: "za6.d", Value: AArch64::ZAD6)
      .Case(S: "za7.d", Value: AArch64::ZAD7)
      .Case(S: "za0.s", Value: AArch64::ZAS0)
      .Case(S: "za1.s", Value: AArch64::ZAS1)
      .Case(S: "za2.s", Value: AArch64::ZAS2)
      .Case(S: "za3.s", Value: AArch64::ZAS3)
      .Case(S: "za0.h", Value: AArch64::ZAH0)
      .Case(S: "za1.h", Value: AArch64::ZAH1)
      .Case(S: "za0.b", Value: AArch64::ZAB0)
      .Default(Value: 0);
}
2975
// Map an SME matrix register name (case-insensitive) to its register id:
// the full array "za", a sized tile "zaN.<b|h|s|d|q>", or a tile carrying an
// explicit horizontal/vertical slice marker ("zaNh.<size>" / "zaNv.<size>").
// The 'h' and 'v' spellings map to the same tile register as the unmarked
// form. Returns 0 for anything else.
static unsigned matchMatrixRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case(S: "za", Value: AArch64::ZA)
      .Case(S: "za0.q", Value: AArch64::ZAQ0)
      .Case(S: "za1.q", Value: AArch64::ZAQ1)
      .Case(S: "za2.q", Value: AArch64::ZAQ2)
      .Case(S: "za3.q", Value: AArch64::ZAQ3)
      .Case(S: "za4.q", Value: AArch64::ZAQ4)
      .Case(S: "za5.q", Value: AArch64::ZAQ5)
      .Case(S: "za6.q", Value: AArch64::ZAQ6)
      .Case(S: "za7.q", Value: AArch64::ZAQ7)
      .Case(S: "za8.q", Value: AArch64::ZAQ8)
      .Case(S: "za9.q", Value: AArch64::ZAQ9)
      .Case(S: "za10.q", Value: AArch64::ZAQ10)
      .Case(S: "za11.q", Value: AArch64::ZAQ11)
      .Case(S: "za12.q", Value: AArch64::ZAQ12)
      .Case(S: "za13.q", Value: AArch64::ZAQ13)
      .Case(S: "za14.q", Value: AArch64::ZAQ14)
      .Case(S: "za15.q", Value: AArch64::ZAQ15)
      .Case(S: "za0.d", Value: AArch64::ZAD0)
      .Case(S: "za1.d", Value: AArch64::ZAD1)
      .Case(S: "za2.d", Value: AArch64::ZAD2)
      .Case(S: "za3.d", Value: AArch64::ZAD3)
      .Case(S: "za4.d", Value: AArch64::ZAD4)
      .Case(S: "za5.d", Value: AArch64::ZAD5)
      .Case(S: "za6.d", Value: AArch64::ZAD6)
      .Case(S: "za7.d", Value: AArch64::ZAD7)
      .Case(S: "za0.s", Value: AArch64::ZAS0)
      .Case(S: "za1.s", Value: AArch64::ZAS1)
      .Case(S: "za2.s", Value: AArch64::ZAS2)
      .Case(S: "za3.s", Value: AArch64::ZAS3)
      .Case(S: "za0.h", Value: AArch64::ZAH0)
      .Case(S: "za1.h", Value: AArch64::ZAH1)
      .Case(S: "za0.b", Value: AArch64::ZAB0)
      // Horizontal-slice spellings: same tile registers as above.
      .Case(S: "za0h.q", Value: AArch64::ZAQ0)
      .Case(S: "za1h.q", Value: AArch64::ZAQ1)
      .Case(S: "za2h.q", Value: AArch64::ZAQ2)
      .Case(S: "za3h.q", Value: AArch64::ZAQ3)
      .Case(S: "za4h.q", Value: AArch64::ZAQ4)
      .Case(S: "za5h.q", Value: AArch64::ZAQ5)
      .Case(S: "za6h.q", Value: AArch64::ZAQ6)
      .Case(S: "za7h.q", Value: AArch64::ZAQ7)
      .Case(S: "za8h.q", Value: AArch64::ZAQ8)
      .Case(S: "za9h.q", Value: AArch64::ZAQ9)
      .Case(S: "za10h.q", Value: AArch64::ZAQ10)
      .Case(S: "za11h.q", Value: AArch64::ZAQ11)
      .Case(S: "za12h.q", Value: AArch64::ZAQ12)
      .Case(S: "za13h.q", Value: AArch64::ZAQ13)
      .Case(S: "za14h.q", Value: AArch64::ZAQ14)
      .Case(S: "za15h.q", Value: AArch64::ZAQ15)
      .Case(S: "za0h.d", Value: AArch64::ZAD0)
      .Case(S: "za1h.d", Value: AArch64::ZAD1)
      .Case(S: "za2h.d", Value: AArch64::ZAD2)
      .Case(S: "za3h.d", Value: AArch64::ZAD3)
      .Case(S: "za4h.d", Value: AArch64::ZAD4)
      .Case(S: "za5h.d", Value: AArch64::ZAD5)
      .Case(S: "za6h.d", Value: AArch64::ZAD6)
      .Case(S: "za7h.d", Value: AArch64::ZAD7)
      .Case(S: "za0h.s", Value: AArch64::ZAS0)
      .Case(S: "za1h.s", Value: AArch64::ZAS1)
      .Case(S: "za2h.s", Value: AArch64::ZAS2)
      .Case(S: "za3h.s", Value: AArch64::ZAS3)
      .Case(S: "za0h.h", Value: AArch64::ZAH0)
      .Case(S: "za1h.h", Value: AArch64::ZAH1)
      .Case(S: "za0h.b", Value: AArch64::ZAB0)
      // Vertical-slice spellings: same tile registers as above.
      .Case(S: "za0v.q", Value: AArch64::ZAQ0)
      .Case(S: "za1v.q", Value: AArch64::ZAQ1)
      .Case(S: "za2v.q", Value: AArch64::ZAQ2)
      .Case(S: "za3v.q", Value: AArch64::ZAQ3)
      .Case(S: "za4v.q", Value: AArch64::ZAQ4)
      .Case(S: "za5v.q", Value: AArch64::ZAQ5)
      .Case(S: "za6v.q", Value: AArch64::ZAQ6)
      .Case(S: "za7v.q", Value: AArch64::ZAQ7)
      .Case(S: "za8v.q", Value: AArch64::ZAQ8)
      .Case(S: "za9v.q", Value: AArch64::ZAQ9)
      .Case(S: "za10v.q", Value: AArch64::ZAQ10)
      .Case(S: "za11v.q", Value: AArch64::ZAQ11)
      .Case(S: "za12v.q", Value: AArch64::ZAQ12)
      .Case(S: "za13v.q", Value: AArch64::ZAQ13)
      .Case(S: "za14v.q", Value: AArch64::ZAQ14)
      .Case(S: "za15v.q", Value: AArch64::ZAQ15)
      .Case(S: "za0v.d", Value: AArch64::ZAD0)
      .Case(S: "za1v.d", Value: AArch64::ZAD1)
      .Case(S: "za2v.d", Value: AArch64::ZAD2)
      .Case(S: "za3v.d", Value: AArch64::ZAD3)
      .Case(S: "za4v.d", Value: AArch64::ZAD4)
      .Case(S: "za5v.d", Value: AArch64::ZAD5)
      .Case(S: "za6v.d", Value: AArch64::ZAD6)
      .Case(S: "za7v.d", Value: AArch64::ZAD7)
      .Case(S: "za0v.s", Value: AArch64::ZAS0)
      .Case(S: "za1v.s", Value: AArch64::ZAS1)
      .Case(S: "za2v.s", Value: AArch64::ZAS2)
      .Case(S: "za3v.s", Value: AArch64::ZAS3)
      .Case(S: "za0v.h", Value: AArch64::ZAH0)
      .Case(S: "za1v.h", Value: AArch64::ZAH1)
      .Case(S: "za0v.b", Value: AArch64::ZAB0)
      .Default(Value: 0);
}
3074
3075bool AArch64AsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
3076 SMLoc &EndLoc) {
3077 return !tryParseRegister(Reg, StartLoc, EndLoc).isSuccess();
3078}
3079
3080ParseStatus AArch64AsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
3081 SMLoc &EndLoc) {
3082 StartLoc = getLoc();
3083 ParseStatus Res = tryParseScalarRegister(Reg);
3084 EndLoc = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
3085 return Res;
3086}
3087
3088// Matches a register name or register alias previously defined by '.req'
3089MCRegister AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
3090 RegKind Kind) {
3091 MCRegister Reg = MCRegister();
3092 if ((Reg = matchSVEDataVectorRegName(Name)))
3093 return Kind == RegKind::SVEDataVector ? Reg : MCRegister();
3094
3095 if ((Reg = matchSVEPredicateVectorRegName(Name)))
3096 return Kind == RegKind::SVEPredicateVector ? Reg : MCRegister();
3097
3098 if ((Reg = matchSVEPredicateAsCounterRegName(Name)))
3099 return Kind == RegKind::SVEPredicateAsCounter ? Reg : MCRegister();
3100
3101 if ((Reg = MatchNeonVectorRegName(Name)))
3102 return Kind == RegKind::NeonVector ? Reg : MCRegister();
3103
3104 if ((Reg = matchMatrixRegName(Name)))
3105 return Kind == RegKind::Matrix ? Reg : MCRegister();
3106
3107 if (Name.equals_insensitive(RHS: "zt0"))
3108 return Kind == RegKind::LookupTable ? unsigned(AArch64::ZT0) : 0;
3109
3110 // The parsed register must be of RegKind Scalar
3111 if ((Reg = MatchRegisterName(Name)))
3112 return (Kind == RegKind::Scalar) ? Reg : MCRegister();
3113
3114 if (!Reg) {
3115 // Handle a few common aliases of registers.
3116 if (MCRegister Reg = StringSwitch<unsigned>(Name.lower())
3117 .Case(S: "fp", Value: AArch64::FP)
3118 .Case(S: "lr", Value: AArch64::LR)
3119 .Case(S: "x31", Value: AArch64::XZR)
3120 .Case(S: "w31", Value: AArch64::WZR)
3121 .Default(Value: 0))
3122 return Kind == RegKind::Scalar ? Reg : MCRegister();
3123
3124 // Check for aliases registered via .req. Canonicalize to lower case.
3125 // That's more consistent since register names are case insensitive, and
3126 // it's how the original entry was passed in from MC/MCParser/AsmParser.
3127 auto Entry = RegisterReqs.find(Key: Name.lower());
3128 if (Entry == RegisterReqs.end())
3129 return MCRegister();
3130
3131 // set Reg if the match is the right kind of register
3132 if (Kind == Entry->getValue().first)
3133 Reg = Entry->getValue().second;
3134 }
3135 return Reg;
3136}
3137
3138unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
3139 switch (K) {
3140 case RegKind::Scalar:
3141 case RegKind::NeonVector:
3142 case RegKind::SVEDataVector:
3143 return 32;
3144 case RegKind::Matrix:
3145 case RegKind::SVEPredicateVector:
3146 case RegKind::SVEPredicateAsCounter:
3147 return 16;
3148 case RegKind::LookupTable:
3149 return 1;
3150 }
3151 llvm_unreachable("Unsupported RegKind");
3152}
3153
3154/// tryParseScalarRegister - Try to parse a register name. The token must be an
3155/// Identifier when called, and if it is a register name the token is eaten and
3156/// the register is added to the operand list.
3157ParseStatus AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
3158 const AsmToken &Tok = getTok();
3159 if (Tok.isNot(K: AsmToken::Identifier))
3160 return ParseStatus::NoMatch;
3161
3162 std::string lowerCase = Tok.getString().lower();
3163 MCRegister Reg = matchRegisterNameAlias(Name: lowerCase, Kind: RegKind::Scalar);
3164 if (!Reg)
3165 return ParseStatus::NoMatch;
3166
3167 RegNum = Reg;
3168 Lex(); // Eat identifier token.
3169 return ParseStatus::Success;
3170}
3171
3172/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
3173ParseStatus AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
3174 SMLoc S = getLoc();
3175
3176 if (getTok().isNot(K: AsmToken::Identifier))
3177 return Error(L: S, Msg: "Expected cN operand where 0 <= N <= 15");
3178
3179 StringRef Tok = getTok().getIdentifier();
3180 if (Tok[0] != 'c' && Tok[0] != 'C')
3181 return Error(L: S, Msg: "Expected cN operand where 0 <= N <= 15");
3182
3183 uint32_t CRNum;
3184 bool BadNum = Tok.drop_front().getAsInteger(Radix: 10, Result&: CRNum);
3185 if (BadNum || CRNum > 15)
3186 return Error(L: S, Msg: "Expected cN operand where 0 <= N <= 15");
3187
3188 Lex(); // Eat identifier token.
3189 Operands.push_back(
3190 Elt: AArch64Operand::CreateSysCR(Val: CRNum, S, E: getLoc(), Ctx&: getContext()));
3191 return ParseStatus::Success;
3192}
3193
3194// Either an identifier for named values or a 6-bit immediate.
3195ParseStatus AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
3196 SMLoc S = getLoc();
3197 const AsmToken &Tok = getTok();
3198
3199 unsigned MaxVal = 63;
3200
3201 // Immediate case, with optional leading hash:
3202 if (parseOptionalToken(T: AsmToken::Hash) ||
3203 Tok.is(K: AsmToken::Integer)) {
3204 const MCExpr *ImmVal;
3205 if (getParser().parseExpression(Res&: ImmVal))
3206 return ParseStatus::Failure;
3207
3208 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
3209 if (!MCE)
3210 return TokError(Msg: "immediate value expected for prefetch operand");
3211 unsigned prfop = MCE->getValue();
3212 if (prfop > MaxVal)
3213 return TokError(Msg: "prefetch operand out of range, [0," + utostr(X: MaxVal) +
3214 "] expected");
3215
3216 auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(Encoding: MCE->getValue());
3217 Operands.push_back(Elt: AArch64Operand::CreatePrefetch(
3218 Val: prfop, Str: RPRFM ? RPRFM->Name : "", S, Ctx&: getContext()));
3219 return ParseStatus::Success;
3220 }
3221
3222 if (Tok.isNot(K: AsmToken::Identifier))
3223 return TokError(Msg: "prefetch hint expected");
3224
3225 auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Name: Tok.getString());
3226 if (!RPRFM)
3227 return TokError(Msg: "prefetch hint expected");
3228
3229 Operands.push_back(Elt: AArch64Operand::CreatePrefetch(
3230 Val: RPRFM->Encoding, Str: Tok.getString(), S, Ctx&: getContext()));
3231 Lex(); // Eat identifier token.
3232 return ParseStatus::Success;
3233}
3234
/// tryParsePrefetch - Try to parse a prefetch operand.
///
/// Accepts either a named prefetch hint or an immediate (optionally '#'
/// prefixed). The template parameter selects between the SVE PRFM table
/// (4-bit encodings) and the base-ISA PRFM table (5-bit encodings).
template <bool IsSVEPrefetch>
ParseStatus AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
  SMLoc S = getLoc();
  const AsmToken &Tok = getTok();

  // Resolve a hint name to its encoding via the table chosen by the
  // template parameter.
  auto LookupByName = [](StringRef N) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(Name: N))
        return std::optional<unsigned>(Res->Encoding);
    } else if (auto Res = AArch64PRFM::lookupPRFMByName(Name: N))
      return std::optional<unsigned>(Res->Encoding);
    return std::optional<unsigned>();
  };

  // Reverse lookup: encoding to symbolic name (for diagnostics/printing).
  auto LookupByEncoding = [](unsigned E) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(Encoding: E))
        return std::optional<StringRef>(Res->Name);
    } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(Encoding: E))
      return std::optional<StringRef>(Res->Name);
    return std::optional<StringRef>();
  };
  unsigned MaxVal = IsSVEPrefetch ? 15 : 31;

  // Either an identifier for named values or a 5-bit immediate.
  // Eat optional hash.
  if (parseOptionalToken(T: AsmToken::Hash) ||
      Tok.is(K: AsmToken::Integer)) {
    const MCExpr *ImmVal;
    if (getParser().parseExpression(Res&: ImmVal))
      return ParseStatus::Failure;

    // Only plain constants are acceptable in the immediate form.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
    if (!MCE)
      return TokError(Msg: "immediate value expected for prefetch operand");
    unsigned prfop = MCE->getValue();
    if (prfop > MaxVal)
      return TokError(Msg: "prefetch operand out of range, [0," + utostr(X: MaxVal) +
                      "] expected");

    // Attach the symbolic name when the encoding has one.
    auto PRFM = LookupByEncoding(MCE->getValue());
    Operands.push_back(AArch64Operand::CreatePrefetch(Val: prfop, Str: PRFM.value_or(""),
                                                      S, Ctx&: getContext()));
    return ParseStatus::Success;
  }

  if (Tok.isNot(K: AsmToken::Identifier))
    return TokError(Msg: "prefetch hint expected");

  auto PRFM = LookupByName(Tok.getString());
  if (!PRFM)
    return TokError(Msg: "prefetch hint expected");

  Operands.push_back(AArch64Operand::CreatePrefetch(
      Val: *PRFM, Str: Tok.getString(), S, Ctx&: getContext()));
  Lex(); // Eat identifier token.
  return ParseStatus::Success;
}
3294
3295/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
3296ParseStatus AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
3297 SMLoc S = getLoc();
3298 const AsmToken &Tok = getTok();
3299 if (Tok.isNot(K: AsmToken::Identifier))
3300 return TokError(Msg: "invalid operand for instruction");
3301
3302 auto PSB = AArch64PSBHint::lookupPSBByName(Name: Tok.getString());
3303 if (!PSB)
3304 return TokError(Msg: "invalid operand for instruction");
3305
3306 Operands.push_back(Elt: AArch64Operand::CreatePSBHint(
3307 Val: PSB->Encoding, Str: Tok.getString(), S, Ctx&: getContext()));
3308 Lex(); // Eat identifier token.
3309 return ParseStatus::Success;
3310}
3311
3312ParseStatus AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
3313 SMLoc StartLoc = getLoc();
3314
3315 MCRegister RegNum;
3316
3317 // The case where xzr, xzr is not present is handled by an InstAlias.
3318
3319 auto RegTok = getTok(); // in case we need to backtrack
3320 if (!tryParseScalarRegister(RegNum).isSuccess())
3321 return ParseStatus::NoMatch;
3322
3323 if (RegNum != AArch64::XZR) {
3324 getLexer().UnLex(Token: RegTok);
3325 return ParseStatus::NoMatch;
3326 }
3327
3328 if (parseComma())
3329 return ParseStatus::Failure;
3330
3331 if (!tryParseScalarRegister(RegNum).isSuccess())
3332 return TokError(Msg: "expected register operand");
3333
3334 if (RegNum != AArch64::XZR)
3335 return TokError(Msg: "xzr must be followed by xzr");
3336
3337 // We need to push something, since we claim this is an operand in .td.
3338 // See also AArch64AsmParser::parseKeywordOperand.
3339 Operands.push_back(Elt: AArch64Operand::CreateReg(
3340 Reg: RegNum, Kind: RegKind::Scalar, S: StartLoc, E: getLoc(), Ctx&: getContext()));
3341
3342 return ParseStatus::Success;
3343}
3344
3345/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
3346ParseStatus AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
3347 SMLoc S = getLoc();
3348 const AsmToken &Tok = getTok();
3349 if (Tok.isNot(K: AsmToken::Identifier))
3350 return TokError(Msg: "invalid operand for instruction");
3351
3352 auto BTI = AArch64BTIHint::lookupBTIByName(Name: Tok.getString());
3353 if (!BTI)
3354 return TokError(Msg: "invalid operand for instruction");
3355
3356 Operands.push_back(Elt: AArch64Operand::CreateBTIHint(
3357 Val: BTI->Encoding, Str: Tok.getString(), S, Ctx&: getContext()));
3358 Lex(); // Eat identifier token.
3359 return ParseStatus::Success;
3360}
3361
3362/// tryParseCMHPriorityHint - Try to parse a CMHPriority operand
3363ParseStatus AArch64AsmParser::tryParseCMHPriorityHint(OperandVector &Operands) {
3364 SMLoc S = getLoc();
3365 const AsmToken &Tok = getTok();
3366 if (Tok.isNot(K: AsmToken::Identifier))
3367 return TokError(Msg: "invalid operand for instruction");
3368
3369 auto CMHPriority =
3370 AArch64CMHPriorityHint::lookupCMHPriorityHintByName(Name: Tok.getString());
3371 if (!CMHPriority)
3372 return TokError(Msg: "invalid operand for instruction");
3373
3374 Operands.push_back(Elt: AArch64Operand::CreateCMHPriorityHint(
3375 Val: CMHPriority->Encoding, Str: Tok.getString(), S, Ctx&: getContext()));
3376 Lex(); // Eat identifier token.
3377 return ParseStatus::Success;
3378}
3379
3380/// tryParseTIndexHint - Try to parse a TIndex operand
3381ParseStatus AArch64AsmParser::tryParseTIndexHint(OperandVector &Operands) {
3382 SMLoc S = getLoc();
3383 const AsmToken &Tok = getTok();
3384 if (Tok.isNot(K: AsmToken::Identifier))
3385 return TokError(Msg: "invalid operand for instruction");
3386
3387 auto TIndex = AArch64TIndexHint::lookupTIndexByName(Name: Tok.getString());
3388 if (!TIndex)
3389 return TokError(Msg: "invalid operand for instruction");
3390
3391 Operands.push_back(Elt: AArch64Operand::CreateTIndexHint(
3392 Val: TIndex->Encoding, Str: Tok.getString(), S, Ctx&: getContext()));
3393 Lex(); // Eat identifier token.
3394 return ParseStatus::Success;
3395}
3396
/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
/// instruction.
///
/// Parses a (possibly '#'-prefixed) symbolic expression and checks that any
/// relocation specifier attached to it is one that is legal on an ADRP
/// page-address operand.
ParseStatus AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
  SMLoc S = getLoc();
  const MCExpr *Expr = nullptr;

  // An immediate marker '#' is optional.
  if (getTok().is(K: AsmToken::Hash)) {
    Lex(); // Eat hash token.
  }

  if (parseSymbolicImmVal(ImmVal&: Expr))
    return ParseStatus::Failure;

  // Split the expression into (specifier, symbol, addend) and validate the
  // specifier against the set allowed on ADRP.
  AArch64::Specifier ELFSpec;
  AArch64::Specifier DarwinSpec;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
    if (DarwinSpec == AArch64::S_None && ELFSpec == AArch64::S_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADRP relocation (unfortunately).
      Expr =
          MCSpecifierExpr::create(Expr, S: AArch64::S_ABS_PAGE, Ctx&: getContext(), Loc: S);
    } else if ((DarwinSpec == AArch64::S_MACHO_GOTPAGE ||
                DarwinSpec == AArch64::S_MACHO_TLVPPAGE) &&
               Addend != 0) {
      // Mach-O GOT/TLVP page references must not carry an addend.
      return Error(L: S, Msg: "gotpage label reference not allowed an addend");
    } else if (DarwinSpec != AArch64::S_MACHO_PAGE &&
               DarwinSpec != AArch64::S_MACHO_GOTPAGE &&
               DarwinSpec != AArch64::S_MACHO_TLVPPAGE &&
               ELFSpec != AArch64::S_ABS_PAGE_NC &&
               ELFSpec != AArch64::S_GOT_PAGE &&
               ELFSpec != AArch64::S_GOT_AUTH_PAGE &&
               ELFSpec != AArch64::S_GOT_PAGE_LO15 &&
               ELFSpec != AArch64::S_GOTTPREL_PAGE &&
               ELFSpec != AArch64::S_TLSDESC_PAGE &&
               ELFSpec != AArch64::S_TLSDESC_AUTH_PAGE) {
      // The operand must be an @page or @gotpage qualified symbolref.
      return Error(L: S, Msg: "page or gotpage label reference expected");
    }
  }

  // We have either a label reference possibly with addend or an immediate. The
  // addend is a raw value here. The linker will adjust it to only reference the
  // page.
  SMLoc E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
  Operands.push_back(Elt: AArch64Operand::CreateImm(Val: Expr, S, E, Ctx&: getContext()));

  return ParseStatus::Success;
}
3446
/// tryParseAdrLabel - Parse and validate a source label for the ADR
/// instruction.
ParseStatus AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
  SMLoc S = getLoc();
  const MCExpr *Expr = nullptr;

  // Leave anything with a bracket to the default for SVE
  if (getTok().is(K: AsmToken::LBrac))
    return ParseStatus::NoMatch;

  // An immediate marker '#' is optional.
  if (getTok().is(K: AsmToken::Hash))
    Lex(); // Eat hash token.

  if (parseSymbolicImmVal(ImmVal&: Expr))
    return ParseStatus::Failure;

  // Split the expression into (specifier, symbol, addend) and check the
  // specifier against the set allowed on ADR.
  AArch64::Specifier ELFSpec;
  AArch64::Specifier DarwinSpec;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
    if (DarwinSpec == AArch64::S_None && ELFSpec == AArch64::S_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADR relocation (unfortunately).
      Expr = MCSpecifierExpr::create(Expr, S: AArch64::S_ABS, Ctx&: getContext(), Loc: S);
    } else if (ELFSpec != AArch64::S_GOT_AUTH_PAGE) {
      // For tiny code model, we use :got_auth: operator to fill 21-bit imm of
      // adr. It's not actually GOT entry page address but the GOT address
      // itself - we just share the same variant kind with :got_auth: operator
      // applied for adrp.
      // TODO: can we somehow get current TargetMachine object to call
      // getCodeModel() on it to ensure we are using tiny code model?
      return Error(L: S, Msg: "unexpected adr label");
    }
  }

  SMLoc E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
  Operands.push_back(Elt: AArch64Operand::CreateImm(Val: Expr, S, E, Ctx&: getContext()));
  return ParseStatus::Success;
}
3486
/// tryParseFPImm - A floating point immediate expression operand.
///
/// Accepts either a hex literal (the raw 8-bit encoded FP immediate) or a
/// decimal FP literal, each optionally '#'-prefixed and optionally negated.
/// When \p AddFPZeroAsLiteral is set, +0.0 is pushed as the two literal
/// tokens "#0" and ".0" instead of an FP-immediate operand.
template <bool AddFPZeroAsLiteral>
ParseStatus AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  SMLoc S = getLoc();

  bool Hash = parseOptionalToken(T: AsmToken::Hash);

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = parseOptionalToken(T: AsmToken::Minus);

  const AsmToken &Tok = getTok();
  if (!Tok.is(K: AsmToken::Real) && !Tok.is(K: AsmToken::Integer)) {
    // Without a leading '#' this simply isn't an FP immediate; with one it
    // is a malformed operand.
    if (!Hash)
      return ParseStatus::NoMatch;
    return TokError(Msg: "invalid floating point immediate");
  }

  // Parse hexadecimal representation.
  if (Tok.is(K: AsmToken::Integer) && Tok.getString().starts_with(Prefix: "0x")) {
    // A hex literal is the 8-bit encoded form; negation makes no sense here
    // since the sign is part of the encoding.
    if (Tok.getIntVal() > 255 || isNegative)
      return TokError(Msg: "encoded floating point value out of range");

    APFloat F((double)AArch64_AM::getFPImmFloat(Imm: Tok.getIntVal()));
    Operands.push_back(
        Elt: AArch64Operand::CreateFPImm(Val: F, IsExact: true, S, Ctx&: getContext()));
  } else {
    // Parse FP representation.
    APFloat RealVal(APFloat::IEEEdouble());
    auto StatusOrErr =
        RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
    if (errorToBool(Err: StatusOrErr.takeError()))
      return TokError(Msg: "invalid floating point representation");

    if (isNegative)
      RealVal.changeSign();

    if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
      Operands.push_back(Elt: AArch64Operand::CreateToken(Str: "#0", S, Ctx&: getContext()));
      Operands.push_back(Elt: AArch64Operand::CreateToken(Str: ".0", S, Ctx&: getContext()));
    } else
      // Record whether the literal converted exactly so later matching can
      // reject inexact immediates where exactness is required.
      Operands.push_back(Elt: AArch64Operand::CreateFPImm(
          Val: RealVal, IsExact: *StatusOrErr == APFloat::opOK, S, Ctx&: getContext()));
  }

  Lex(); // Eat the token.

  return ParseStatus::Success;
}
3535
/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
/// a shift suffix, for example '#1, lsl #12'.
ParseStatus
AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
  SMLoc S = getLoc();

  if (getTok().is(K: AsmToken::Hash))
    Lex(); // Eat '#'
  else if (getTok().isNot(K: AsmToken::Integer))
    // Operand should start from # or should be integer, emit error otherwise.
    return ParseStatus::NoMatch;

  // "N:M" after the immediate marker is an immediate range, not a shifted
  // immediate; hand it off.
  if (getTok().is(K: AsmToken::Integer) &&
      getLexer().peekTok().is(K: AsmToken::Colon))
    return tryParseImmRange(Operands);

  const MCExpr *Imm = nullptr;
  if (parseSymbolicImmVal(ImmVal&: Imm))
    return ParseStatus::Failure;
  else if (getTok().isNot(K: AsmToken::Comma)) {
    // Plain immediate with no suffix.
    Operands.push_back(
        Elt: AArch64Operand::CreateImm(Val: Imm, S, E: getLoc(), Ctx&: getContext()));
    return ParseStatus::Success;
  }

  // Eat ','
  Lex();
  StringRef VecGroup;
  // NOTE(review): parseOptionalVGOperand appears to return false when a
  // vector-group suffix was parsed (VecGroup is used below) -- confirm at
  // its definition.
  if (!parseOptionalVGOperand(Operands, VecGroup)) {
    Operands.push_back(
        Elt: AArch64Operand::CreateImm(Val: Imm, S, E: getLoc(), Ctx&: getContext()));
    Operands.push_back(
        Elt: AArch64Operand::CreateToken(Str: VecGroup, S: getLoc(), Ctx&: getContext()));
    return ParseStatus::Success;
  }

  // The optional operand must be "lsl #N" where N is non-negative.
  if (!getTok().is(K: AsmToken::Identifier) ||
      !getTok().getIdentifier().equals_insensitive(RHS: "lsl"))
    return Error(L: getLoc(), Msg: "only 'lsl #+N' valid after immediate");

  // Eat 'lsl'
  Lex();

  parseOptionalToken(T: AsmToken::Hash);

  if (getTok().isNot(K: AsmToken::Integer))
    return Error(L: getLoc(), Msg: "only 'lsl #+N' valid after immediate");

  int64_t ShiftAmount = getTok().getIntVal();

  if (ShiftAmount < 0)
    return Error(L: getLoc(), Msg: "positive shift amount required");
  Lex(); // Eat the number

  // Just in case the optional lsl #0 is used for immediates other than zero.
  if (ShiftAmount == 0 && Imm != nullptr) {
    // 'lsl #0' is a no-op; push a plain immediate operand.
    Operands.push_back(
        Elt: AArch64Operand::CreateImm(Val: Imm, S, E: getLoc(), Ctx&: getContext()));
    return ParseStatus::Success;
  }

  Operands.push_back(Elt: AArch64Operand::CreateShiftedImm(Val: Imm, ShiftAmount, S,
                                                      E: getLoc(), Ctx&: getContext()));
  return ParseStatus::Success;
}
3602
3603/// parseCondCodeString - Parse a Condition Code string, optionally returning a
3604/// suggestion to help common typos.
3605AArch64CC::CondCode
3606AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
3607 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3608 .Case(S: "eq", Value: AArch64CC::EQ)
3609 .Case(S: "ne", Value: AArch64CC::NE)
3610 .Case(S: "cs", Value: AArch64CC::HS)
3611 .Case(S: "hs", Value: AArch64CC::HS)
3612 .Case(S: "cc", Value: AArch64CC::LO)
3613 .Case(S: "lo", Value: AArch64CC::LO)
3614 .Case(S: "mi", Value: AArch64CC::MI)
3615 .Case(S: "pl", Value: AArch64CC::PL)
3616 .Case(S: "vs", Value: AArch64CC::VS)
3617 .Case(S: "vc", Value: AArch64CC::VC)
3618 .Case(S: "hi", Value: AArch64CC::HI)
3619 .Case(S: "ls", Value: AArch64CC::LS)
3620 .Case(S: "ge", Value: AArch64CC::GE)
3621 .Case(S: "lt", Value: AArch64CC::LT)
3622 .Case(S: "gt", Value: AArch64CC::GT)
3623 .Case(S: "le", Value: AArch64CC::LE)
3624 .Case(S: "al", Value: AArch64CC::AL)
3625 .Case(S: "nv", Value: AArch64CC::NV)
3626 // SVE condition code aliases:
3627 .Case(S: "none", Value: AArch64CC::EQ)
3628 .Case(S: "any", Value: AArch64CC::NE)
3629 .Case(S: "nlast", Value: AArch64CC::HS)
3630 .Case(S: "last", Value: AArch64CC::LO)
3631 .Case(S: "first", Value: AArch64CC::MI)
3632 .Case(S: "nfrst", Value: AArch64CC::PL)
3633 .Case(S: "pmore", Value: AArch64CC::HI)
3634 .Case(S: "plast", Value: AArch64CC::LS)
3635 .Case(S: "tcont", Value: AArch64CC::GE)
3636 .Case(S: "tstop", Value: AArch64CC::LT)
3637 .Default(Value: AArch64CC::Invalid);
3638
3639 if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
3640 Suggestion = "nfrst";
3641
3642 return CC;
3643}
3644
3645/// parseCondCode - Parse a Condition Code operand.
3646bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3647 bool invertCondCode) {
3648 SMLoc S = getLoc();
3649 const AsmToken &Tok = getTok();
3650 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3651
3652 StringRef Cond = Tok.getString();
3653 std::string Suggestion;
3654 AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
3655 if (CC == AArch64CC::Invalid) {
3656 std::string Msg = "invalid condition code";
3657 if (!Suggestion.empty())
3658 Msg += ", did you mean " + Suggestion + "?";
3659 return TokError(Msg);
3660 }
3661 Lex(); // Eat identifier token.
3662
3663 if (invertCondCode) {
3664 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3665 return TokError(Msg: "condition codes AL and NV are invalid for this instruction");
3666 CC = AArch64CC::getInvertedCondCode(Code: AArch64CC::CondCode(CC));
3667 }
3668
3669 Operands.push_back(
3670 Elt: AArch64Operand::CreateCondCode(Code: CC, S, E: getLoc(), Ctx&: getContext()));
3671 return false;
3672}
3673
3674ParseStatus AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3675 const AsmToken &Tok = getTok();
3676 SMLoc S = getLoc();
3677
3678 if (Tok.isNot(K: AsmToken::Identifier))
3679 return TokError(Msg: "invalid operand for instruction");
3680
3681 unsigned PStateImm = -1;
3682 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Name: Tok.getString());
3683 if (!SVCR)
3684 return ParseStatus::NoMatch;
3685 if (SVCR->haveFeatures(ActiveFeatures: getSTI().getFeatureBits()))
3686 PStateImm = SVCR->Encoding;
3687
3688 Operands.push_back(
3689 Elt: AArch64Operand::CreateSVCR(PStateField: PStateImm, Str: Tok.getString(), S, Ctx&: getContext()));
3690 Lex(); // Eat identifier token.
3691 return ParseStatus::Success;
3692}
3693
/// Parse an SME matrix register operand: either the full ZA array, optionally
/// with an element-width suffix ("za", "za.b", ...), or a named tile /
/// row-slice / column-slice register such as "za0h.s".
ParseStatus AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  SMLoc S = getLoc();

  StringRef Name = Tok.getString();

  // Case 1: the whole ZA array, with an optional ".b/.h/.s/.d" suffix.
  if (Name.equals_insensitive(RHS: "za") || Name.starts_with_insensitive(Prefix: "za.")) {
    Lex(); // eat "za[.(b|h|s|d)]"
    unsigned ElementWidth = 0;
    auto DotPosition = Name.find(C: '.');
    if (DotPosition != StringRef::npos) {
      const auto &KindRes =
          parseVectorKind(Suffix: Name.drop_front(N: DotPosition), VectorKind: RegKind::Matrix);
      if (!KindRes)
        return TokError(
            Msg: "Expected the register to be followed by element width suffix");
      ElementWidth = KindRes->second;
    }
    Operands.push_back(Elt: AArch64Operand::CreateMatrixRegister(
        Reg: AArch64::ZA, ElementWidth, Kind: MatrixKind::Array, S, E: getLoc(),
        Ctx&: getContext()));
    if (getLexer().is(K: AsmToken::LBrac)) {
      // There's no comma after matrix operand, so we can parse the next operand
      // immediately.
      if (parseOperand(Operands, isCondCode: false, invertCondCode: false))
        return ParseStatus::NoMatch;
    }
    return ParseStatus::Success;
  }

  // Case 2: try to parse a tile/slice matrix register by name.
  MCRegister Reg = matchRegisterNameAlias(Name, Kind: RegKind::Matrix);
  if (!Reg)
    return ParseStatus::NoMatch;

  // Any matched matrix register name is expected to carry a '.' suffix.
  size_t DotPosition = Name.find(C: '.');
  assert(DotPosition != StringRef::npos && "Unexpected register");

  StringRef Head = Name.take_front(N: DotPosition);
  StringRef Tail = Name.drop_front(N: DotPosition);
  // The last character before the '.' distinguishes a horizontal (row)
  // slice 'h', a vertical (column) slice 'v', or a plain tile.
  StringRef RowOrColumn = Head.take_back();

  MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
                        .Case(S: "h", Value: MatrixKind::Row)
                        .Case(S: "v", Value: MatrixKind::Col)
                        .Default(Value: MatrixKind::Tile);

  // Next up, parsing the suffix
  const auto &KindRes = parseVectorKind(Suffix: Tail, VectorKind: RegKind::Matrix);
  if (!KindRes)
    return TokError(
        Msg: "Expected the register to be followed by element width suffix");
  unsigned ElementWidth = KindRes->second;

  Lex();

  Operands.push_back(Elt: AArch64Operand::CreateMatrixRegister(
      Reg, ElementWidth, Kind, S, E: getLoc(), Ctx&: getContext()));

  if (getLexer().is(K: AsmToken::LBrac)) {
    // There's no comma after matrix operand, so we can parse the next operand
    // immediately.
    if (parseOperand(Operands, isCondCode: false, invertCondCode: false))
      return ParseStatus::NoMatch;
  }
  return ParseStatus::Success;
}
3761
/// tryParseOptionalShiftExtend - Some operands take an optional shift/extend
/// argument. Parse them if present.
ParseStatus
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  // Recognize the shift/extend mnemonic case-insensitively.
  std::string LowerID = Tok.getString().lower();
  AArch64_AM::ShiftExtendType ShOp =
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case(S: "lsl", Value: AArch64_AM::LSL)
          .Case(S: "lsr", Value: AArch64_AM::LSR)
          .Case(S: "asr", Value: AArch64_AM::ASR)
          .Case(S: "ror", Value: AArch64_AM::ROR)
          .Case(S: "msl", Value: AArch64_AM::MSL)
          .Case(S: "uxtb", Value: AArch64_AM::UXTB)
          .Case(S: "uxth", Value: AArch64_AM::UXTH)
          .Case(S: "uxtw", Value: AArch64_AM::UXTW)
          .Case(S: "uxtx", Value: AArch64_AM::UXTX)
          .Case(S: "sxtb", Value: AArch64_AM::SXTB)
          .Case(S: "sxth", Value: AArch64_AM::SXTH)
          .Case(S: "sxtw", Value: AArch64_AM::SXTW)
          .Case(S: "sxtx", Value: AArch64_AM::SXTX)
          .Default(Value: AArch64_AM::InvalidShiftExtend)\u003b

  if (ShOp == AArch64_AM::InvalidShiftExtend)
    return ParseStatus::NoMatch;

  SMLoc S = Tok.getLoc();
  Lex();

  // The amount may be prefixed with '#'.
  bool Hash = parseOptionalToken(T: AsmToken::Hash);

  if (!Hash && getLexer().isNot(K: AsmToken::Integer)) {
    // Shift operations (as opposed to extends) always require an amount.
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      return TokError(Msg: "expected #imm after shift specifier");
    }

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
    Operands.push_back(
        Elt: AArch64Operand::CreateShiftExtend(ShOp, Val: 0, HasExplicitAmount: false, S, E, Ctx&: getContext()));
    return ParseStatus::Success;
  }

  // Make sure we do actually have a number, identifier or a parenthesized
  // expression.
  SMLoc E = getLoc();
  if (!getTok().is(K: AsmToken::Integer) && !getTok().is(K: AsmToken::LParen) &&
      !getTok().is(K: AsmToken::Identifier))
    return Error(L: E, Msg: "expected integer shift amount");

  const MCExpr *ImmVal;
  if (getParser().parseExpression(Res&: ImmVal))
    return ParseStatus::Failure;

  // The amount must fold to a constant at parse time.
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
  if (!MCE)
    return Error(L: E, Msg: "expected constant '#imm' after shift specifier");

  E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
  Operands.push_back(Elt: AArch64Operand::CreateShiftExtend(
      ShOp, Val: MCE->getValue(), HasExplicitAmount: true, S, E, Ctx&: getContext()));
  return ParseStatus::Success;
}
3828
// Table mapping architecture-extension names (as used by .arch_extension and
// in diagnostics) to the subtarget features they enable. Several names are
// aliases for the same feature set (e.g. "mte"/"memtag", "rdm"/"rdma"), and
// some entries enable multiple features.
static const struct Extension {
  const char *Name;
  const FeatureBitset Features;
} ExtensionMap[] = {
    {.Name: "crc", .Features: {AArch64::FeatureCRC}},
    {.Name: "sm4", .Features: {AArch64::FeatureSM4}},
    {.Name: "sha3", .Features: {AArch64::FeatureSHA3}},
    {.Name: "sha2", .Features: {AArch64::FeatureSHA2}},
    {.Name: "aes", .Features: {AArch64::FeatureAES}},
    {.Name: "crypto", .Features: {AArch64::FeatureCrypto}},
    {.Name: "fp", .Features: {AArch64::FeatureFPARMv8}},
    {.Name: "simd", .Features: {AArch64::FeatureNEON}},
    {.Name: "ras", .Features: {AArch64::FeatureRAS}},
    {.Name: "rasv2", .Features: {AArch64::FeatureRASv2}},
    {.Name: "lse", .Features: {AArch64::FeatureLSE}},
    {.Name: "predres", .Features: {AArch64::FeaturePredRes}},
    {.Name: "predres2", .Features: {AArch64::FeatureSPECRES2}},
    {.Name: "ccdp", .Features: {AArch64::FeatureCacheDeepPersist}},
    {.Name: "mte", .Features: {AArch64::FeatureMTE}},
    {.Name: "memtag", .Features: {AArch64::FeatureMTE}},
    {.Name: "tlb-rmi", .Features: {AArch64::FeatureTLB_RMI}},
    {.Name: "pan", .Features: {AArch64::FeaturePAN}},
    {.Name: "pan-rwv", .Features: {AArch64::FeaturePAN_RWV}},
    {.Name: "ccpp", .Features: {AArch64::FeatureCCPP}},
    {.Name: "rcpc", .Features: {AArch64::FeatureRCPC}},
    {.Name: "rng", .Features: {AArch64::FeatureRandGen}},
    {.Name: "sve", .Features: {AArch64::FeatureSVE}},
    {.Name: "sve-b16b16", .Features: {AArch64::FeatureSVEB16B16}},
    {.Name: "sve2", .Features: {AArch64::FeatureSVE2}},
    {.Name: "sve-aes", .Features: {AArch64::FeatureSVEAES}},
    {.Name: "sve2-aes", .Features: {AArch64::FeatureAliasSVE2AES, AArch64::FeatureSVEAES}},
    {.Name: "sve-sm4", .Features: {AArch64::FeatureSVESM4}},
    {.Name: "sve2-sm4", .Features: {AArch64::FeatureAliasSVE2SM4, AArch64::FeatureSVESM4}},
    {.Name: "sve-sha3", .Features: {AArch64::FeatureSVESHA3}},
    {.Name: "sve2-sha3", .Features: {AArch64::FeatureAliasSVE2SHA3, AArch64::FeatureSVESHA3}},
    {.Name: "sve-bitperm", .Features: {AArch64::FeatureSVEBitPerm}},
    {.Name: "sve2-bitperm",
     .Features: {AArch64::FeatureAliasSVE2BitPerm, AArch64::FeatureSVEBitPerm,
      AArch64::FeatureSVE2}},
    {.Name: "sve2p1", .Features: {AArch64::FeatureSVE2p1}},
    {.Name: "ls64", .Features: {AArch64::FeatureLS64}},
    {.Name: "xs", .Features: {AArch64::FeatureXS}},
    {.Name: "pauth", .Features: {AArch64::FeaturePAuth}},
    {.Name: "flagm", .Features: {AArch64::FeatureFlagM}},
    {.Name: "rme", .Features: {AArch64::FeatureRME}},
    {.Name: "sme", .Features: {AArch64::FeatureSME}},
    {.Name: "sme-f64f64", .Features: {AArch64::FeatureSMEF64F64}},
    {.Name: "sme-f16f16", .Features: {AArch64::FeatureSMEF16F16}},
    {.Name: "sme-i16i64", .Features: {AArch64::FeatureSMEI16I64}},
    {.Name: "sme2", .Features: {AArch64::FeatureSME2}},
    {.Name: "sme2p1", .Features: {AArch64::FeatureSME2p1}},
    {.Name: "sme-b16b16", .Features: {AArch64::FeatureSMEB16B16}},
    {.Name: "hbc", .Features: {AArch64::FeatureHBC}},
    {.Name: "mops", .Features: {AArch64::FeatureMOPS}},
    {.Name: "mec", .Features: {AArch64::FeatureMEC}},
    {.Name: "the", .Features: {AArch64::FeatureTHE}},
    {.Name: "d128", .Features: {AArch64::FeatureD128}},
    {.Name: "lse128", .Features: {AArch64::FeatureLSE128}},
    {.Name: "ite", .Features: {AArch64::FeatureITE}},
    {.Name: "cssc", .Features: {AArch64::FeatureCSSC}},
    {.Name: "rcpc3", .Features: {AArch64::FeatureRCPC3}},
    {.Name: "gcs", .Features: {AArch64::FeatureGCS}},
    {.Name: "bf16", .Features: {AArch64::FeatureBF16}},
    {.Name: "compnum", .Features: {AArch64::FeatureComplxNum}},
    {.Name: "dotprod", .Features: {AArch64::FeatureDotProd}},
    {.Name: "f32mm", .Features: {AArch64::FeatureMatMulFP32}},
    {.Name: "f64mm", .Features: {AArch64::FeatureMatMulFP64}},
    {.Name: "fp16", .Features: {AArch64::FeatureFullFP16}},
    {.Name: "fp16fml", .Features: {AArch64::FeatureFP16FML}},
    {.Name: "i8mm", .Features: {AArch64::FeatureMatMulInt8}},
    {.Name: "lor", .Features: {AArch64::FeatureLOR}},
    {.Name: "profile", .Features: {AArch64::FeatureSPE}},
    // "rdma" is the name documented by binutils for the feature, but
    // binutils also accepts incomplete prefixes of features, so "rdm"
    // works too. Support both spellings here.
    {.Name: "rdm", .Features: {AArch64::FeatureRDM}},
    {.Name: "rdma", .Features: {AArch64::FeatureRDM}},
    {.Name: "sb", .Features: {AArch64::FeatureSB}},
    {.Name: "ssbs", .Features: {AArch64::FeatureSSBS}},
    {.Name: "fp8", .Features: {AArch64::FeatureFP8}},
    {.Name: "faminmax", .Features: {AArch64::FeatureFAMINMAX}},
    {.Name: "fp8fma", .Features: {AArch64::FeatureFP8FMA}},
    {.Name: "ssve-fp8fma", .Features: {AArch64::FeatureSSVE_FP8FMA}},
    {.Name: "fp8dot2", .Features: {AArch64::FeatureFP8DOT2}},
    {.Name: "ssve-fp8dot2", .Features: {AArch64::FeatureSSVE_FP8DOT2}},
    {.Name: "fp8dot4", .Features: {AArch64::FeatureFP8DOT4}},
    {.Name: "ssve-fp8dot4", .Features: {AArch64::FeatureSSVE_FP8DOT4}},
    {.Name: "lut", .Features: {AArch64::FeatureLUT}},
    {.Name: "sme-lutv2", .Features: {AArch64::FeatureSME_LUTv2}},
    {.Name: "sme-f8f16", .Features: {AArch64::FeatureSMEF8F16}},
    {.Name: "sme-f8f32", .Features: {AArch64::FeatureSMEF8F32}},
    {.Name: "sme-fa64", .Features: {AArch64::FeatureSMEFA64}},
    {.Name: "cpa", .Features: {AArch64::FeatureCPA}},
    {.Name: "tlbiw", .Features: {AArch64::FeatureTLBIW}},
    {.Name: "pops", .Features: {AArch64::FeaturePoPS}},
    {.Name: "cmpbr", .Features: {AArch64::FeatureCMPBR}},
    {.Name: "f8f32mm", .Features: {AArch64::FeatureF8F32MM}},
    {.Name: "f8f16mm", .Features: {AArch64::FeatureF8F16MM}},
    {.Name: "fprcvt", .Features: {AArch64::FeatureFPRCVT}},
    {.Name: "lsfe", .Features: {AArch64::FeatureLSFE}},
    {.Name: "sme2p2", .Features: {AArch64::FeatureSME2p2}},
    {.Name: "ssve-aes", .Features: {AArch64::FeatureSSVE_AES}},
    {.Name: "sve2p2", .Features: {AArch64::FeatureSVE2p2}},
    {.Name: "sve-aes2", .Features: {AArch64::FeatureSVEAES2}},
    {.Name: "sve-bfscale", .Features: {AArch64::FeatureSVEBFSCALE}},
    {.Name: "sve-f16f32mm", .Features: {AArch64::FeatureSVE_F16F32MM}},
    {.Name: "lsui", .Features: {AArch64::FeatureLSUI}},
    {.Name: "occmo", .Features: {AArch64::FeatureOCCMO}},
    {.Name: "pcdphint", .Features: {AArch64::FeaturePCDPHINT}},
    {.Name: "ssve-bitperm", .Features: {AArch64::FeatureSSVE_BitPerm}},
    {.Name: "sme-mop4", .Features: {AArch64::FeatureSME_MOP4}},
    {.Name: "sme-tmop", .Features: {AArch64::FeatureSME_TMOP}},
    {.Name: "cmh", .Features: {AArch64::FeatureCMH}},
    {.Name: "lscp", .Features: {AArch64::FeatureLSCP}},
    {.Name: "tlbid", .Features: {AArch64::FeatureTLBID}},
    {.Name: "mpamv2", .Features: {AArch64::FeatureMPAMv2}},
    {.Name: "mtetc", .Features: {AArch64::FeatureMTETC}},
    {.Name: "gcie", .Features: {AArch64::FeatureGCIE}},
    {.Name: "sme2p3", .Features: {AArch64::FeatureSME2p3}},
    {.Name: "sve2p3", .Features: {AArch64::FeatureSVE2p3}},
    {.Name: "sve-b16mm", .Features: {AArch64::FeatureSVE_B16MM}},
    {.Name: "f16mm", .Features: {AArch64::FeatureF16MM}},
    {.Name: "f16f32dot", .Features: {AArch64::FeatureF16F32DOT}},
    {.Name: "f16f32mm", .Features: {AArch64::FeatureF16F32MM}},
    {.Name: "mops-go", .Features: {AArch64::FeatureMOPS_GO}},
    {.Name: "poe2", .Features: {AArch64::FeatureS1POE2}},
    {.Name: "tev", .Features: {AArch64::FeatureTEV}},
    {.Name: "btie", .Features: {AArch64::FeatureBTIE}},
    {.Name: "dit", .Features: {AArch64::FeatureDIT}},
    {.Name: "brbe", .Features: {AArch64::FeatureBRBE}},
    {.Name: "bti", .Features: {AArch64::FeatureBranchTargetId}},
    {.Name: "fcma", .Features: {AArch64::FeatureComplxNum}},
    {.Name: "jscvt", .Features: {AArch64::FeatureJS}},
    {.Name: "pauth-lr", .Features: {AArch64::FeaturePAuthLR}},
    {.Name: "ssve-fexpa", .Features: {AArch64::FeatureSSVE_FEXPA}},
    {.Name: "wfxt", .Features: {AArch64::FeatureWFxT}},
};
3966
3967static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3968 if (FBS[AArch64::HasV8_0aOps])
3969 Str += "ARMv8a";
3970 if (FBS[AArch64::HasV8_1aOps])
3971 Str += "ARMv8.1a";
3972 else if (FBS[AArch64::HasV8_2aOps])
3973 Str += "ARMv8.2a";
3974 else if (FBS[AArch64::HasV8_3aOps])
3975 Str += "ARMv8.3a";
3976 else if (FBS[AArch64::HasV8_4aOps])
3977 Str += "ARMv8.4a";
3978 else if (FBS[AArch64::HasV8_5aOps])
3979 Str += "ARMv8.5a";
3980 else if (FBS[AArch64::HasV8_6aOps])
3981 Str += "ARMv8.6a";
3982 else if (FBS[AArch64::HasV8_7aOps])
3983 Str += "ARMv8.7a";
3984 else if (FBS[AArch64::HasV8_8aOps])
3985 Str += "ARMv8.8a";
3986 else if (FBS[AArch64::HasV8_9aOps])
3987 Str += "ARMv8.9a";
3988 else if (FBS[AArch64::HasV9_0aOps])
3989 Str += "ARMv9-a";
3990 else if (FBS[AArch64::HasV9_1aOps])
3991 Str += "ARMv9.1a";
3992 else if (FBS[AArch64::HasV9_2aOps])
3993 Str += "ARMv9.2a";
3994 else if (FBS[AArch64::HasV9_3aOps])
3995 Str += "ARMv9.3a";
3996 else if (FBS[AArch64::HasV9_4aOps])
3997 Str += "ARMv9.4a";
3998 else if (FBS[AArch64::HasV9_5aOps])
3999 Str += "ARMv9.5a";
4000 else if (FBS[AArch64::HasV9_6aOps])
4001 Str += "ARMv9.6a";
4002 else if (FBS[AArch64::HasV9_7aOps])
4003 Str += "ARMv9.7a";
4004 else if (FBS[AArch64::HasV8_0rOps])
4005 Str += "ARMv8r";
4006 else {
4007 SmallVector<std::string, 2> ExtMatches;
4008 for (const auto& Ext : ExtensionMap) {
4009 // Use & in case multiple features are enabled
4010 if ((FBS & Ext.Features) != FeatureBitset())
4011 ExtMatches.push_back(Elt: Ext.Name);
4012 }
4013 Str += !ExtMatches.empty() ? llvm::join(R&: ExtMatches, Separator: ", ") : "(unknown)";
4014 }
4015}
4016
4017void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
4018 SMLoc S) {
4019 const uint16_t Op2 = Encoding & 7;
4020 const uint16_t Cm = (Encoding & 0x78) >> 3;
4021 const uint16_t Cn = (Encoding & 0x780) >> 7;
4022 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
4023
4024 const MCExpr *Expr = MCConstantExpr::create(Value: Op1, Ctx&: getContext());
4025
4026 Operands.push_back(
4027 Elt: AArch64Operand::CreateImm(Val: Expr, S, E: getLoc(), Ctx&: getContext()));
4028 Operands.push_back(
4029 Elt: AArch64Operand::CreateSysCR(Val: Cn, S, E: getLoc(), Ctx&: getContext()));
4030 Operands.push_back(
4031 Elt: AArch64Operand::CreateSysCR(Val: Cm, S, E: getLoc(), Ctx&: getContext()));
4032 Expr = MCConstantExpr::create(Value: Op2, Ctx&: getContext());
4033 Operands.push_back(
4034 Elt: AArch64Operand::CreateImm(Val: Expr, S, E: getLoc(), Ctx&: getContext()));
4035}
4036
/// parseSysAlias - The IC, DC, AT, TLBI, MLBI, PLBI, GIC and GSB
/// instructions, as well as the prediction-restriction instructions
/// (CFP/DVP/CPP/COSP), are simple aliases for the SYS instruction. Parse them
/// specially so that we create a SYS MCInst.
4040bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
4041 OperandVector &Operands) {
4042 if (Name.contains(C: '.'))
4043 return TokError(Msg: "invalid operand");
4044
4045 Mnemonic = Name;
4046 Operands.push_back(Elt: AArch64Operand::CreateToken(Str: "sys", S: NameLoc, Ctx&: getContext()));
4047
4048 const AsmToken &Tok = getTok();
4049 StringRef Op = Tok.getString();
4050 SMLoc S = Tok.getLoc();
4051 bool ExpectRegister = true;
4052 bool OptionalRegister = false;
4053 bool hasAll = getSTI().hasFeature(Feature: AArch64::FeatureAll);
4054 bool hasTLBID = getSTI().hasFeature(Feature: AArch64::FeatureTLBID);
4055
4056 if (Mnemonic == "ic") {
4057 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Name: Op);
4058 if (!IC)
4059 return TokError(Msg: "invalid operand for IC instruction");
4060 else if (!IC->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4061 std::string Str("IC " + std::string(IC->Name) + " requires: ");
4062 setRequiredFeatureString(FBS: IC->getRequiredFeatures(), Str);
4063 return TokError(Msg: Str);
4064 }
4065 ExpectRegister = IC->NeedsReg;
4066 createSysAlias(Encoding: IC->Encoding, Operands, S);
4067 } else if (Mnemonic == "dc") {
4068 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Name: Op);
4069 if (!DC)
4070 return TokError(Msg: "invalid operand for DC instruction");
4071 else if (!DC->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4072 std::string Str("DC " + std::string(DC->Name) + " requires: ");
4073 setRequiredFeatureString(FBS: DC->getRequiredFeatures(), Str);
4074 return TokError(Msg: Str);
4075 }
4076 createSysAlias(Encoding: DC->Encoding, Operands, S);
4077 } else if (Mnemonic == "at") {
4078 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Name: Op);
4079 if (!AT)
4080 return TokError(Msg: "invalid operand for AT instruction");
4081 else if (!AT->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4082 std::string Str("AT " + std::string(AT->Name) + " requires: ");
4083 setRequiredFeatureString(FBS: AT->getRequiredFeatures(), Str);
4084 return TokError(Msg: Str);
4085 }
4086 createSysAlias(Encoding: AT->Encoding, Operands, S);
4087 } else if (Mnemonic == "tlbi") {
4088 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Name: Op);
4089 if (!TLBI)
4090 return TokError(Msg: "invalid operand for TLBI instruction");
4091 else if (!TLBI->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4092 std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
4093 setRequiredFeatureString(FBS: TLBI->getRequiredFeatures(), Str);
4094 return TokError(Msg: Str);
4095 }
4096 ExpectRegister = TLBI->NeedsReg;
4097 bool hasTLBID = getSTI().hasFeature(Feature: AArch64::FeatureTLBID);
4098 if (hasAll || hasTLBID) {
4099 OptionalRegister = TLBI->OptionalReg;
4100 }
4101 createSysAlias(Encoding: TLBI->Encoding, Operands, S);
4102 } else if (Mnemonic == "mlbi") {
4103 const AArch64MLBI::MLBI *MLBI = AArch64MLBI::lookupMLBIByName(Name: Op);
4104 if (!MLBI)
4105 return TokError(Msg: "invalid operand for MLBI instruction");
4106 else if (!MLBI->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4107 std::string Str("MLBI " + std::string(MLBI->Name) + " requires: ");
4108 setRequiredFeatureString(FBS: MLBI->getRequiredFeatures(), Str);
4109 return TokError(Msg: Str);
4110 }
4111 ExpectRegister = MLBI->NeedsReg;
4112 createSysAlias(Encoding: MLBI->Encoding, Operands, S);
4113 } else if (Mnemonic == "gic") {
4114 const AArch64GIC::GIC *GIC = AArch64GIC::lookupGICByName(Name: Op);
4115 if (!GIC)
4116 return TokError(Msg: "invalid operand for GIC instruction");
4117 else if (!GIC->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4118 std::string Str("GIC " + std::string(GIC->Name) + " requires: ");
4119 setRequiredFeatureString(FBS: GIC->getRequiredFeatures(), Str);
4120 return TokError(Msg: Str);
4121 }
4122 ExpectRegister = GIC->NeedsReg;
4123 createSysAlias(Encoding: GIC->Encoding, Operands, S);
4124 } else if (Mnemonic == "gsb") {
4125 const AArch64GSB::GSB *GSB = AArch64GSB::lookupGSBByName(Name: Op);
4126 if (!GSB)
4127 return TokError(Msg: "invalid operand for GSB instruction");
4128 else if (!GSB->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4129 std::string Str("GSB " + std::string(GSB->Name) + " requires: ");
4130 setRequiredFeatureString(FBS: GSB->getRequiredFeatures(), Str);
4131 return TokError(Msg: Str);
4132 }
4133 ExpectRegister = false;
4134 createSysAlias(Encoding: GSB->Encoding, Operands, S);
4135 } else if (Mnemonic == "plbi") {
4136 const AArch64PLBI::PLBI *PLBI = AArch64PLBI::lookupPLBIByName(Name: Op);
4137 if (!PLBI)
4138 return TokError(Msg: "invalid operand for PLBI instruction");
4139 else if (!PLBI->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4140 std::string Str("PLBI " + std::string(PLBI->Name) + " requires: ");
4141 setRequiredFeatureString(FBS: PLBI->getRequiredFeatures(), Str);
4142 return TokError(Msg: Str);
4143 }
4144 ExpectRegister = PLBI->NeedsReg;
4145 if (hasAll || hasTLBID) {
4146 OptionalRegister = PLBI->OptionalReg;
4147 }
4148 createSysAlias(Encoding: PLBI->Encoding, Operands, S);
4149 } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" ||
4150 Mnemonic == "cosp") {
4151
4152 if (Op.lower() != "rctx")
4153 return TokError(Msg: "invalid operand for prediction restriction instruction");
4154
4155 bool hasPredres = hasAll || getSTI().hasFeature(Feature: AArch64::FeaturePredRes);
4156 bool hasSpecres2 = hasAll || getSTI().hasFeature(Feature: AArch64::FeatureSPECRES2);
4157
4158 if (Mnemonic == "cosp" && !hasSpecres2)
4159 return TokError(Msg: "COSP requires: predres2");
4160 if (!hasPredres)
4161 return TokError(Msg: Mnemonic.upper() + "RCTX requires: predres");
4162
4163 uint16_t PRCTX_Op2 = Mnemonic == "cfp" ? 0b100
4164 : Mnemonic == "dvp" ? 0b101
4165 : Mnemonic == "cosp" ? 0b110
4166 : Mnemonic == "cpp" ? 0b111
4167 : 0;
4168 assert(PRCTX_Op2 &&
4169 "Invalid mnemonic for prediction restriction instruction");
4170 const auto SYS_3_7_3 = 0b01101110011; // op=3, CRn=7, CRm=3
4171 const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;
4172
4173 createSysAlias(Encoding, Operands, S);
4174 }
4175
4176 Lex(); // Eat operand.
4177
4178 bool HasRegister = false;
4179
4180 // Check for the optional register operand.
4181 if (parseOptionalToken(T: AsmToken::Comma)) {
4182 if (Tok.isNot(K: AsmToken::Identifier) || parseRegister(Operands))
4183 return TokError(Msg: "expected register operand");
4184 HasRegister = true;
4185 }
4186
4187 if (!OptionalRegister) {
4188 if (ExpectRegister && !HasRegister)
4189 return TokError(Msg: "specified " + Mnemonic + " op requires a register");
4190 else if (!ExpectRegister && HasRegister)
4191 return TokError(Msg: "specified " + Mnemonic + " op does not use a register");
4192 }
4193
4194 if (parseToken(T: AsmToken::EndOfStatement, Msg: "unexpected token in argument list"))
4195 return true;
4196
4197 return false;
4198}
4199
/// parseSyslAlias - The GICR instructions are simple aliases for
/// the SYSL instruction. Parse them specially so that we create a
/// SYSL MCInst.
4203bool AArch64AsmParser::parseSyslAlias(StringRef Name, SMLoc NameLoc,
4204 OperandVector &Operands) {
4205
4206 Mnemonic = Name;
4207 Operands.push_back(
4208 Elt: AArch64Operand::CreateToken(Str: "sysl", S: NameLoc, Ctx&: getContext()));
4209
4210 // Now expect two operands (identifier + register)
4211 SMLoc startLoc = getLoc();
4212 const AsmToken &regTok = getTok();
4213 StringRef reg = regTok.getString();
4214 MCRegister Reg = matchRegisterNameAlias(Name: reg.lower(), Kind: RegKind::Scalar);
4215 if (!Reg)
4216 return TokError(Msg: "expected register operand");
4217
4218 Operands.push_back(Elt: AArch64Operand::CreateReg(
4219 Reg, Kind: RegKind::Scalar, S: startLoc, E: getLoc(), Ctx&: getContext(), EqTy: EqualsReg));
4220
4221 Lex(); // Eat token
4222 if (parseToken(T: AsmToken::Comma))
4223 return true;
4224
4225 // Check for identifier
4226 const AsmToken &operandTok = getTok();
4227 StringRef Op = operandTok.getString();
4228 SMLoc S2 = operandTok.getLoc();
4229 Lex(); // Eat token
4230
4231 if (Mnemonic == "gicr") {
4232 const AArch64GICR::GICR *GICR = AArch64GICR::lookupGICRByName(Name: Op);
4233 if (!GICR)
4234 return Error(L: S2, Msg: "invalid operand for GICR instruction");
4235 else if (!GICR->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4236 std::string Str("GICR " + std::string(GICR->Name) + " requires: ");
4237 setRequiredFeatureString(FBS: GICR->getRequiredFeatures(), Str);
4238 return Error(L: S2, Msg: Str);
4239 }
4240 createSysAlias(Encoding: GICR->Encoding, Operands, S: S2);
4241 }
4242
4243 if (parseToken(T: AsmToken::EndOfStatement, Msg: "unexpected token in argument list"))
4244 return true;
4245
4246 return false;
4247}
4248
4249/// parseSyspAlias - The TLBIP instructions are simple aliases for
4250/// the SYSP instruction. Parse them specially so that we create a SYSP MCInst.
bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
                                      OperandVector &Operands) {
  if (Name.contains(C: '.'))
    return TokError(Msg: "invalid operand");

  Mnemonic = Name;
  Operands.push_back(
      Elt: AArch64Operand::CreateToken(Str: "sysp", S: NameLoc, Ctx&: getContext()));

  const AsmToken &Tok = getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  if (Mnemonic == "tlbip") {
    // A trailing "nXS" qualifier selects the nXS variant; strip it before the
    // table lookup and fold it back into the encoding (bit 7) afterwards.
    bool HasnXSQualifier = Op.ends_with_insensitive(Suffix: "nXS");
    if (HasnXSQualifier) {
      Op = Op.drop_back(N: 3);
    }
    const AArch64TLBIP::TLBIP *TLBIPorig = AArch64TLBIP::lookupTLBIPByName(Name: Op);
    if (!TLBIPorig)
      return TokError(Msg: "invalid operand for TLBIP instruction");
    // Build a local copy with the nXS bit applied and FeatureXS added to the
    // requirements when the qualifier was present.
    const AArch64TLBIP::TLBIP TLBIP(
        TLBIPorig->Name, TLBIPorig->Encoding | (HasnXSQualifier ? (1 << 7) : 0),
        TLBIPorig->NeedsReg, TLBIPorig->OptionalReg,
        HasnXSQualifier
            ? TLBIPorig->FeaturesRequired | FeatureBitset({AArch64::FeatureXS})
            : TLBIPorig->FeaturesRequired);
    if (!TLBIP.haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
      std::string Name =
          std::string(TLBIP.Name) + (HasnXSQualifier ? "nXS" : "");
      std::string Str("TLBIP " + Name + " requires: ");
      setRequiredFeatureString(FBS: TLBIP.getRequiredFeatures(), Str);
      return TokError(Msg: Str);
    }
    createSysAlias(Encoding: TLBIP.Encoding, Operands, S);
  }

  Lex(); // Eat operand.

  if (parseComma())
    return true;

  // SYSP aliases take a pair of registers: either the implicit XZR pair or an
  // explicit sequential GPR pair.
  if (Tok.isNot(K: AsmToken::Identifier))
    return TokError(Msg: "expected register identifier");
  auto Result = tryParseSyspXzrPair(Operands);
  if (Result.isNoMatch())
    Result = tryParseGPRSeqPair(Operands);
  if (!Result.isSuccess())
    return TokError(Msg: "specified " + Mnemonic +
                    " op requires a pair of registers");

  if (parseToken(T: AsmToken::EndOfStatement, Msg: "unexpected token in argument list"))
    return true;

  return false;
}
4307
/// Parse the operand of a barrier instruction (DSB/DMB/ISB/TSB): either an
/// immediate in [0, 15] (with optional '#') or a named barrier option. DSB
/// operands that only make sense for the nXS form return NoMatch so the nXS
/// parser can take over.
ParseStatus AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = getTok();

  if (Mnemonic == "tsb" && Tok.isNot(K: AsmToken::Identifier))
    return TokError(Msg: "'csync' operand expected");
  if (parseOptionalToken(T: AsmToken::Hash) || Tok.is(K: AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    // Remember the token so it can be pushed back if this turns out to be
    // the nXS variant's operand.
    AsmToken IntTok = Tok;
    if (getParser().parseExpression(Res&: ImmVal))
      return ParseStatus::Failure;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
    if (!MCE)
      return Error(L: ExprLoc, Msg: "immediate value expected for barrier operand");
    int64_t Value = MCE->getValue();
    if (Mnemonic == "dsb" && Value > 15) {
      // This case is a no match here, but it might be matched by the nXS
      // variant. Deliberately not unlex the optional '#' as it is not necessary
      // to characterize an integer immediate.
      Parser.getLexer().UnLex(Token: IntTok);
      return ParseStatus::NoMatch;
    }
    if (Value < 0 || Value > 15)
      return Error(L: ExprLoc, Msg: "barrier operand out of range");
    auto DB = AArch64DB::lookupDBByEncoding(Encoding: Value);
    Operands.push_back(Elt: AArch64Operand::CreateBarrier(Val: Value, Str: DB ? DB->Name : "",
                                                      S: ExprLoc, Ctx&: getContext(),
                                                      HasnXSModifier: false /*hasnXSModifier*/));
    return ParseStatus::Success;
  }

  if (Tok.isNot(K: AsmToken::Identifier))
    return TokError(Msg: "invalid operand for instruction");

  // Named barrier option.
  StringRef Operand = Tok.getString();
  auto TSB = AArch64TSB::lookupTSBByName(Name: Operand);
  auto DB = AArch64DB::lookupDBByName(Name: Operand);
  // The only valid named option for ISB is 'sy'
  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy))
    return TokError(Msg: "'sy' or #imm operand expected");
  // The only valid named option for TSB is 'csync'
  if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync))
    return TokError(Msg: "'csync' operand expected");
  if (!DB && !TSB) {
    if (Mnemonic == "dsb") {
      // This case is a no match here, but it might be matched by the nXS
      // variant.
      return ParseStatus::NoMatch;
    }
    return TokError(Msg: "invalid barrier option name");
  }

  Operands.push_back(Elt: AArch64Operand::CreateBarrier(
      Val: DB ? DB->Encoding : TSB->Encoding, Str: Tok.getString(), S: getLoc(),
      Ctx&: getContext(), HasnXSModifier: false /*hasnXSModifier*/));
  Lex(); // Consume the option

  return ParseStatus::Success;
}
4369
/// tryParseBarriernXSOperand - Parse the operand of the DSB nXS variant:
/// either one of the immediates 16, 20, 24, 28 (Armv8.7-A), or a named nXS
/// barrier option. Only "dsb" accepts this operand form.
ParseStatus
AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
  const AsmToken &Tok = getTok();

  assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
  if (Mnemonic != "dsb")
    return ParseStatus::Failure;

  if (parseOptionalToken(T: AsmToken::Hash) || Tok.is(K: AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    if (getParser().parseExpression(Res&: ImmVal))
      return ParseStatus::Failure;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
    if (!MCE)
      return Error(L: ExprLoc, Msg: "immediate value expected for barrier operand");
    int64_t Value = MCE->getValue();
    // v8.7-A DSB in the nXS variant accepts only the following immediate
    // values: 16, 20, 24, 28.
    if (Value != 16 && Value != 20 && Value != 24 && Value != 28)
      return Error(L: ExprLoc, Msg: "barrier operand out of range");
    // The range check above guarantees the lookup succeeds.
    auto DB = AArch64DBnXS::lookupDBnXSByImmValue(ImmValue: Value);
    Operands.push_back(Elt: AArch64Operand::CreateBarrier(Val: DB->Encoding, Str: DB->Name,
                                                    S: ExprLoc, Ctx&: getContext(),
                                                    HasnXSModifier: true /*hasnXSModifier*/));
    return ParseStatus::Success;
  }

  if (Tok.isNot(K: AsmToken::Identifier))
    return TokError(Msg: "invalid operand for instruction");

  StringRef Operand = Tok.getString();
  auto DB = AArch64DBnXS::lookupDBnXSByName(Name: Operand);

  if (!DB)
    return TokError(Msg: "invalid barrier option name");

  Operands.push_back(
      Elt: AArch64Operand::CreateBarrier(Val: DB->Encoding, Str: Tok.getString(), S: getLoc(),
                                     Ctx&: getContext(), HasnXSModifier: true /*hasnXSModifier*/));
  Lex(); // Consume the option

  return ParseStatus::Success;
}
4415
/// tryParseSysReg - Parse a system-register operand (for MRS/MSR-style
/// instructions). Accepts known system-register names gated on subtarget
/// features, generic encodings via parseGenericRegister, and PSTATE field
/// names (both the imm0-15 and imm0-1 tables).
ParseStatus AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
  const AsmToken &Tok = getTok();

  if (Tok.isNot(K: AsmToken::Identifier))
    return ParseStatus::NoMatch;

  // SVCR names are handled elsewhere; do not claim them here.
  if (AArch64SVCR::lookupSVCRByName(Name: Tok.getString()))
    return ParseStatus::NoMatch;

  int MRSReg, MSRReg;
  auto SysReg = AArch64SysReg::lookupSysRegByName(Name: Tok.getString());
  if (SysReg && SysReg->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
    // Known register with its features available: honor its readable and
    // writeable attributes separately.
    MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
    MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
  } else
    // Otherwise fall back to the generic register form.
    MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Name: Tok.getString());

  unsigned PStateImm = -1;
  // Check both PSTATE encodings; the imm0-15 table takes precedence.
  auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Name: Tok.getString());
  if (PState15 && PState15->haveFeatures(ActiveFeatures: getSTI().getFeatureBits()))
    PStateImm = PState15->Encoding;
  if (!PState15) {
    auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Name: Tok.getString());
    if (PState1 && PState1->haveFeatures(ActiveFeatures: getSTI().getFeatureBits()))
      PStateImm = PState1->Encoding;
  }

  Operands.push_back(
      Elt: AArch64Operand::CreateSysReg(Str: Tok.getString(), S: getLoc(), MRSReg, MSRReg,
                                   PStateField: PStateImm, Ctx&: getContext()));
  Lex(); // Eat identifier

  return ParseStatus::Success;
}
4450
4451ParseStatus
4452AArch64AsmParser::tryParsePHintInstOperand(OperandVector &Operands) {
4453 SMLoc S = getLoc();
4454 const AsmToken &Tok = getTok();
4455 if (Tok.isNot(K: AsmToken::Identifier))
4456 return TokError(Msg: "invalid operand for instruction");
4457
4458 auto PH = AArch64PHint::lookupPHintByName(Tok.getString());
4459 if (!PH)
4460 return TokError(Msg: "invalid operand for instruction");
4461
4462 Operands.push_back(Elt: AArch64Operand::CreatePHintInst(
4463 Val: PH->Encoding, Str: Tok.getString(), S, Ctx&: getContext()));
4464 Lex(); // Eat identifier token.
4465 return ParseStatus::Success;
4466}
4467
/// tryParseNeonVectorRegister - Parse a vector register operand, optionally
/// followed by a ".<kind>" element-type suffix (emitted as a separate token
/// operand) and an optional vector index. Returns true on failure.
bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
  if (getTok().isNot(K: AsmToken::Identifier))
    return true;

  SMLoc S = getLoc();
  // Check for a vector register specifier first.
  StringRef Kind;
  MCRegister Reg;
  ParseStatus Res = tryParseVectorRegister(Reg, Kind, MatchKind: RegKind::NeonVector);
  if (!Res.isSuccess())
    return true;

  // Resolve the suffix to its element width (an empty suffix is valid too).
  const auto &KindRes = parseVectorKind(Suffix: Kind, VectorKind: RegKind::NeonVector);
  if (!KindRes)
    return true;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(
      Elt: AArch64Operand::CreateVectorReg(Reg, Kind: RegKind::NeonVector, ElementWidth,
                                      S, E: getLoc(), Ctx&: getContext()));

  // If there was an explicit qualifier, that goes on as a literal text
  // operand.
  if (!Kind.empty())
    Operands.push_back(Elt: AArch64Operand::CreateToken(Str: Kind, S, Ctx&: getContext()));

  // A trailing "[<imm>]" index is optional; only a hard failure is an error.
  return tryParseVectorIndex(Operands).isFailure();
}
4497
/// tryParseVectorIndex - Parse an optional "[<imm>]" vector-index suffix.
/// Returns NoMatch when no '[' is present, Failure when the closing ']' is
/// missing.
ParseStatus AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
  SMLoc SIdx = getLoc();
  if (parseOptionalToken(T: AsmToken::LBrac)) {
    const MCExpr *ImmVal;
    if (getParser().parseExpression(Res&: ImmVal))
      return ParseStatus::NoMatch;
    // The index must fold to a compile-time constant.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
    if (!MCE)
      return TokError(Msg: "immediate value expected for vector index");

    SMLoc E = getLoc();

    if (parseToken(T: AsmToken::RBrac, Msg: "']' expected"))
      return ParseStatus::Failure;

    Operands.push_back(Elt: AArch64Operand::CreateVectorIndex(Idx: MCE->getValue(), S: SIdx,
                                                         E, Ctx&: getContext()));
    return ParseStatus::Success;
  }

  return ParseStatus::NoMatch;
}
4520
// tryParseVectorRegister - Try to parse a vector register name with
// optional kind specifier. If it is a register specifier, eat the token
// and return it. On success \p Reg receives the register and \p Kind the
// ".<suffix>" text (including the dot), which may be empty.
ParseStatus AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg,
                                                     StringRef &Kind,
                                                     RegKind MatchKind) {
  const AsmToken &Tok = getTok();

  if (Tok.isNot(K: AsmToken::Identifier))
    return ParseStatus::NoMatch;

  StringRef Name = Tok.getString();
  // If there is a kind specifier, it's separated from the register name by
  // a '.'.
  size_t Start = 0, Next = Name.find(C: '.');
  StringRef Head = Name.slice(Start, End: Next);
  MCRegister RegNum = matchRegisterNameAlias(Name: Head, Kind: MatchKind);

  if (RegNum) {
    if (Next != StringRef::npos) {
      // Validate the suffix against the requested register kind.
      Kind = Name.substr(Start: Next);
      if (!isValidVectorKind(Suffix: Kind, VectorKind: MatchKind))
        return TokError(Msg: "invalid vector kind qualifier");
    }
    Lex(); // Eat the register token.

    Reg = RegNum;
    return ParseStatus::Success;
  }

  return ParseStatus::NoMatch;
}
4553
4554ParseStatus AArch64AsmParser::tryParseSVEPredicateOrPredicateAsCounterVector(
4555 OperandVector &Operands) {
4556 ParseStatus Status =
4557 tryParseSVEPredicateVector<RegKind::SVEPredicateAsCounter>(Operands);
4558 if (!Status.isSuccess())
4559 Status = tryParseSVEPredicateVector<RegKind::SVEPredicateVector>(Operands);
4560 return Status;
4561}
4562
/// tryParseSVEPredicateVector - Parse a SVE predicate register operand,
/// optionally followed by a vector index (predicate-as-counter only) or a
/// "/z" / "/m" predication qualifier, which is emitted as two extra token
/// operands ("/" plus "z" or "m").
template <RegKind RK>
ParseStatus
AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
  // Check for a SVE predicate register specifier first.
  const SMLoc S = getLoc();
  StringRef Kind;
  MCRegister RegNum;
  auto Res = tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RK);
  if (!Res.isSuccess())
    return Res;

  const auto &KindRes = parseVectorKind(Suffix: Kind, VectorKind: RK);
  if (!KindRes)
    return ParseStatus::NoMatch;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(Elt: AArch64Operand::CreateVectorReg(
      Reg: RegNum, Kind: RK, ElementWidth, S,
      E: getLoc(), Ctx&: getContext()));

  if (getLexer().is(K: AsmToken::LBrac)) {
    if (RK == RegKind::SVEPredicateAsCounter) {
      ParseStatus ResIndex = tryParseVectorIndex(Operands);
      if (ResIndex.isSuccess())
        return ParseStatus::Success;
    } else {
      // Indexed predicate, there's no comma so try parse the next operand
      // immediately.
      if (parseOperand(Operands, isCondCode: false, invertCondCode: false))
        return ParseStatus::NoMatch;
    }
  }

  // Not all predicates are followed by a '/m' or '/z'.
  if (getTok().isNot(K: AsmToken::Slash))
    return ParseStatus::Success;

  // But when they do they shouldn't have an element type suffix.
  if (!Kind.empty())
    return Error(L: S, Msg: "not expecting size suffix");

  // Add a literal slash as operand
  Operands.push_back(Elt: AArch64Operand::CreateToken(Str: "/", S: getLoc(), Ctx&: getContext()));

  Lex(); // Eat the slash.

  // Zeroing or merging?
  auto Pred = getTok().getString().lower();
  // Predicate-as-counter registers only allow zeroing predication.
  if (RK == RegKind::SVEPredicateAsCounter && Pred != "z")
    return Error(L: getLoc(), Msg: "expecting 'z' predication");

  if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m")
    return Error(L: getLoc(), Msg: "expecting 'm' or 'z' predication");

  // Add zero/merge token.
  const char *ZM = Pred == "z" ? "z" : "m";
  Operands.push_back(Elt: AArch64Operand::CreateToken(Str: ZM, S: getLoc(), Ctx&: getContext()));

  Lex(); // Eat zero/merge token.
  return ParseStatus::Success;
}
4625
4626/// parseRegister - Parse a register operand.
4627bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
4628 // Try for a Neon vector register.
4629 if (!tryParseNeonVectorRegister(Operands))
4630 return false;
4631
4632 if (tryParseZTOperand(Operands).isSuccess())
4633 return false;
4634
4635 // Otherwise try for a scalar register.
4636 if (tryParseGPROperand<false>(Operands).isSuccess())
4637 return false;
4638
4639 return true;
4640}
4641
/// parseSymbolicImmVal - Parse an immediate expression that may be prefixed
/// with a ":<specifier>:" relocation modifier (e.g. ":lo12:sym"). When the
/// target uses subsections-via-symbols, also accept an @-specifier and an
/// optional trailing "+ <expr>" / "- <expr>" term. Returns true on error.
bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  bool HasELFModifier = false;
  AArch64::Specifier RefKind;
  SMLoc Loc = getLexer().getLoc();
  if (parseOptionalToken(T: AsmToken::Colon)) {
    HasELFModifier = true;

    if (getTok().isNot(K: AsmToken::Identifier))
      return TokError(Msg: "expect relocation specifier in operand after ':'");

    // Specifier names are matched case-insensitively.
    std::string LowerCase = getTok().getIdentifier().lower();
    RefKind = StringSwitch<AArch64::Specifier>(LowerCase)
                  .Case(S: "lo12", Value: AArch64::S_LO12)
                  .Case(S: "abs_g3", Value: AArch64::S_ABS_G3)
                  .Case(S: "abs_g2", Value: AArch64::S_ABS_G2)
                  .Case(S: "abs_g2_s", Value: AArch64::S_ABS_G2_S)
                  .Case(S: "abs_g2_nc", Value: AArch64::S_ABS_G2_NC)
                  .Case(S: "abs_g1", Value: AArch64::S_ABS_G1)
                  .Case(S: "abs_g1_s", Value: AArch64::S_ABS_G1_S)
                  .Case(S: "abs_g1_nc", Value: AArch64::S_ABS_G1_NC)
                  .Case(S: "abs_g0", Value: AArch64::S_ABS_G0)
                  .Case(S: "abs_g0_s", Value: AArch64::S_ABS_G0_S)
                  .Case(S: "abs_g0_nc", Value: AArch64::S_ABS_G0_NC)
                  .Case(S: "prel_g3", Value: AArch64::S_PREL_G3)
                  .Case(S: "prel_g2", Value: AArch64::S_PREL_G2)
                  .Case(S: "prel_g2_nc", Value: AArch64::S_PREL_G2_NC)
                  .Case(S: "prel_g1", Value: AArch64::S_PREL_G1)
                  .Case(S: "prel_g1_nc", Value: AArch64::S_PREL_G1_NC)
                  .Case(S: "prel_g0", Value: AArch64::S_PREL_G0)
                  .Case(S: "prel_g0_nc", Value: AArch64::S_PREL_G0_NC)
                  .Case(S: "dtprel_g2", Value: AArch64::S_DTPREL_G2)
                  .Case(S: "dtprel_g1", Value: AArch64::S_DTPREL_G1)
                  .Case(S: "dtprel_g1_nc", Value: AArch64::S_DTPREL_G1_NC)
                  .Case(S: "dtprel_g0", Value: AArch64::S_DTPREL_G0)
                  .Case(S: "dtprel_g0_nc", Value: AArch64::S_DTPREL_G0_NC)
                  .Case(S: "dtprel_hi12", Value: AArch64::S_DTPREL_HI12)
                  .Case(S: "dtprel_lo12", Value: AArch64::S_DTPREL_LO12)
                  .Case(S: "dtprel_lo12_nc", Value: AArch64::S_DTPREL_LO12_NC)
                  .Case(S: "pg_hi21_nc", Value: AArch64::S_ABS_PAGE_NC)
                  .Case(S: "tprel_g2", Value: AArch64::S_TPREL_G2)
                  .Case(S: "tprel_g1", Value: AArch64::S_TPREL_G1)
                  .Case(S: "tprel_g1_nc", Value: AArch64::S_TPREL_G1_NC)
                  .Case(S: "tprel_g0", Value: AArch64::S_TPREL_G0)
                  .Case(S: "tprel_g0_nc", Value: AArch64::S_TPREL_G0_NC)
                  .Case(S: "tprel_hi12", Value: AArch64::S_TPREL_HI12)
                  .Case(S: "tprel_lo12", Value: AArch64::S_TPREL_LO12)
                  .Case(S: "tprel_lo12_nc", Value: AArch64::S_TPREL_LO12_NC)
                  .Case(S: "tlsdesc_lo12", Value: AArch64::S_TLSDESC_LO12)
                  .Case(S: "tlsdesc_auth_lo12", Value: AArch64::S_TLSDESC_AUTH_LO12)
                  .Case(S: "got", Value: AArch64::S_GOT_PAGE)
                  .Case(S: "gotpage_lo15", Value: AArch64::S_GOT_PAGE_LO15)
                  .Case(S: "got_lo12", Value: AArch64::S_GOT_LO12)
                  .Case(S: "got_auth", Value: AArch64::S_GOT_AUTH_PAGE)
                  .Case(S: "got_auth_lo12", Value: AArch64::S_GOT_AUTH_LO12)
                  .Case(S: "gottprel", Value: AArch64::S_GOTTPREL_PAGE)
                  .Case(S: "gottprel_lo12", Value: AArch64::S_GOTTPREL_LO12_NC)
                  .Case(S: "gottprel_g1", Value: AArch64::S_GOTTPREL_G1)
                  .Case(S: "gottprel_g0_nc", Value: AArch64::S_GOTTPREL_G0_NC)
                  .Case(S: "tlsdesc", Value: AArch64::S_TLSDESC_PAGE)
                  .Case(S: "tlsdesc_auth", Value: AArch64::S_TLSDESC_AUTH_PAGE)
                  .Case(S: "secrel_lo12", Value: AArch64::S_SECREL_LO12)
                  .Case(S: "secrel_hi12", Value: AArch64::S_SECREL_HI12)
                  .Default(Value: AArch64::S_INVALID)

    if (RefKind == AArch64::S_INVALID)
      return TokError(Msg: "expect relocation specifier in operand after ':'");

    Lex(); // Eat identifier

    if (parseToken(T: AsmToken::Colon, Msg: "expect ':' after relocation specifier"))
      return true;
  }

  if (getParser().parseExpression(Res&: ImmVal))
    return true;

  // Wrap the expression so the specifier survives into relocation emission.
  if (HasELFModifier)
    ImmVal = MCSpecifierExpr::create(Expr: ImmVal, S: RefKind, Ctx&: getContext(), Loc);

  SMLoc EndLoc;
  if (getContext().getAsmInfo()->hasSubsectionsViaSymbols()) {
    if (getParser().parseAtSpecifier(Res&: ImmVal, EndLoc))
      return true;
    // An optional "+ term" or "- term" may follow the @-specifier.
    const MCExpr *Term;
    MCBinaryExpr::Opcode Opcode;
    if (parseOptionalToken(T: AsmToken::Plus))
      Opcode = MCBinaryExpr::Add;
    else if (parseOptionalToken(T: AsmToken::Minus))
      Opcode = MCBinaryExpr::Sub;
    else
      return false;
    if (getParser().parsePrimaryExpr(Res&: Term, EndLoc))
      return true;
    ImmVal = MCBinaryExpr::create(Op: Opcode, LHS: ImmVal, RHS: Term, Ctx&: getContext());
  }

  return false;
}
4740
/// tryParseMatrixTileList - Parse a curly-braced list of SME matrix tiles,
/// e.g. "{za0.s, za1.s}", the "{za}" alias (full mask), or an empty list
/// "{}". The list is encoded as a bitmask over the ZAD0-ZAD7 aliases of the
/// named tiles.
ParseStatus AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
  if (getTok().isNot(K: AsmToken::LCurly))
    return ParseStatus::NoMatch;

  // Parse one "za<N>.<suffix>" tile name, returning its register and the
  // element width encoded by the suffix.
  auto ParseMatrixTile = [this](unsigned &Reg,
                                unsigned &ElementWidth) -> ParseStatus {
    StringRef Name = getTok().getString();
    size_t DotPosition = Name.find(C: '.');
    if (DotPosition == StringRef::npos)
      return ParseStatus::NoMatch;

    unsigned RegNum = matchMatrixTileListRegName(Name);
    if (!RegNum)
      return ParseStatus::NoMatch;

    StringRef Tail = Name.drop_front(N: DotPosition);
    const std::optional<std::pair<int, int>> &KindRes =
        parseVectorKind(Suffix: Tail, VectorKind: RegKind::Matrix);
    if (!KindRes)
      return TokError(
          Msg: "Expected the register to be followed by element width suffix");
    ElementWidth = KindRes->second;
    Reg = RegNum;
    Lex(); // Eat the register.
    return ParseStatus::Success;
  };

  SMLoc S = getLoc();
  // Keep the '{' so it can be pushed back if the first tile doesn't match.
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  // Empty matrix list
  if (parseOptionalToken(T: AsmToken::RCurly)) {
    Operands.push_back(Elt: AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0, S, E: getLoc(), Ctx&: getContext()));
    return ParseStatus::Success;
  }

  // Try parse {za} alias early
  if (getTok().getString().equals_insensitive(RHS: "za")) {
    Lex(); // Eat 'za'

    if (parseToken(T: AsmToken::RCurly, Msg: "'}' expected"))
      return ParseStatus::Failure;

    // 'za' means every tile: all eight mask bits set.
    Operands.push_back(Elt: AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0xFF, S, E: getLoc(), Ctx&: getContext()));
    return ParseStatus::Success;
  }

  SMLoc TileLoc = getLoc();

  unsigned FirstReg, ElementWidth;
  auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
  if (!ParseRes.isSuccess()) {
    // Put the '{' back so other list-operand parsers can try.
    getLexer().UnLex(Token: LCurly);
    return ParseRes;
  }

  const MCRegisterInfo *RI = getContext().getRegisterInfo();

  unsigned PrevReg = FirstReg;

  // DRegs accumulates the ZAD sub-tiles covered by every listed tile.
  SmallSet<unsigned, 8> DRegs;
  AArch64Operand::ComputeRegsForAlias(Reg: FirstReg, OutRegs&: DRegs, ElementWidth);

  SmallSet<unsigned, 8> SeenRegs;
  SeenRegs.insert(V: FirstReg);

  while (parseOptionalToken(T: AsmToken::Comma)) {
    TileLoc = getLoc();
    unsigned Reg, NextElementWidth;
    ParseRes = ParseMatrixTile(Reg, NextElementWidth);
    if (!ParseRes.isSuccess())
      return ParseRes;

    // Element size must match on all regs in the list.
    if (ElementWidth != NextElementWidth)
      return Error(L: TileLoc, Msg: "mismatched register size suffix");

    // Out-of-order and duplicate tiles are only warnings, not errors.
    if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(Reg: PrevReg)))
      Warning(L: TileLoc, Msg: "tile list not in ascending order");

    if (SeenRegs.contains(V: Reg))
      Warning(L: TileLoc, Msg: "duplicate tile in list");
    else {
      SeenRegs.insert(V: Reg);
      AArch64Operand::ComputeRegsForAlias(Reg, OutRegs&: DRegs, ElementWidth);
    }

    PrevReg = Reg;
  }

  if (parseToken(T: AsmToken::RCurly, Msg: "'}' expected"))
    return ParseStatus::Failure;

  // Fold the covered ZAD tiles into a bitmask relative to ZAD0.
  unsigned RegMask = 0;
  for (auto Reg : DRegs)
    RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
                       RI->getEncodingValue(Reg: AArch64::ZAD0));
  Operands.push_back(
      Elt: AArch64Operand::CreateMatrixTileList(RegMask, S, E: getLoc(), Ctx&: getContext()));

  return ParseStatus::Success;
}
4846
/// tryParseVectorList - Parse a curly-braced vector register list, either as
/// a range ("{v0.8b-v3.8b}") or as an explicit comma-separated sequence with
/// a constant stride ("{z0.d, z4.d, ...}"), optionally followed by a vector
/// index. \p ExpectMatch makes a non-matching first register a hard error.
template <RegKind VectorKind>
ParseStatus AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
                                                 bool ExpectMatch) {
  MCAsmParser &Parser = getParser();
  if (!getTok().is(K: AsmToken::LCurly))
    return ParseStatus::NoMatch;

  // Wrapper around parse function
  auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
                            bool NoMatchIsError) -> ParseStatus {
    auto RegTok = getTok();
    auto ParseRes = tryParseVectorRegister(Reg, Kind, MatchKind: VectorKind);
    if (ParseRes.isSuccess()) {
      if (parseVectorKind(Suffix: Kind, VectorKind))
        return ParseRes;
      llvm_unreachable("Expected a valid vector kind");
    }

    // "zt0" lists are handled elsewhere; report no match so that parser
    // gets a chance.
    if (RegTok.is(K: AsmToken::Identifier) && ParseRes.isNoMatch() &&
        RegTok.getString().equals_insensitive(RHS: "zt0"))
      return ParseStatus::NoMatch;

    // "za"-prefixed names may still be a matrix operand, so keep those as
    // NoMatch even when NoMatchIsError is set.
    if (RegTok.isNot(K: AsmToken::Identifier) || ParseRes.isFailure() ||
        (ParseRes.isNoMatch() && NoMatchIsError &&
         !RegTok.getString().starts_with_insensitive(Prefix: "za")))
      return Error(L: Loc, Msg: "vector register expected");

    return ParseStatus::NoMatch;
  };

  unsigned NumRegs = getNumRegsForRegKind(K: VectorKind);
  SMLoc S = getLoc();
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  StringRef Kind;
  MCRegister FirstReg;
  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);

  // Put back the original left bracket if there was no match, so that
  // different types of list-operands can be matched (e.g. SVE, Neon).
  if (ParseRes.isNoMatch())
    Parser.getLexer().UnLex(Token: LCurly);

  if (!ParseRes.isSuccess())
    return ParseRes;

  MCRegister PrevReg = FirstReg;
  unsigned Count = 1;

  unsigned Stride = 1;
  if (parseOptionalToken(T: AsmToken::Minus)) {
    // Range form: "{vA.<k>-vB.<k>}".
    SMLoc Loc = getLoc();
    StringRef NextKind;

    MCRegister Reg;
    ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
    if (!ParseRes.isSuccess())
      return ParseRes;

    // Any Kind suffixes must match on all regs in the list.
    if (Kind != NextKind)
      return Error(L: Loc, Msg: "mismatched register size suffix");

    // Count the registers in the range, allowing wraparound past the last
    // register of the class.
    unsigned Space =
        (PrevReg < Reg) ? (Reg - PrevReg) : (NumRegs - (PrevReg - Reg));

    if (Space == 0 || Space > 3)
      return Error(L: Loc, Msg: "invalid number of vectors");

    Count += Space;
  }
  else {
    // Comma-separated form: the stride is fixed by the first pair and every
    // subsequent register must continue it.
    bool HasCalculatedStride = false;
    while (parseOptionalToken(T: AsmToken::Comma)) {
      SMLoc Loc = getLoc();
      StringRef NextKind;
      MCRegister Reg;
      ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
      if (!ParseRes.isSuccess())
        return ParseRes;

      // Any Kind suffixes must match on all regs in the list.
      if (Kind != NextKind)
        return Error(L: Loc, Msg: "mismatched register size suffix");

      unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
      unsigned PrevRegVal =
          getContext().getRegisterInfo()->getEncodingValue(Reg: PrevReg);
      if (!HasCalculatedStride) {
        Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
                                       : (NumRegs - (PrevRegVal - RegVal));
        HasCalculatedStride = true;
      }

      // Register must be incremental (with a wraparound at last register).
      if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs))
        return Error(L: Loc, Msg: "registers must have the same sequential stride");

      PrevReg = Reg;
      ++Count;
    }
  }

  if (parseToken(T: AsmToken::RCurly, Msg: "'}' expected"))
    return ParseStatus::Failure;

  if (Count > 4)
    return Error(L: S, Msg: "invalid number of vectors");

  unsigned NumElements = 0;
  unsigned ElementWidth = 0;
  if (!Kind.empty()) {
    if (const auto &VK = parseVectorKind(Suffix: Kind, VectorKind))
      std::tie(args&: NumElements, args&: ElementWidth) = *VK;
  }

  Operands.push_back(Elt: AArch64Operand::CreateVectorList(
      Reg: FirstReg, Count, Stride, NumElements, ElementWidth, RegisterKind: VectorKind, S,
      E: getLoc(), Ctx&: getContext()));

  if (getTok().is(K: AsmToken::LBrac)) {
    // An optional index may follow the list.
    ParseStatus Res = tryParseVectorIndex(Operands);
    if (Res.isFailure())
      return ParseStatus::Failure;
    return ParseStatus::Success;
  }

  return ParseStatus::Success;
}
4977
4978/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4979bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4980 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, ExpectMatch: true);
4981 if (!ParseRes.isSuccess())
4982 return true;
4983
4984 return tryParseVectorIndex(Operands).isFailure();
4985}
4986
/// tryParseGPR64sp0Operand - Parse a scalar register optionally followed by
/// ", #0" (or ", 0"); any other index value is rejected. The register
/// operand is emitted either way.
ParseStatus AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  MCRegister RegNum;
  ParseStatus Res = tryParseScalarRegister(RegNum);
  if (!Res.isSuccess())
    return Res;

  // No comma: just the bare register.
  if (!parseOptionalToken(T: AsmToken::Comma)) {
    Operands.push_back(Elt: AArch64Operand::CreateReg(
        Reg: RegNum, Kind: RegKind::Scalar, S: StartLoc, E: getLoc(), Ctx&: getContext()));
    return ParseStatus::Success;
  }

  // The '#' before the index is optional.
  parseOptionalToken(T: AsmToken::Hash);

  if (getTok().isNot(K: AsmToken::Integer))
    return Error(L: getLoc(), Msg: "index must be absent or #0");

  const MCExpr *ImmVal;
  if (getParser().parseExpression(Res&: ImmVal) || !isa<MCConstantExpr>(Val: ImmVal) ||
      cast<MCConstantExpr>(Val: ImmVal)->getValue() != 0)
    return Error(L: getLoc(), Msg: "index must be absent or #0");

  Operands.push_back(Elt: AArch64Operand::CreateReg(
      Reg: RegNum, Kind: RegKind::Scalar, S: StartLoc, E: getLoc(), Ctx&: getContext()));
  return ParseStatus::Success;
}
5015
/// tryParseZTOperand - Parse an SME2 lookup-table register (matched
/// case-insensitively), optionally followed by a bracketed constant index
/// with an optional ", mul vl"-style decoration. The brackets, index, and
/// decoration are emitted as separate operands.
ParseStatus AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();
  const AsmToken &Tok = getTok();
  // Lower-case the name so matching is case-insensitive.
  std::string Name = Tok.getString().lower();

  MCRegister Reg = matchRegisterNameAlias(Name, Kind: RegKind::LookupTable);

  if (!Reg)
    return ParseStatus::NoMatch;

  Operands.push_back(Elt: AArch64Operand::CreateReg(
      Reg, Kind: RegKind::LookupTable, S: StartLoc, E: getLoc(), Ctx&: getContext()));
  Lex(); // Eat register.

  // Check if register is followed by an index
  if (parseOptionalToken(T: AsmToken::LBrac)) {
    Operands.push_back(
        Elt: AArch64Operand::CreateToken(Str: "[", S: getLoc(), Ctx&: getContext()));
    const MCExpr *ImmVal;
    if (getParser().parseExpression(Res&: ImmVal))
      return ParseStatus::NoMatch;
    // The index must fold to a compile-time constant.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
    if (!MCE)
      return TokError(Msg: "immediate value expected for vector index");
    Operands.push_back(Elt: AArch64Operand::CreateImm(
        Val: MCConstantExpr::create(Value: MCE->getValue(), Ctx&: getContext()), S: StartLoc,
        E: getLoc(), Ctx&: getContext()));
    // An optional ", mul ..." decoration may follow the index.
    if (parseOptionalToken(T: AsmToken::Comma))
      if (parseOptionalMulOperand(Operands))
        return ParseStatus::Failure;
    if (parseToken(T: AsmToken::RBrac, Msg: "']' expected"))
      return ParseStatus::Failure;
    Operands.push_back(
        Elt: AArch64Operand::CreateToken(Str: "]", S: getLoc(), Ctx&: getContext()));
  }
  return ParseStatus::Success;
}
5053
/// tryParseGPROperand - Parse a scalar GPR operand and, when
/// ParseShiftExtend is set, an optional ", <shift|extend> [#amount]"
/// modifier which is folded into the register operand. EqTy is forwarded to
/// the created register operand.
template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
ParseStatus AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  MCRegister RegNum;
  ParseStatus Res = tryParseScalarRegister(RegNum);
  if (!Res.isSuccess())
    return Res;

  // No shift/extend is the default.
  if (!ParseShiftExtend || getTok().isNot(K: AsmToken::Comma)) {
    Operands.push_back(Elt: AArch64Operand::CreateReg(
        Reg: RegNum, Kind: RegKind::Scalar, S: StartLoc, E: getLoc(), Ctx&: getContext(), EqTy));
    return ParseStatus::Success;
  }

  // Eat the comma
  Lex();

  // Match the shift
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
  Res = tryParseOptionalShiftExtend(Operands&: ExtOpnd);
  if (!Res.isSuccess())
    return Res;

  // Fold the parsed shift/extend into a single register operand rather
  // than emitting it separately.
  auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
  Operands.push_back(Elt: AArch64Operand::CreateReg(
      Reg: RegNum, Kind: RegKind::Scalar, S: StartLoc, E: Ext->getEndLoc(), Ctx&: getContext(), EqTy,
      ExtTy: Ext->getShiftExtendType(), ShiftAmount: Ext->getShiftExtendAmount(),
      HasExplicitAmount: Ext->hasShiftExtendAmount()));

  return ParseStatus::Success;
}
5087
/// parseOptionalMulOperand - Parse an SVE "mul vl" / "mul #<imm>" decoration
/// into token/immediate operands. Returns true when the current token is not
/// a 'mul' decoration, or (with a diagnostic) when the decoration is
/// malformed; returns false on success.
bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();

  // Some SVE instructions have a decoration after the immediate, i.e.
  // "mul vl". We parse them here and add tokens, which must be present in the
  // asm string in the tablegen instruction.
  bool NextIsVL =
      Parser.getLexer().peekTok().getString().equals_insensitive(RHS: "vl");
  bool NextIsHash = Parser.getLexer().peekTok().is(K: AsmToken::Hash);
  if (!getTok().getString().equals_insensitive(RHS: "mul") ||
      !(NextIsVL || NextIsHash))
    return true;

  Operands.push_back(
      Elt: AArch64Operand::CreateToken(Str: "mul", S: getLoc(), Ctx&: getContext()));
  Lex(); // Eat the "mul"

  if (NextIsVL) {
    Operands.push_back(
        Elt: AArch64Operand::CreateToken(Str: "vl", S: getLoc(), Ctx&: getContext()));
    Lex(); // Eat the "vl"
    return false;
  }

  if (NextIsHash) {
    Lex(); // Eat the #
    SMLoc S = getLoc();

    // Parse immediate operand.
    const MCExpr *ImmVal;
    if (!Parser.parseExpression(Res&: ImmVal))
      if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal)) {
        Operands.push_back(Elt: AArch64Operand::CreateImm(
            Val: MCConstantExpr::create(Value: MCE->getValue(), Ctx&: getContext()), S, E: getLoc(),
            Ctx&: getContext()));
        return false;
      }
  }

  // Fall through: "mul" was consumed but no valid 'vl' or constant followed.
  return Error(L: getLoc(), Msg: "expected 'vl' or '#<imm>'");
}
5129
5130bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
5131 StringRef &VecGroup) {
5132 MCAsmParser &Parser = getParser();
5133 auto Tok = Parser.getTok();
5134 if (Tok.isNot(K: AsmToken::Identifier))
5135 return true;
5136
5137 StringRef VG = StringSwitch<StringRef>(Tok.getString().lower())
5138 .Case(S: "vgx2", Value: "vgx2")
5139 .Case(S: "vgx4", Value: "vgx4")
5140 .Default(Value: "");
5141
5142 if (VG.empty())
5143 return true;
5144
5145 VecGroup = VG;
5146 Parser.Lex(); // Eat vgx[2|4]
5147 return false;
5148}
5149
5150bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
5151 auto Tok = getTok();
5152 if (Tok.isNot(K: AsmToken::Identifier))
5153 return true;
5154
5155 auto Keyword = Tok.getString();
5156 Keyword = StringSwitch<StringRef>(Keyword.lower())
5157 .Case(S: "sm", Value: "sm")
5158 .Case(S: "za", Value: "za")
5159 .Default(Value: Keyword);
5160 Operands.push_back(
5161 Elt: AArch64Operand::CreateToken(Str: Keyword, S: Tok.getLoc(), Ctx&: getContext()));
5162
5163 Lex();
5164 return false;
5165}
5166
/// parseOperand - Parse a single AArch64 instruction operand. For now this
/// parses the operand regardless of the mnemonic. When \p isCondCode is true
/// the operand is parsed as a condition code (inverted when \p invertCondCode
/// is set). Returns true on failure.
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                    bool invertCondCode) {
  MCAsmParser &Parser = getParser();

  ParseStatus ResTy =
      MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/true);

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  if (ResTy.isSuccess())
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy.isFailure())
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  // Helper: after an operand, consume an optional ", <shift|extend>" suffix.
  // If a comma was consumed but no shift/extend followed, the comma
  // (SavedTok) is pushed back so the caller's comma-separated operand loop
  // still sees it.
  auto parseOptionalShiftExtend = [&](AsmToken SavedTok) {
    if (parseOptionalToken(T: AsmToken::Comma)) {
      ParseStatus Res = tryParseOptionalShiftExtend(Operands);
      if (!Res.isNoMatch())
        return Res.isFailure();
      getLexer().UnLex(Token: SavedTok);
    }
    return false;
  };
  switch (getLexer().getKind()) {
  default: {
    // Anything else is treated as a symbolic/relocatable immediate expression.
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(ImmVal&: Expr))
      return Error(L: S, Msg: "invalid operand");

    SMLoc E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
    Operands.push_back(Elt: AArch64Operand::CreateImm(Val: Expr, S, E, Ctx&: getContext()));
    return parseOptionalShiftExtend(getTok());
  }
  case AsmToken::LBrac: {
    Operands.push_back(
        Elt: AArch64Operand::CreateToken(Str: "[", S: getLoc(), Ctx&: getContext()));
    Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, isCondCode: false, invertCondCode: false);
  }
  case AsmToken::LCurly: {
    // Try a NEON vector register list first; on success we are done.
    if (!parseNeonVectorList(Operands))
      return false;

    // Otherwise keep '{' as a plain token (used e.g. to group the SME ZA
    // array / tile-vector specifiers).
    Operands.push_back(
        Elt: AArch64Operand::CreateToken(Str: "{", S: getLoc(), Ctx&: getContext()));
    Lex(); // Eat '{'

    // There's no comma after a '{', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, isCondCode: false, invertCondCode: false);
  }
  case AsmToken::Identifier: {
    // See if this is a "VG" decoration used by SME instructions.
    StringRef VecGroup;
    if (!parseOptionalVGOperand(Operands, VecGroup)) {
      Operands.push_back(
          Elt: AArch64Operand::CreateToken(Str: VecGroup, S: getLoc(), Ctx&: getContext()));
      return false;
    }
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands)) {
      // Parse an optional shift/extend modifier.
      AsmToken SavedTok = getTok();
      if (parseOptionalToken(T: AsmToken::Comma)) {
        // The operand after the register may be a label (e.g. ADR/ADRP). Check
        // such cases and don't report an error when <label> happens to match a
        // shift/extend modifier.
        ParseStatus Res = MatchOperandParserImpl(Operands, Mnemonic,
                                                 /*ParseForAllFeatures=*/true);
        if (!Res.isNoMatch())
          return Res.isFailure();
        Res = tryParseOptionalShiftExtend(Operands);
        if (!Res.isNoMatch())
          return Res.isFailure();
        // No shift/extend followed the comma: put the comma back for the
        // operand loop.
        getLexer().UnLex(Token: SavedTok);
      }
      return false;
    }

    // See if this is a "mul vl" decoration or "mul #<int>" operand used
    // by SVE instructions.
    if (!parseOptionalMulOperand(Operands))
      return false;

    // If this is a two-word mnemonic, parse its special keyword
    // operand as an identifier.
    if (Mnemonic == "brb" || Mnemonic == "smstart" || Mnemonic == "smstop" ||
        Mnemonic == "gcsb")
      return parseKeywordOperand(Operands);

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal, *Term;
    S = getLoc();
    if (getParser().parseExpression(Res&: IdVal))
      return true;
    // Allow an optional @specifier (relocation modifier) on the symbol.
    if (getParser().parseAtSpecifier(Res&: IdVal, EndLoc&: E))
      return true;
    // Accept an optional "+/- <primary-expr>" addend after the label.
    std::optional<MCBinaryExpr::Opcode> Opcode;
    if (parseOptionalToken(T: AsmToken::Plus))
      Opcode = MCBinaryExpr::Add;
    else if (parseOptionalToken(T: AsmToken::Minus))
      Opcode = MCBinaryExpr::Sub;
    if (Opcode) {
      if (getParser().parsePrimaryExpr(Res&: Term, EndLoc&: E))
        return true;
      IdVal = MCBinaryExpr::create(Op: *Opcode, LHS: IdVal, RHS: Term, Ctx&: getContext());
    }
    Operands.push_back(Elt: AArch64Operand::CreateImm(Val: IdVal, S, E, Ctx&: getContext()));

    // Parse an optional shift/extend modifier.
    return parseOptionalShiftExtend(getTok());
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();

    parseOptionalToken(T: AsmToken::Hash);

    // Parse a negative sign
    bool isNegative = false;
    if (getTok().is(K: AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(K: AsmToken::Real))
        Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = getTok();
    if (Tok.is(K: AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt" && Mnemonic != "fcmne")
        return TokError(Msg: "unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError(Msg: "expected floating-point constant #0.0");
      Lex(); // Eat the token.

      // Emit the literal as the two raw tokens "#0" and ".0" expected by the
      // matcher for these comparisons.
      Operands.push_back(Elt: AArch64Operand::CreateToken(Str: "#0", S, Ctx&: getContext()));
      Operands.push_back(Elt: AArch64Operand::CreateToken(Str: ".0", S, Ctx&: getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
    Operands.push_back(Elt: AArch64Operand::CreateImm(Val: ImmVal, S, E, Ctx&: getContext()));

    // Parse an optional shift/extend modifier.
    return parseOptionalShiftExtend(Tok);
  }
  case AsmToken::Equal: {
    // "ldr <reg>, =<expr>" pseudo-instruction.
    SMLoc Loc = getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return TokError(Msg: "unexpected token in operand");
    Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(Res&: SubExprVal))
      return true;

    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
      return Error(L: Loc, Msg: "Only valid when first operand is register");

    bool IsXReg =
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Reg: Operands[1]->getReg());

    MCContext& Ctx = getContext();
    E = SMLoc::getFromPointer(Ptr: Loc.getPointer() - 1);
    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
    if (isa<MCConstantExpr>(Val: SubExprVal)) {
      uint64_t Imm = (cast<MCConstantExpr>(Val: SubExprVal))->getValue();
      // Normalize the constant to a 16-bit value plus a multiple-of-16 shift.
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      while (Imm > 0xFFFF && llvm::countr_zero(Val: Imm) >= 16) {
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
        // Rewrite in place as "movz <reg>, #imm [, lsl #sh]".
        Operands[0] = AArch64Operand::CreateToken(Str: "movz", S: Loc, Ctx);
        Operands.push_back(Elt: AArch64Operand::CreateImm(
            Val: MCConstantExpr::create(Value: Imm, Ctx), S, E, Ctx));
        if (ShiftAmt)
          Operands.push_back(Elt: AArch64Operand::CreateShiftExtend(ShOp: AArch64_AM::LSL,
                                                            Val: ShiftAmt, HasExplicitAmount: true, S, E, Ctx));
        return false;
      }
      APInt Simm = APInt(64, Imm << ShiftAmt);
      // check if the immediate is an unsigned or signed 32-bit int for W regs
      if (!IsXReg && !(Simm.isIntN(N: 32) || Simm.isSignedIntN(N: 32)))
        return Error(L: Loc, Msg: "Immediate too large for register");
    }
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc =
        getTargetStreamer().addConstantPoolEntry(SubExprVal, Size: IsXReg ? 8 : 4, Loc);
    Operands.push_back(Elt: AArch64Operand::CreateImm(Val: CPLoc, S, E, Ctx));
    return false;
  }
  }
}
5392
5393bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
5394 const MCExpr *Expr = nullptr;
5395 SMLoc L = getLoc();
5396 if (check(P: getParser().parseExpression(Res&: Expr), Loc: L, Msg: "expected expression"))
5397 return true;
5398 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Val: Expr);
5399 if (check(P: !Value, Loc: L, Msg: "expected constant expression"))
5400 return true;
5401 Out = Value->getValue();
5402 return false;
5403}
5404
5405bool AArch64AsmParser::parseComma() {
5406 if (check(P: getTok().isNot(K: AsmToken::Comma), Loc: getLoc(), Msg: "expected comma"))
5407 return true;
5408 // Eat the comma
5409 Lex();
5410 return false;
5411}
5412
/// Parse a register operand and check that it lies between \p First and
/// \p Last (registers of the bank starting at \p Base). On success \p Out
/// receives the register's index relative to \p Base. FP (x29) and LR (x30)
/// are accepted when \p Last names them, even though they are not laid out
/// after x28 in the register enum.
bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
                                            unsigned First, unsigned Last) {
  MCRegister Reg;
  SMLoc Start, End;
  if (check(P: parseRegister(Reg, StartLoc&: Start, EndLoc&: End), Loc: getLoc(), Msg: "expected register"))
    return true;

  // Special handling for FP and LR; they aren't linearly after x28 in
  // the registers enum.
  unsigned RangeEnd = Last;
  if (Base == AArch64::X0) {
    if (Last == AArch64::FP) {
      // Range ends at FP: clamp the enum range check to x28 and accept FP
      // explicitly as index 29.
      RangeEnd = AArch64::X28;
      if (Reg == AArch64::FP) {
        Out = 29;
        return false;
      }
    }
    if (Last == AArch64::LR) {
      // Range ends at LR: clamp to x28 and accept FP/LR explicitly as
      // indices 29/30.
      RangeEnd = AArch64::X28;
      if (Reg == AArch64::FP) {
        Out = 29;
        return false;
      } else if (Reg == AArch64::LR) {
        Out = 30;
        return false;
      }
    }
  }

  // The diagnostic names the caller-visible range [First, Last] even though
  // the check uses the (possibly clamped) RangeEnd.
  if (check(P: Reg < First || Reg > RangeEnd, Loc: Start,
            Msg: Twine("expected register in range ") +
                AArch64InstPrinter::getRegisterName(Reg: First) + " to " +
                AArch64InstPrinter::getRegisterName(Reg: Last)))
    return true;
  Out = Reg - Base;
  return false;
}
5451
/// Return true when the two parsed operands name the same register (or the
/// same vector list), honouring each operand's register-equality constraint:
/// exact equality by default, or W<->X sub/super-register aliasing when an
/// operand is constrained as EqualsSubReg/EqualsSuperReg.
bool AArch64AsmParser::areEqualRegs(const MCParsedAsmOperand &Op1,
                                    const MCParsedAsmOperand &Op2) const {
  auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
  auto &AOp2 = static_cast<const AArch64Operand&>(Op2);

  // Vector lists are equal only when start register, element count and
  // stride all agree.
  if (AOp1.isVectorList() && AOp2.isVectorList())
    return AOp1.getVectorListCount() == AOp2.getVectorListCount() &&
           AOp1.getVectorListStart() == AOp2.getVectorListStart() &&
           AOp1.getVectorListStride() == AOp2.getVectorListStride();

  if (!AOp1.isReg() || !AOp2.isReg())
    return false;

  // When both sides require plain register equality, defer to the generic
  // implementation.
  if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
      AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
    return MCTargetAsmParser::areEqualRegs(Op1, Op2);

  assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
         "Testing equality of non-scalar registers not supported");

  // Check if a registers match their sub/super register classes.
  if (AOp1.getRegEqualityTy() == EqualsSuperReg)
    return getXRegFromWReg(Reg: Op1.getReg()) == Op2.getReg();
  if (AOp1.getRegEqualityTy() == EqualsSubReg)
    return getWRegFromXReg(Reg: Op1.getReg()) == Op2.getReg();
  if (AOp2.getRegEqualityTy() == EqualsSuperReg)
    return getXRegFromWReg(Reg: Op2.getReg()) == Op1.getReg();
  if (AOp2.getRegEqualityTy() == EqualsSubReg)
    return getWRegFromXReg(Reg: Op2.getReg()) == Op1.getReg();

  return false;
}
5484
/// Parse an AArch64 instruction mnemonic followed by its operands.
/// The mnemonic is split on '.' characters: the head selects the instruction
/// (or routes to a SYS/SYSL/SYSP alias parser), and any remaining pieces
/// become suffix tokens (e.g. the condition code of "b.eq"). The remaining
/// comma-separated operands are then read until end of statement. Returns
/// true on error.
bool AArch64AsmParser::parseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  // Canonicalize the dot-less conditional-branch spellings ("beq") to the
  // architectural "b.<cc>" form so they share a single parsing path below.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case(S: "beq", Value: "b.eq")
             .Case(S: "bne", Value: "b.ne")
             .Case(S: "bhs", Value: "b.hs")
             .Case(S: "bcs", Value: "b.cs")
             .Case(S: "blo", Value: "b.lo")
             .Case(S: "bcc", Value: "b.cc")
             .Case(S: "bmi", Value: "b.mi")
             .Case(S: "bpl", Value: "b.pl")
             .Case(S: "bvs", Value: "b.vs")
             .Case(S: "bvc", Value: "b.vc")
             .Case(S: "bhi", Value: "b.hi")
             .Case(S: "bls", Value: "b.ls")
             .Case(S: "bge", Value: "b.ge")
             .Case(S: "blt", Value: "b.lt")
             .Case(S: "bgt", Value: "b.gt")
             .Case(S: "ble", Value: "b.le")
             .Case(S: "bal", Value: "b.al")
             .Case(S: "bnv", Value: "b.nv")
             .Default(Value: Name);

  // First check for the AArch64-specific .req directive.
  if (getTok().is(K: AsmToken::Identifier) &&
      getTok().getIdentifier().lower() == ".req") {
    parseDirectiveReq(Name, L: NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction."
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find(C: '.');
  StringRef Head = Name.slice(Start, End: Next);

  // IC, DC, AT, TLBI, MLBI, PLBI, GIC{R}, GSB and Prediction invalidation
  // instructions are aliases for the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
      Head == "cfp" || Head == "dvp" || Head == "cpp" || Head == "cosp" ||
      Head == "mlbi" || Head == "plbi" || Head == "gic" || Head == "gsb")
    return parseSysAlias(Name: Head, NameLoc, Operands);

  // GICR instructions are aliases for the SYSL instruction.
  if (Head == "gicr")
    return parseSyslAlias(Name: Head, NameLoc, Operands);

  // TLBIP instructions are aliases for the SYSP instruction.
  if (Head == "tlbip")
    return parseSyspAlias(Name: Head, NameLoc, Operands);

  Operands.push_back(Elt: AArch64Operand::CreateToken(Str: Head, S: NameLoc, Ctx&: getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
  if ((Head == "b" || Head == "bc") && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find(C: '.', From: Start + 1);
    Head = Name.slice(Start: Start + 1, End: Next);

    SMLoc SuffixLoc = SMLoc::getFromPointer(Ptr: NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    std::string Suggestion;
    AArch64CC::CondCode CC = parseCondCodeString(Cond: Head, Suggestion);
    if (CC == AArch64CC::Invalid) {
      std::string Msg = "invalid condition code";
      // parseCondCodeString may propose a close valid spelling for typos.
      if (!Suggestion.empty())
        Msg += ", did you mean " + Suggestion + "?";
      return Error(L: SuffixLoc, Msg);
    }
    Operands.push_back(Elt: AArch64Operand::CreateToken(Str: ".", S: SuffixLoc, Ctx&: getContext(),
                                                 /*IsSuffix=*/true));
    Operands.push_back(
        Elt: AArch64Operand::CreateCondCode(Code: CC, S: NameLoc, E: NameLoc, Ctx&: getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find(C: '.', From: Start + 1);
    Head = Name.slice(Start, End: Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(Ptr: NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(Elt: AArch64Operand::CreateToken(
        Str: Head, S: SuffixLoc, Ctx&: getContext(), /*IsSuffix=*/true));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  // generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(K: AsmToken::EndOfStatement)) {

    unsigned N = 1;
    do {
      // Parse and remember the operand. N tracks the 1-based operand
      // position so the condition-code operand of the mnemonics above is
      // parsed (and possibly inverted) at the right slot.
      if (parseOperand(Operands, isCondCode: (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       invertCondCode: condCodeSecondOperand || condCodeThirdOperand)) {
        return true;
      }

      // After successfully parsing some operands there are three special cases
      // to consider (i.e. notional operands not separated by commas). Two are
      // due to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // And a further case is '}', which ends a group of tokens specifying the
      // SME accumulator array 'ZA' or tile vector, i.e.
      //
      //  '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!

      if (parseOptionalToken(T: AsmToken::RBrac))
        Operands.push_back(
            Elt: AArch64Operand::CreateToken(Str: "]", S: getLoc(), Ctx&: getContext()));
      if (parseOptionalToken(T: AsmToken::Exclaim))
        Operands.push_back(
            Elt: AArch64Operand::CreateToken(Str: "!", S: getLoc(), Ctx&: getContext()));
      if (parseOptionalToken(T: AsmToken::RCurly))
        Operands.push_back(
            Elt: AArch64Operand::CreateToken(Str: "}", S: getLoc(), Ctx&: getContext()));

      ++N;
    } while (parseOptionalToken(T: AsmToken::Comma));
  }

  if (parseToken(T: AsmToken::EndOfStatement, Msg: "unexpected token in argument list"))
    return true;

  return false;
}
5637
5638static inline bool isMatchingOrAlias(MCRegister ZReg, MCRegister Reg) {
5639 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
5640 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
5641 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
5642 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
5643 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
5644 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
5645 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
5646}
5647
5648// FIXME: This entire function is a giant hack to provide us with decent
5649// operand range validation/diagnostics until TableGen/MC can be extended
5650// to support autogeneration of this kind of validation.
5651bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
5652 SmallVectorImpl<SMLoc> &Loc) {
5653 const MCRegisterInfo *RI = getContext().getRegisterInfo();
5654 const MCInstrDesc &MCID = MII.get(Opcode: Inst.getOpcode());
5655
5656 // A prefix only applies to the instruction following it. Here we extract
5657 // prefix information for the next instruction before validating the current
5658 // one so that in the case of failure we don't erroneously continue using the
5659 // current prefix.
5660 PrefixInfo Prefix = NextPrefix;
5661 NextPrefix = PrefixInfo::CreateFromInst(Inst, TSFlags: MCID.TSFlags);
5662
5663 // Before validating the instruction in isolation we run through the rules
5664 // applicable when it follows a prefix instruction.
5665 // NOTE: brk & hlt can be prefixed but require no additional validation.
5666 if (Prefix.isActive() &&
5667 (Inst.getOpcode() != AArch64::BRK) &&
5668 (Inst.getOpcode() != AArch64::HLT)) {
5669
5670 // Prefixed instructions must have a destructive operand.
5671 if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
5672 AArch64::NotDestructive)
5673 return Error(L: IDLoc, Msg: "instruction is unpredictable when following a"
5674 " movprfx, suggest replacing movprfx with mov");
5675
5676 // Destination operands must match.
5677 if (Inst.getOperand(i: 0).getReg() != Prefix.getDstReg())
5678 return Error(L: Loc[0], Msg: "instruction is unpredictable when following a"
5679 " movprfx writing to a different destination");
5680
5681 // Destination operand must not be used in any other location.
5682 for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
5683 if (Inst.getOperand(i).isReg() &&
5684 (MCID.getOperandConstraint(OpNum: i, Constraint: MCOI::TIED_TO) == -1) &&
5685 isMatchingOrAlias(ZReg: Prefix.getDstReg(), Reg: Inst.getOperand(i).getReg()))
5686 return Error(L: Loc[0], Msg: "instruction is unpredictable when following a"
5687 " movprfx and destination also used as non-destructive"
5688 " source");
5689 }
5690
5691 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
5692 if (Prefix.isPredicated()) {
5693 int PgIdx = -1;
5694
5695 // Find the instructions general predicate.
5696 for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
5697 if (Inst.getOperand(i).isReg() &&
5698 PPRRegClass.contains(Reg: Inst.getOperand(i).getReg())) {
5699 PgIdx = i;
5700 break;
5701 }
5702
5703 // Instruction must be predicated if the movprfx is predicated.
5704 if (PgIdx == -1 ||
5705 (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
5706 return Error(L: IDLoc, Msg: "instruction is unpredictable when following a"
5707 " predicated movprfx, suggest using unpredicated movprfx");
5708
5709 // Instruction must use same general predicate as the movprfx.
5710 if (Inst.getOperand(i: PgIdx).getReg() != Prefix.getPgReg())
5711 return Error(L: IDLoc, Msg: "instruction is unpredictable when following a"
5712 " predicated movprfx using a different general predicate");
5713
5714 // Instruction element type must match the movprfx.
5715 if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
5716 return Error(L: IDLoc, Msg: "instruction is unpredictable when following a"
5717 " predicated movprfx with a different element size");
5718 }
5719 }
5720
5721 // On ARM64EC, only valid registers may be used. Warn against using
5722 // explicitly disallowed registers.
5723 if (IsWindowsArm64EC) {
5724 for (unsigned i = 0; i < Inst.getNumOperands(); ++i) {
5725 if (Inst.getOperand(i).isReg()) {
5726 MCRegister Reg = Inst.getOperand(i).getReg();
5727 // At this point, vector registers are matched to their
5728 // appropriately sized alias.
5729 if ((Reg == AArch64::W13 || Reg == AArch64::X13) ||
5730 (Reg == AArch64::W14 || Reg == AArch64::X14) ||
5731 (Reg == AArch64::W23 || Reg == AArch64::X23) ||
5732 (Reg == AArch64::W24 || Reg == AArch64::X24) ||
5733 (Reg == AArch64::W28 || Reg == AArch64::X28) ||
5734 (Reg >= AArch64::Q16 && Reg <= AArch64::Q31) ||
5735 (Reg >= AArch64::D16 && Reg <= AArch64::D31) ||
5736 (Reg >= AArch64::S16 && Reg <= AArch64::S31) ||
5737 (Reg >= AArch64::H16 && Reg <= AArch64::H31) ||
5738 (Reg >= AArch64::B16 && Reg <= AArch64::B31)) {
5739 Warning(L: IDLoc, Msg: "register " + Twine(RI->getName(RegNo: Reg)) +
5740 " is disallowed on ARM64EC.");
5741 }
5742 }
5743 }
5744 }
5745
5746 // Check for indexed addressing modes w/ the base register being the
5747 // same as a destination/source register or pair load where
5748 // the Rt == Rt2. All of those are undefined behaviour.
5749 switch (Inst.getOpcode()) {
5750 case AArch64::LDPSWpre:
5751 case AArch64::LDPWpost:
5752 case AArch64::LDPWpre:
5753 case AArch64::LDPXpost:
5754 case AArch64::LDPXpre: {
5755 MCRegister Rt = Inst.getOperand(i: 1).getReg();
5756 MCRegister Rt2 = Inst.getOperand(i: 2).getReg();
5757 MCRegister Rn = Inst.getOperand(i: 3).getReg();
5758 if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt))
5759 return Error(L: Loc[0], Msg: "unpredictable LDP instruction, writeback base "
5760 "is also a destination");
5761 if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt2))
5762 return Error(L: Loc[1], Msg: "unpredictable LDP instruction, writeback base "
5763 "is also a destination");
5764 [[fallthrough]];
5765 }
5766 case AArch64::LDR_ZA:
5767 case AArch64::STR_ZA: {
5768 if (Inst.getOperand(i: 2).isImm() && Inst.getOperand(i: 4).isImm() &&
5769 Inst.getOperand(i: 2).getImm() != Inst.getOperand(i: 4).getImm())
5770 return Error(L: Loc[1],
5771 Msg: "unpredictable instruction, immediate and offset mismatch.");
5772 break;
5773 }
5774 case AArch64::LDPDi:
5775 case AArch64::LDPQi:
5776 case AArch64::LDPSi:
5777 case AArch64::LDPSWi:
5778 case AArch64::LDPWi:
5779 case AArch64::LDPXi: {
5780 MCRegister Rt = Inst.getOperand(i: 0).getReg();
5781 MCRegister Rt2 = Inst.getOperand(i: 1).getReg();
5782 if (Rt == Rt2)
5783 return Error(L: Loc[1], Msg: "unpredictable LDP instruction, Rt2==Rt");
5784 break;
5785 }
5786 case AArch64::LDPDpost:
5787 case AArch64::LDPDpre:
5788 case AArch64::LDPQpost:
5789 case AArch64::LDPQpre:
5790 case AArch64::LDPSpost:
5791 case AArch64::LDPSpre:
5792 case AArch64::LDPSWpost: {
5793 MCRegister Rt = Inst.getOperand(i: 1).getReg();
5794 MCRegister Rt2 = Inst.getOperand(i: 2).getReg();
5795 if (Rt == Rt2)
5796 return Error(L: Loc[1], Msg: "unpredictable LDP instruction, Rt2==Rt");
5797 break;
5798 }
5799 case AArch64::STPDpost:
5800 case AArch64::STPDpre:
5801 case AArch64::STPQpost:
5802 case AArch64::STPQpre:
5803 case AArch64::STPSpost:
5804 case AArch64::STPSpre:
5805 case AArch64::STPWpost:
5806 case AArch64::STPWpre:
5807 case AArch64::STPXpost:
5808 case AArch64::STPXpre: {
5809 MCRegister Rt = Inst.getOperand(i: 1).getReg();
5810 MCRegister Rt2 = Inst.getOperand(i: 2).getReg();
5811 MCRegister Rn = Inst.getOperand(i: 3).getReg();
5812 if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt))
5813 return Error(L: Loc[0], Msg: "unpredictable STP instruction, writeback base "
5814 "is also a source");
5815 if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt2))
5816 return Error(L: Loc[1], Msg: "unpredictable STP instruction, writeback base "
5817 "is also a source");
5818 break;
5819 }
5820 case AArch64::LDRBBpre:
5821 case AArch64::LDRBpre:
5822 case AArch64::LDRHHpre:
5823 case AArch64::LDRHpre:
5824 case AArch64::LDRSBWpre:
5825 case AArch64::LDRSBXpre:
5826 case AArch64::LDRSHWpre:
5827 case AArch64::LDRSHXpre:
5828 case AArch64::LDRSWpre:
5829 case AArch64::LDRWpre:
5830 case AArch64::LDRXpre:
5831 case AArch64::LDRBBpost:
5832 case AArch64::LDRBpost:
5833 case AArch64::LDRHHpost:
5834 case AArch64::LDRHpost:
5835 case AArch64::LDRSBWpost:
5836 case AArch64::LDRSBXpost:
5837 case AArch64::LDRSHWpost:
5838 case AArch64::LDRSHXpost:
5839 case AArch64::LDRSWpost:
5840 case AArch64::LDRWpost:
5841 case AArch64::LDRXpost: {
5842 MCRegister Rt = Inst.getOperand(i: 1).getReg();
5843 MCRegister Rn = Inst.getOperand(i: 2).getReg();
5844 if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt))
5845 return Error(L: Loc[0], Msg: "unpredictable LDR instruction, writeback base "
5846 "is also a source");
5847 break;
5848 }
5849 case AArch64::STRBBpost:
5850 case AArch64::STRBpost:
5851 case AArch64::STRHHpost:
5852 case AArch64::STRHpost:
5853 case AArch64::STRWpost:
5854 case AArch64::STRXpost:
5855 case AArch64::STRBBpre:
5856 case AArch64::STRBpre:
5857 case AArch64::STRHHpre:
5858 case AArch64::STRHpre:
5859 case AArch64::STRWpre:
5860 case AArch64::STRXpre: {
5861 MCRegister Rt = Inst.getOperand(i: 1).getReg();
5862 MCRegister Rn = Inst.getOperand(i: 2).getReg();
5863 if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt))
5864 return Error(L: Loc[0], Msg: "unpredictable STR instruction, writeback base "
5865 "is also a source");
5866 break;
5867 }
5868 case AArch64::STXRB:
5869 case AArch64::STXRH:
5870 case AArch64::STXRW:
5871 case AArch64::STXRX:
5872 case AArch64::STLXRB:
5873 case AArch64::STLXRH:
5874 case AArch64::STLXRW:
5875 case AArch64::STLXRX: {
5876 MCRegister Rs = Inst.getOperand(i: 0).getReg();
5877 MCRegister Rt = Inst.getOperand(i: 1).getReg();
5878 MCRegister Rn = Inst.getOperand(i: 2).getReg();
5879 if (RI->isSubRegisterEq(RegA: Rt, RegB: Rs) ||
5880 (RI->isSubRegisterEq(RegA: Rn, RegB: Rs) && Rn != AArch64::SP))
5881 return Error(L: Loc[0],
5882 Msg: "unpredictable STXR instruction, status is also a source");
5883 break;
5884 }
5885 case AArch64::STXPW:
5886 case AArch64::STXPX:
5887 case AArch64::STLXPW:
5888 case AArch64::STLXPX: {
5889 MCRegister Rs = Inst.getOperand(i: 0).getReg();
5890 MCRegister Rt1 = Inst.getOperand(i: 1).getReg();
5891 MCRegister Rt2 = Inst.getOperand(i: 2).getReg();
5892 MCRegister Rn = Inst.getOperand(i: 3).getReg();
5893 if (RI->isSubRegisterEq(RegA: Rt1, RegB: Rs) || RI->isSubRegisterEq(RegA: Rt2, RegB: Rs) ||
5894 (RI->isSubRegisterEq(RegA: Rn, RegB: Rs) && Rn != AArch64::SP))
5895 return Error(L: Loc[0],
5896 Msg: "unpredictable STXP instruction, status is also a source");
5897 break;
5898 }
5899 case AArch64::LDRABwriteback:
5900 case AArch64::LDRAAwriteback: {
5901 MCRegister Xt = Inst.getOperand(i: 0).getReg();
5902 MCRegister Xn = Inst.getOperand(i: 1).getReg();
5903 if (Xt == Xn)
5904 return Error(L: Loc[0],
5905 Msg: "unpredictable LDRA instruction, writeback base"
5906 " is also a destination");
5907 break;
5908 }
5909 }
5910
5911 // Check v8.8-A memops instructions.
5912 switch (Inst.getOpcode()) {
5913 case AArch64::CPYFP:
5914 case AArch64::CPYFPWN:
5915 case AArch64::CPYFPRN:
5916 case AArch64::CPYFPN:
5917 case AArch64::CPYFPWT:
5918 case AArch64::CPYFPWTWN:
5919 case AArch64::CPYFPWTRN:
5920 case AArch64::CPYFPWTN:
5921 case AArch64::CPYFPRT:
5922 case AArch64::CPYFPRTWN:
5923 case AArch64::CPYFPRTRN:
5924 case AArch64::CPYFPRTN:
5925 case AArch64::CPYFPT:
5926 case AArch64::CPYFPTWN:
5927 case AArch64::CPYFPTRN:
5928 case AArch64::CPYFPTN:
5929 case AArch64::CPYFM:
5930 case AArch64::CPYFMWN:
5931 case AArch64::CPYFMRN:
5932 case AArch64::CPYFMN:
5933 case AArch64::CPYFMWT:
5934 case AArch64::CPYFMWTWN:
5935 case AArch64::CPYFMWTRN:
5936 case AArch64::CPYFMWTN:
5937 case AArch64::CPYFMRT:
5938 case AArch64::CPYFMRTWN:
5939 case AArch64::CPYFMRTRN:
5940 case AArch64::CPYFMRTN:
5941 case AArch64::CPYFMT:
5942 case AArch64::CPYFMTWN:
5943 case AArch64::CPYFMTRN:
5944 case AArch64::CPYFMTN:
5945 case AArch64::CPYFE:
5946 case AArch64::CPYFEWN:
5947 case AArch64::CPYFERN:
5948 case AArch64::CPYFEN:
5949 case AArch64::CPYFEWT:
5950 case AArch64::CPYFEWTWN:
5951 case AArch64::CPYFEWTRN:
5952 case AArch64::CPYFEWTN:
5953 case AArch64::CPYFERT:
5954 case AArch64::CPYFERTWN:
5955 case AArch64::CPYFERTRN:
5956 case AArch64::CPYFERTN:
5957 case AArch64::CPYFET:
5958 case AArch64::CPYFETWN:
5959 case AArch64::CPYFETRN:
5960 case AArch64::CPYFETN:
5961 case AArch64::CPYP:
5962 case AArch64::CPYPWN:
5963 case AArch64::CPYPRN:
5964 case AArch64::CPYPN:
5965 case AArch64::CPYPWT:
5966 case AArch64::CPYPWTWN:
5967 case AArch64::CPYPWTRN:
5968 case AArch64::CPYPWTN:
5969 case AArch64::CPYPRT:
5970 case AArch64::CPYPRTWN:
5971 case AArch64::CPYPRTRN:
5972 case AArch64::CPYPRTN:
5973 case AArch64::CPYPT:
5974 case AArch64::CPYPTWN:
5975 case AArch64::CPYPTRN:
5976 case AArch64::CPYPTN:
5977 case AArch64::CPYM:
5978 case AArch64::CPYMWN:
5979 case AArch64::CPYMRN:
5980 case AArch64::CPYMN:
5981 case AArch64::CPYMWT:
5982 case AArch64::CPYMWTWN:
5983 case AArch64::CPYMWTRN:
5984 case AArch64::CPYMWTN:
5985 case AArch64::CPYMRT:
5986 case AArch64::CPYMRTWN:
5987 case AArch64::CPYMRTRN:
5988 case AArch64::CPYMRTN:
5989 case AArch64::CPYMT:
5990 case AArch64::CPYMTWN:
5991 case AArch64::CPYMTRN:
5992 case AArch64::CPYMTN:
5993 case AArch64::CPYE:
5994 case AArch64::CPYEWN:
5995 case AArch64::CPYERN:
5996 case AArch64::CPYEN:
5997 case AArch64::CPYEWT:
5998 case AArch64::CPYEWTWN:
5999 case AArch64::CPYEWTRN:
6000 case AArch64::CPYEWTN:
6001 case AArch64::CPYERT:
6002 case AArch64::CPYERTWN:
6003 case AArch64::CPYERTRN:
6004 case AArch64::CPYERTN:
6005 case AArch64::CPYET:
6006 case AArch64::CPYETWN:
6007 case AArch64::CPYETRN:
6008 case AArch64::CPYETN: {
6009 // Xd_wb == op0, Xs_wb == op1, Xn_wb == op2
6010 MCRegister Xd = Inst.getOperand(i: 3).getReg();
6011 MCRegister Xs = Inst.getOperand(i: 4).getReg();
6012 MCRegister Xn = Inst.getOperand(i: 5).getReg();
6013
6014 assert(Xd == Inst.getOperand(0).getReg() && "Xd_wb and Xd do not match");
6015 assert(Xs == Inst.getOperand(1).getReg() && "Xs_wb and Xs do not match");
6016 assert(Xn == Inst.getOperand(2).getReg() && "Xn_wb and Xn do not match");
6017
6018 if (Xd == Xs)
6019 return Error(L: Loc[0], Msg: "invalid CPY instruction, destination and source"
6020 " registers are the same");
6021 if (Xd == Xn)
6022 return Error(L: Loc[0], Msg: "invalid CPY instruction, destination and size"
6023 " registers are the same");
6024 if (Xs == Xn)
6025 return Error(L: Loc[0], Msg: "invalid CPY instruction, source and size"
6026 " registers are the same");
6027 break;
6028 }
6029 case AArch64::SETP:
6030 case AArch64::SETPT:
6031 case AArch64::SETPN:
6032 case AArch64::SETPTN:
6033 case AArch64::SETM:
6034 case AArch64::SETMT:
6035 case AArch64::SETMN:
6036 case AArch64::SETMTN:
6037 case AArch64::SETE:
6038 case AArch64::SETET:
6039 case AArch64::SETEN:
6040 case AArch64::SETETN:
6041 case AArch64::SETGP:
6042 case AArch64::SETGPT:
6043 case AArch64::SETGPN:
6044 case AArch64::SETGPTN:
6045 case AArch64::SETGM:
6046 case AArch64::SETGMT:
6047 case AArch64::SETGMN:
6048 case AArch64::SETGMTN:
6049 case AArch64::MOPSSETGE:
6050 case AArch64::MOPSSETGET:
6051 case AArch64::MOPSSETGEN:
6052 case AArch64::MOPSSETGETN: {
6053 // Xd_wb == op0, Xn_wb == op1
6054 MCRegister Xd = Inst.getOperand(i: 2).getReg();
6055 MCRegister Xn = Inst.getOperand(i: 3).getReg();
6056 MCRegister Xm = Inst.getOperand(i: 4).getReg();
6057
6058 assert(Xd == Inst.getOperand(0).getReg() && "Xd_wb and Xd do not match");
6059 assert(Xn == Inst.getOperand(1).getReg() && "Xn_wb and Xn do not match");
6060
6061 if (Xd == Xn)
6062 return Error(L: Loc[0], Msg: "invalid SET instruction, destination and size"
6063 " registers are the same");
6064 if (Xd == Xm)
6065 return Error(L: Loc[0], Msg: "invalid SET instruction, destination and source"
6066 " registers are the same");
6067 if (Xn == Xm)
6068 return Error(L: Loc[0], Msg: "invalid SET instruction, source and size"
6069 " registers are the same");
6070 break;
6071 }
6072 case AArch64::SETGOP:
6073 case AArch64::SETGOPT:
6074 case AArch64::SETGOPN:
6075 case AArch64::SETGOPTN:
6076 case AArch64::SETGOM:
6077 case AArch64::SETGOMT:
6078 case AArch64::SETGOMN:
6079 case AArch64::SETGOMTN:
6080 case AArch64::SETGOE:
6081 case AArch64::SETGOET:
6082 case AArch64::SETGOEN:
6083 case AArch64::SETGOETN: {
6084 // Xd_wb == op0, Xn_wb == op1
6085 MCRegister Xd = Inst.getOperand(i: 2).getReg();
6086 MCRegister Xn = Inst.getOperand(i: 3).getReg();
6087
6088 assert(Xd == Inst.getOperand(0).getReg() && "Xd_wb and Xd do not match");
6089 assert(Xn == Inst.getOperand(1).getReg() && "Xn_wb and Xn do not match");
6090
6091 if (Xd == Xn)
6092 return Error(L: Loc[0], Msg: "invalid SET instruction, destination and size"
6093 " registers are the same");
6094 break;
6095 }
6096 }
6097
6098 // Now check immediate ranges. Separate from the above as there is overlap
6099 // in the instructions being checked and this keeps the nested conditionals
6100 // to a minimum.
6101 switch (Inst.getOpcode()) {
6102 case AArch64::ADDSWri:
6103 case AArch64::ADDSXri:
6104 case AArch64::ADDWri:
6105 case AArch64::ADDXri:
6106 case AArch64::SUBSWri:
6107 case AArch64::SUBSXri:
6108 case AArch64::SUBWri:
6109 case AArch64::SUBXri: {
6110 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
6111 // some slight duplication here.
6112 if (Inst.getOperand(i: 2).isExpr()) {
6113 const MCExpr *Expr = Inst.getOperand(i: 2).getExpr();
6114 AArch64::Specifier ELFSpec;
6115 AArch64::Specifier DarwinSpec;
6116 int64_t Addend;
6117 if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
6118
6119 // Only allow these with ADDXri.
6120 if ((DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
6121 DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF) &&
6122 Inst.getOpcode() == AArch64::ADDXri)
6123 return false;
6124
6125 // Only allow these with ADDXri/ADDWri
6126 if (llvm::is_contained(
6127 Set: {AArch64::S_LO12, AArch64::S_GOT_AUTH_LO12,
6128 AArch64::S_DTPREL_HI12, AArch64::S_DTPREL_LO12,
6129 AArch64::S_DTPREL_LO12_NC, AArch64::S_TPREL_HI12,
6130 AArch64::S_TPREL_LO12, AArch64::S_TPREL_LO12_NC,
6131 AArch64::S_TLSDESC_LO12, AArch64::S_TLSDESC_AUTH_LO12,
6132 AArch64::S_SECREL_LO12, AArch64::S_SECREL_HI12},
6133 Element: ELFSpec) &&
6134 (Inst.getOpcode() == AArch64::ADDXri ||
6135 Inst.getOpcode() == AArch64::ADDWri))
6136 return false;
6137
6138 // Don't allow symbol refs in the immediate field otherwise
6139 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
6140 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
6141 // 'cmp w0, 'borked')
6142 return Error(L: Loc.back(), Msg: "invalid immediate expression");
6143 }
6144 // We don't validate more complex expressions here
6145 }
6146 return false;
6147 }
6148 default:
6149 return false;
6150 }
6151}
6152
// Forward declaration. Used by showMatchError (Match_MnemonicFail) to append
// a "did you mean ...?" suggestion for an unrecognized mnemonic, filtered by
// the currently available feature bits. Definition appears later in the file.
static std::string AArch64MnemonicSpellCheck(StringRef S,
                                             const FeatureBitset &FBS,
                                             unsigned VariantID = 0);
6156
/// Emit a human-readable diagnostic for a failed instruction match.
///
/// \param Loc       location the diagnostic is attached to.
/// \param ErrCode   one of the Match_* codes produced by the matcher.
/// \param ErrorInfo operand index into \p Operands; only consulted for
///                  Match_InvalidTiedOperand and Match_MnemonicFail paths.
/// \param Operands  the parsed operand list for the instruction.
/// \return always true, following the MCAsmParser convention that returning
///         true signals "an error was reported".
bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
                                      uint64_t ErrorInfo,
                                      OperandVector &Operands) {
  switch (ErrCode) {
  // Tied-operand failure: ErrorInfo indexes the operand that must match the
  // destination. The wording depends on the kind of equality required
  // (exact register, 32-bit sub-register, or 64-bit super-register form).
  case Match_InvalidTiedOperand: {
    auto &Op = static_cast<const AArch64Operand &>(*Operands[ErrorInfo]);
    if (Op.isVectorList())
      return Error(L: Loc, Msg: "operand must match destination register list");

    assert(Op.isReg() && "Unexpected operand type");
    switch (Op.getRegEqualityTy()) {
    case RegConstraintEqualityTy::EqualsSubReg:
      return Error(L: Loc, Msg: "operand must be 64-bit form of destination register");
    case RegConstraintEqualityTy::EqualsSuperReg:
      return Error(L: Loc, Msg: "operand must be 32-bit form of destination register");
    case RegConstraintEqualityTy::EqualsReg:
      return Error(L: Loc, Msg: "operand must match destination register");
    }
    llvm_unreachable("Unknown RegConstraintEqualityTy");
  }
  // Generic matcher failures.
  case Match_MissingFeature:
    return Error(L: Loc,
                 Msg: "instruction requires a CPU feature not currently enabled");
  case Match_InvalidOperand:
    return Error(L: Loc, Msg: "invalid operand for instruction");
  case Match_InvalidSuffix:
    return Error(L: Loc, Msg: "invalid type suffix for instruction");
  case Match_InvalidCondCode:
    return Error(L: Loc, Msg: "expected AArch64 condition code");
  // Add/sub and logical second-source / shift / extend diagnostics.
  case Match_AddSubRegExtendSmall:
    return Error(L: Loc,
      Msg: "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
  case Match_AddSubRegExtendLarge:
    return Error(L: Loc,
      Msg: "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubSecondSource:
    return Error(L: Loc,
      Msg: "expected compatible register, symbol or integer in range [0, 4095]");
  case Match_LogicalSecondSource:
    return Error(L: Loc, Msg: "expected compatible register or logical immediate");
  case Match_InvalidMovImm32Shift:
    return Error(L: Loc, Msg: "expected 'lsl' with optional integer 0 or 16");
  case Match_InvalidMovImm64Shift:
    return Error(L: Loc, Msg: "expected 'lsl' with optional integer 0, 16, 32 or 48");
  case Match_AddSubRegShift32:
    return Error(L: Loc,
      Msg: "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
  case Match_AddSubRegShift64:
    return Error(L: Loc,
      Msg: "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
  case Match_InvalidFPImm:
    return Error(L: Loc,
                 Msg: "expected compatible register or floating-point constant");
  // Memory-offset immediates. Each case spells out the scale (the multiple
  // the offset must be) and the resulting byte range for that access size.
  case Match_InvalidMemoryIndexedSImm6:
    return Error(L: Loc, Msg: "index must be an integer in range [-32, 31].");
  case Match_InvalidMemoryIndexedSImm5:
    return Error(L: Loc, Msg: "index must be an integer in range [-16, 15].");
  case Match_InvalidMemoryIndexed1SImm4:
    return Error(L: Loc, Msg: "index must be an integer in range [-8, 7].");
  case Match_InvalidMemoryIndexed2SImm4:
    return Error(L: Loc, Msg: "index must be a multiple of 2 in range [-16, 14].");
  case Match_InvalidMemoryIndexed3SImm4:
    return Error(L: Loc, Msg: "index must be a multiple of 3 in range [-24, 21].");
  case Match_InvalidMemoryIndexed4SImm4:
    return Error(L: Loc, Msg: "index must be a multiple of 4 in range [-32, 28].");
  case Match_InvalidMemoryIndexed16SImm4:
    return Error(L: Loc, Msg: "index must be a multiple of 16 in range [-128, 112].");
  case Match_InvalidMemoryIndexed32SImm4:
    return Error(L: Loc, Msg: "index must be a multiple of 32 in range [-256, 224].");
  case Match_InvalidMemoryIndexed1SImm6:
    return Error(L: Loc, Msg: "index must be an integer in range [-32, 31].");
  case Match_InvalidMemoryIndexedSImm8:
    return Error(L: Loc, Msg: "index must be an integer in range [-128, 127].");
  case Match_InvalidMemoryIndexedSImm9:
    return Error(L: Loc, Msg: "index must be an integer in range [-256, 255].");
  case Match_InvalidMemoryIndexed16SImm9:
    return Error(L: Loc, Msg: "index must be a multiple of 16 in range [-4096, 4080].");
  case Match_InvalidMemoryIndexed8SImm10:
    return Error(L: Loc, Msg: "index must be a multiple of 8 in range [-4096, 4088].");
  case Match_InvalidMemoryIndexed4SImm7:
    return Error(L: Loc, Msg: "index must be a multiple of 4 in range [-256, 252].");
  case Match_InvalidMemoryIndexed8SImm7:
    return Error(L: Loc, Msg: "index must be a multiple of 8 in range [-512, 504].");
  case Match_InvalidMemoryIndexed16SImm7:
    return Error(L: Loc, Msg: "index must be a multiple of 16 in range [-1024, 1008].");
  case Match_InvalidMemoryIndexed8UImm5:
    return Error(L: Loc, Msg: "index must be a multiple of 8 in range [0, 248].");
  case Match_InvalidMemoryIndexed8UImm3:
    return Error(L: Loc, Msg: "index must be a multiple of 8 in range [0, 56].");
  case Match_InvalidMemoryIndexed4UImm5:
    return Error(L: Loc, Msg: "index must be a multiple of 4 in range [0, 124].");
  case Match_InvalidMemoryIndexed2UImm5:
    return Error(L: Loc, Msg: "index must be a multiple of 2 in range [0, 62].");
  case Match_InvalidMemoryIndexed8UImm6:
    return Error(L: Loc, Msg: "index must be a multiple of 8 in range [0, 504].");
  case Match_InvalidMemoryIndexed16UImm6:
    return Error(L: Loc, Msg: "index must be a multiple of 16 in range [0, 1008].");
  case Match_InvalidMemoryIndexed4UImm6:
    return Error(L: Loc, Msg: "index must be a multiple of 4 in range [0, 252].");
  case Match_InvalidMemoryIndexed2UImm6:
    return Error(L: Loc, Msg: "index must be a multiple of 2 in range [0, 126].");
  case Match_InvalidMemoryIndexed1UImm6:
    return Error(L: Loc, Msg: "index must be in range [0, 63].");
  // Register-offset addressing: allowed extend operation and shift amount
  // for 32-bit (W) and 64-bit (X) index registers, per access size.
  case Match_InvalidMemoryWExtend8:
    return Error(L: Loc,
      Msg: "expected 'uxtw' or 'sxtw' with optional shift of #0");
  case Match_InvalidMemoryWExtend16:
    return Error(L: Loc,
      Msg: "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
  case Match_InvalidMemoryWExtend32:
    return Error(L: Loc,
      Msg: "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
  case Match_InvalidMemoryWExtend64:
    return Error(L: Loc,
      Msg: "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
  case Match_InvalidMemoryWExtend128:
    return Error(L: Loc,
      Msg: "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
  case Match_InvalidMemoryXExtend8:
    return Error(L: Loc,
      Msg: "expected 'lsl' or 'sxtx' with optional shift of #0");
  case Match_InvalidMemoryXExtend16:
    return Error(L: Loc,
      Msg: "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
  case Match_InvalidMemoryXExtend32:
    return Error(L: Loc,
      Msg: "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
  case Match_InvalidMemoryXExtend64:
    return Error(L: Loc,
      Msg: "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
  case Match_InvalidMemoryXExtend128:
    return Error(L: Loc,
      Msg: "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
  case Match_InvalidMemoryIndexed1:
    return Error(L: Loc, Msg: "index must be an integer in range [0, 4095].");
  case Match_InvalidMemoryIndexed2:
    return Error(L: Loc, Msg: "index must be a multiple of 2 in range [0, 8190].");
  case Match_InvalidMemoryIndexed4:
    return Error(L: Loc, Msg: "index must be a multiple of 4 in range [0, 16380].");
  case Match_InvalidMemoryIndexed8:
    return Error(L: Loc, Msg: "index must be a multiple of 8 in range [0, 32760].");
  case Match_InvalidMemoryIndexed16:
    return Error(L: Loc, Msg: "index must be a multiple of 16 in range [0, 65520].");
  // Plain immediate-range diagnostics.
  case Match_InvalidImm0_0:
    return Error(L: Loc, Msg: "immediate must be 0.");
  case Match_InvalidImm0_1:
    return Error(L: Loc, Msg: "immediate must be an integer in range [0, 1].");
  case Match_InvalidImm0_3:
    return Error(L: Loc, Msg: "immediate must be an integer in range [0, 3].");
  case Match_InvalidImm0_7:
    return Error(L: Loc, Msg: "immediate must be an integer in range [0, 7].");
  case Match_InvalidImm0_15:
    return Error(L: Loc, Msg: "immediate must be an integer in range [0, 15].");
  case Match_InvalidImm0_31:
    return Error(L: Loc, Msg: "immediate must be an integer in range [0, 31].");
  case Match_InvalidImm0_63:
    return Error(L: Loc, Msg: "immediate must be an integer in range [0, 63].");
  case Match_InvalidImm0_127:
    return Error(L: Loc, Msg: "immediate must be an integer in range [0, 127].");
  case Match_InvalidImm0_255:
    return Error(L: Loc, Msg: "immediate must be an integer in range [0, 255].");
  case Match_InvalidImm0_65535:
    return Error(L: Loc, Msg: "immediate must be an integer in range [0, 65535].");
  case Match_InvalidImm1_8:
    return Error(L: Loc, Msg: "immediate must be an integer in range [1, 8].");
  case Match_InvalidImm1_16:
    return Error(L: Loc, Msg: "immediate must be an integer in range [1, 16].");
  case Match_InvalidImm1_32:
    return Error(L: Loc, Msg: "immediate must be an integer in range [1, 32].");
  case Match_InvalidImm1_64:
    return Error(L: Loc, Msg: "immediate must be an integer in range [1, 64].");
  case Match_InvalidImmM1_62:
    return Error(L: Loc, Msg: "immediate must be an integer in range [-1, 62].");
  // Vector-select offsets written as an immediate range <immf>:<imml>.
  case Match_InvalidMemoryIndexedRange2UImm0:
    return Error(L: Loc, Msg: "vector select offset must be the immediate range 0:1.");
  case Match_InvalidMemoryIndexedRange2UImm1:
    return Error(L: Loc, Msg: "vector select offset must be an immediate range of the "
                     "form <immf>:<imml>, where the first "
                     "immediate is a multiple of 2 in the range [0, 2], and "
                     "the second immediate is immf + 1.");
  case Match_InvalidMemoryIndexedRange2UImm2:
  case Match_InvalidMemoryIndexedRange2UImm3:
    return Error(
        L: Loc,
        Msg: "vector select offset must be an immediate range of the form "
        "<immf>:<imml>, "
        "where the first immediate is a multiple of 2 in the range [0, 6] or "
        "[0, 14] "
        "depending on the instruction, and the second immediate is immf + 1.");
  case Match_InvalidMemoryIndexedRange4UImm0:
    return Error(L: Loc, Msg: "vector select offset must be the immediate range 0:3.");
  case Match_InvalidMemoryIndexedRange4UImm1:
  case Match_InvalidMemoryIndexedRange4UImm2:
    return Error(
        L: Loc,
        Msg: "vector select offset must be an immediate range of the form "
        "<immf>:<imml>, "
        "where the first immediate is a multiple of 4 in the range [0, 4] or "
        "[0, 12] "
        "depending on the instruction, and the second immediate is immf + 3.");
  // SVE add/sub/cpy immediates (byte immediate optionally shifted by 8).
  case Match_InvalidSVEAddSubImm8:
    return Error(L: Loc, Msg: "immediate must be an integer in range [0, 255]"
                     " with a shift amount of 0");
  case Match_InvalidSVEAddSubImm16:
  case Match_InvalidSVEAddSubImm32:
  case Match_InvalidSVEAddSubImm64:
    return Error(L: Loc, Msg: "immediate must be an integer in range [0, 255] or a "
                     "multiple of 256 in range [256, 65280]");
  case Match_InvalidSVECpyImm8:
    return Error(L: Loc, Msg: "immediate must be an integer in range [-128, 255]"
                     " with a shift amount of 0");
  case Match_InvalidSVECpyImm16:
    return Error(L: Loc, Msg: "immediate must be an integer in range [-128, 127] or a "
                     "multiple of 256 in range [-32768, 65280]");
  case Match_InvalidSVECpyImm32:
  case Match_InvalidSVECpyImm64:
    return Error(L: Loc, Msg: "immediate must be an integer in range [-128, 127] or a "
                     "multiple of 256 in range [-32768, 32512]");
  // Vector and SVE lane-index diagnostics.
  case Match_InvalidIndexRange0_0:
    return Error(L: Loc, Msg: "expected lane specifier '[0]'");
  case Match_InvalidIndexRange1_1:
    return Error(L: Loc, Msg: "expected lane specifier '[1]'");
  case Match_InvalidIndexRange0_15:
    return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 15].");
  case Match_InvalidIndexRange0_7:
    return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 7].");
  case Match_InvalidIndexRange0_3:
    return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 3].");
  case Match_InvalidIndexRange0_1:
    return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 1].");
  case Match_InvalidSVEIndexRange0_63:
    return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 63].");
  case Match_InvalidSVEIndexRange0_31:
    return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 31].");
  case Match_InvalidSVEIndexRange0_15:
    return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 15].");
  case Match_InvalidSVEIndexRange0_7:
    return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 7].");
  case Match_InvalidSVEIndexRange0_3:
    return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 3].");
  // Labels, system registers and complex rotations.
  case Match_InvalidLabel:
    return Error(L: Loc, Msg: "expected label or encodable integer pc offset");
  case Match_MRS:
    return Error(L: Loc, Msg: "expected readable system register");
  case Match_MSR:
  case Match_InvalidSVCR:
    return Error(L: Loc, Msg: "expected writable system register or pstate");
  case Match_InvalidComplexRotationEven:
    return Error(L: Loc, Msg: "complex rotation must be 0, 90, 180 or 270.");
  case Match_InvalidComplexRotationOdd:
    return Error(L: Loc, Msg: "complex rotation must be 90 or 270.");
  // Unknown mnemonic: build a "did you mean" suggestion constrained to the
  // features currently enabled on the subtarget.
  case Match_MnemonicFail: {
    std::string Suggestion = AArch64MnemonicSpellCheck(
        S: ((AArch64Operand &)*Operands[0]).getToken(),
        FBS: ComputeAvailableFeatures(FB: STI->getFeatureBits()));
    return Error(L: Loc, Msg: "unrecognized instruction mnemonic" + Suggestion);
  }
  // Shifted 64-bit GPR operands (shift amount fixed by the element size).
  case Match_InvalidGPR64shifted8:
    return Error(L: Loc, Msg: "register must be x0..x30 or xzr, without shift");
  case Match_InvalidGPR64shifted16:
    return Error(L: Loc, Msg: "register must be x0..x30 or xzr, with required shift 'lsl #1'");
  case Match_InvalidGPR64shifted32:
    return Error(L: Loc, Msg: "register must be x0..x30 or xzr, with required shift 'lsl #2'");
  case Match_InvalidGPR64shifted64:
    return Error(L: Loc, Msg: "register must be x0..x30 or xzr, with required shift 'lsl #3'");
  case Match_InvalidGPR64shifted128:
    return Error(
        L: Loc, Msg: "register must be x0..x30 or xzr, with required shift 'lsl #4'");
  case Match_InvalidGPR64NoXZRshifted8:
    return Error(L: Loc, Msg: "register must be x0..x30 without shift");
  case Match_InvalidGPR64NoXZRshifted16:
    return Error(L: Loc, Msg: "register must be x0..x30 with required shift 'lsl #1'");
  case Match_InvalidGPR64NoXZRshifted32:
    return Error(L: Loc, Msg: "register must be x0..x30 with required shift 'lsl #2'");
  case Match_InvalidGPR64NoXZRshifted64:
    return Error(L: Loc, Msg: "register must be x0..x30 with required shift 'lsl #3'");
  case Match_InvalidGPR64NoXZRshifted128:
    return Error(L: Loc, Msg: "register must be x0..x30 with required shift 'lsl #4'");
  // SVE gather/scatter vector-plus-extend addressing modifiers.
  case Match_InvalidZPR32UXTW8:
  case Match_InvalidZPR32SXTW8:
    return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
  case Match_InvalidZPR32UXTW16:
  case Match_InvalidZPR32SXTW16:
    return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
  case Match_InvalidZPR32UXTW32:
  case Match_InvalidZPR32SXTW32:
    return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
  case Match_InvalidZPR32UXTW64:
  case Match_InvalidZPR32SXTW64:
    return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
  case Match_InvalidZPR64UXTW8:
  case Match_InvalidZPR64SXTW8:
    return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
  case Match_InvalidZPR64UXTW16:
  case Match_InvalidZPR64SXTW16:
    return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
  case Match_InvalidZPR64UXTW32:
  case Match_InvalidZPR64SXTW32:
    return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
  case Match_InvalidZPR64UXTW64:
  case Match_InvalidZPR64SXTW64:
    return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
  case Match_InvalidZPR32LSL8:
    return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s'");
  case Match_InvalidZPR32LSL16:
    return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
  case Match_InvalidZPR32LSL32:
    return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
  case Match_InvalidZPR32LSL64:
    return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
  case Match_InvalidZPR64LSL8:
    return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d'");
  case Match_InvalidZPR64LSL16:
    return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
  case Match_InvalidZPR64LSL32:
    return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
  case Match_InvalidZPR64LSL64:
    return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
  // SVE vector-register class diagnostics (element width and restricted
  // register ranges used by indexed multiplies and multi-vector forms).
  case Match_InvalidZPR0:
    return Error(L: Loc, Msg: "expected register without element width suffix");
  case Match_InvalidZPR8:
  case Match_InvalidZPR16:
  case Match_InvalidZPR32:
  case Match_InvalidZPR64:
  case Match_InvalidZPR128:
    return Error(L: Loc, Msg: "invalid element width");
  case Match_InvalidZPR_3b8:
    return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.b..z7.b");
  case Match_InvalidZPR_3b16:
    return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.h..z7.h");
  case Match_InvalidZPR_3b32:
    return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.s..z7.s");
  case Match_InvalidZPR_4b8:
    return Error(L: Loc,
                 Msg: "Invalid restricted vector register, expected z0.b..z15.b");
  case Match_InvalidZPR_4b16:
    return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.h..z15.h");
  case Match_InvalidZPR_4b32:
    return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.s..z15.s");
  case Match_InvalidZPR_4b64:
    return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.d..z15.d");
  case Match_InvalidZPRMul2_Lo8:
    return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
                     "register in z0.b..z14.b");
  case Match_InvalidZPRMul2_Hi8:
    return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
                     "register in z16.b..z30.b");
  case Match_InvalidZPRMul2_Lo16:
    return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
                     "register in z0.h..z14.h");
  case Match_InvalidZPRMul2_Hi16:
    return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
                     "register in z16.h..z30.h");
  case Match_InvalidZPRMul2_Lo32:
    return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
                     "register in z0.s..z14.s");
  case Match_InvalidZPRMul2_Hi32:
    return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
                     "register in z16.s..z30.s");
  case Match_InvalidZPRMul2_Lo64:
    return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
                     "register in z0.d..z14.d");
  case Match_InvalidZPRMul2_Hi64:
    return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
                     "register in z16.d..z30.d");
  case Match_InvalidZPR_K0:
    return Error(L: Loc, Msg: "invalid restricted vector register, expected register "
                     "in z20..z23 or z28..z31");
  // Predicate and predicate-as-counter register diagnostics.
  case Match_InvalidSVEPattern:
    return Error(L: Loc, Msg: "invalid predicate pattern");
  case Match_InvalidSVEPPRorPNRAnyReg:
  case Match_InvalidSVEPPRorPNRBReg:
  case Match_InvalidSVEPredicateAnyReg:
  case Match_InvalidSVEPredicateBReg:
  case Match_InvalidSVEPredicateHReg:
  case Match_InvalidSVEPredicateSReg:
  case Match_InvalidSVEPredicateDReg:
    return Error(L: Loc, Msg: "invalid predicate register.");
  case Match_InvalidSVEPredicate3bAnyReg:
    return Error(L: Loc, Msg: "invalid restricted predicate register, expected p0..p7 (without element suffix)");
  case Match_InvalidSVEPNPredicateB_p8to15Reg:
  case Match_InvalidSVEPNPredicateH_p8to15Reg:
  case Match_InvalidSVEPNPredicateS_p8to15Reg:
  case Match_InvalidSVEPNPredicateD_p8to15Reg:
    return Error(L: Loc, Msg: "Invalid predicate register, expected PN in range "
                     "pn8..pn15 with element suffix.");
  case Match_InvalidSVEPNPredicateAny_p8to15Reg:
    return Error(L: Loc, Msg: "invalid restricted predicate-as-counter register "
                     "expected pn8..pn15");
  case Match_InvalidSVEPNPredicateBReg:
  case Match_InvalidSVEPNPredicateHReg:
  case Match_InvalidSVEPNPredicateSReg:
  case Match_InvalidSVEPNPredicateDReg:
    return Error(L: Loc, Msg: "Invalid predicate register, expected PN in range "
                     "pn0..pn15 with element suffix.");
  case Match_InvalidSVEVecLenSpecifier:
    return Error(L: Loc, Msg: "Invalid vector length specifier, expected VLx2 or VLx4");
  case Match_InvalidSVEPredicateListMul2x8:
  case Match_InvalidSVEPredicateListMul2x16:
  case Match_InvalidSVEPredicateListMul2x32:
  case Match_InvalidSVEPredicateListMul2x64:
    return Error(L: Loc, Msg: "Invalid vector list, expected list with 2 consecutive "
                     "predicate registers, where the first vector is a multiple of 2 "
                     "and with correct element type");
  // Exact floating-point immediates (e.g. FTSSEL-style two-value choices).
  case Match_InvalidSVEExactFPImmOperandHalfOne:
    return Error(L: Loc, Msg: "Invalid floating point constant, expected 0.5 or 1.0.");
  case Match_InvalidSVEExactFPImmOperandHalfTwo:
    return Error(L: Loc, Msg: "Invalid floating point constant, expected 0.5 or 2.0.");
  case Match_InvalidSVEExactFPImmOperandZeroOne:
    return Error(L: Loc, Msg: "Invalid floating point constant, expected 0.0 or 1.0.");
  // SME matrix (ZA) tile and tile-vector diagnostics.
  case Match_InvalidMatrixTileVectorH8:
  case Match_InvalidMatrixTileVectorV8:
    return Error(L: Loc, Msg: "invalid matrix operand, expected za0h.b or za0v.b");
  case Match_InvalidMatrixTileVectorH16:
  case Match_InvalidMatrixTileVectorV16:
    return Error(L: Loc,
                 Msg: "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
  case Match_InvalidMatrixTileVectorH32:
  case Match_InvalidMatrixTileVectorV32:
    return Error(L: Loc,
                 Msg: "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
  case Match_InvalidMatrixTileVectorH64:
  case Match_InvalidMatrixTileVectorV64:
    return Error(L: Loc,
                 Msg: "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
  case Match_InvalidMatrixTileVectorH128:
  case Match_InvalidMatrixTileVectorV128:
    return Error(L: Loc,
                 Msg: "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
  case Match_InvalidMatrixTile16:
    return Error(L: Loc, Msg: "invalid matrix operand, expected za[0-1].h");
  case Match_InvalidMatrixTile32:
    return Error(L: Loc, Msg: "invalid matrix operand, expected za[0-3].s");
  case Match_InvalidMatrixTile64:
    return Error(L: Loc, Msg: "invalid matrix operand, expected za[0-7].d");
  case Match_InvalidMatrix:
    return Error(L: Loc, Msg: "invalid matrix operand, expected za");
  case Match_InvalidMatrix8:
    return Error(L: Loc, Msg: "invalid matrix operand, expected suffix .b");
  case Match_InvalidMatrix16:
    return Error(L: Loc, Msg: "invalid matrix operand, expected suffix .h");
  case Match_InvalidMatrix32:
    return Error(L: Loc, Msg: "invalid matrix operand, expected suffix .s");
  case Match_InvalidMatrix64:
    return Error(L: Loc, Msg: "invalid matrix operand, expected suffix .d");
  case Match_InvalidMatrixIndexGPR32_12_15:
    return Error(L: Loc, Msg: "operand must be a register in range [w12, w15]");
  case Match_InvalidMatrixIndexGPR32_8_11:
    return Error(L: Loc, Msg: "operand must be a register in range [w8, w11]");
  // Multi-vector register-list diagnostics (consecutive and strided forms).
  case Match_InvalidSVEVectorList2x8Mul2:
  case Match_InvalidSVEVectorList2x16Mul2:
  case Match_InvalidSVEVectorList2x32Mul2:
  case Match_InvalidSVEVectorList2x64Mul2:
  case Match_InvalidSVEVectorList2x128Mul2:
    return Error(L: Loc, Msg: "Invalid vector list, expected list with 2 consecutive "
                     "SVE vectors, where the first vector is a multiple of 2 "
                     "and with matching element types");
  case Match_InvalidSVEVectorList2x8Mul2_Lo:
  case Match_InvalidSVEVectorList2x16Mul2_Lo:
  case Match_InvalidSVEVectorList2x32Mul2_Lo:
  case Match_InvalidSVEVectorList2x64Mul2_Lo:
    return Error(L: Loc, Msg: "Invalid vector list, expected list with 2 consecutive "
                     "SVE vectors in the range z0-z14, where the first vector "
                     "is a multiple of 2 "
                     "and with matching element types");
  case Match_InvalidSVEVectorList2x8Mul2_Hi:
  case Match_InvalidSVEVectorList2x16Mul2_Hi:
  case Match_InvalidSVEVectorList2x32Mul2_Hi:
  case Match_InvalidSVEVectorList2x64Mul2_Hi:
    return Error(L: Loc,
                 Msg: "Invalid vector list, expected list with 2 consecutive "
                 "SVE vectors in the range z16-z30, where the first vector "
                 "is a multiple of 2 "
                 "and with matching element types");
  case Match_InvalidSVEVectorList4x8Mul4:
  case Match_InvalidSVEVectorList4x16Mul4:
  case Match_InvalidSVEVectorList4x32Mul4:
  case Match_InvalidSVEVectorList4x64Mul4:
  case Match_InvalidSVEVectorList4x128Mul4:
    return Error(L: Loc, Msg: "Invalid vector list, expected list with 4 consecutive "
                     "SVE vectors, where the first vector is a multiple of 4 "
                     "and with matching element types");
  case Match_InvalidLookupTable:
    return Error(L: Loc, Msg: "Invalid lookup table, expected zt0");
  case Match_InvalidSVEVectorListStrided2x8:
  case Match_InvalidSVEVectorListStrided2x16:
  case Match_InvalidSVEVectorListStrided2x32:
  case Match_InvalidSVEVectorListStrided2x64:
    return Error(
        L: Loc,
        Msg: "Invalid vector list, expected list with each SVE vector in the list "
        "8 registers apart, and the first register in the range [z0, z7] or "
        "[z16, z23] and with correct element type");
  case Match_InvalidSVEVectorListStrided4x8:
  case Match_InvalidSVEVectorListStrided4x16:
  case Match_InvalidSVEVectorListStrided4x32:
  case Match_InvalidSVEVectorListStrided4x64:
    return Error(
        L: Loc,
        Msg: "Invalid vector list, expected list with each SVE vector in the list "
        "4 registers apart, and the first register in the range [z0, z3] or "
        "[z16, z19] and with correct element type");
  case Match_AddSubLSLImm3ShiftLarge:
    return Error(L: Loc,
      Msg: "expected 'lsl' with optional integer in range [0, 7]");
  default:
    llvm_unreachable("unexpected error code!");
  }
}
6666
6667static const char *getSubtargetFeatureName(uint64_t Val);
6668
6669bool AArch64AsmParser::matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
6670 OperandVector &Operands,
6671 MCStreamer &Out,
6672 uint64_t &ErrorInfo,
6673 bool MatchingInlineAsm) {
6674 assert(!Operands.empty() && "Unexpected empty operand list!");
6675 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
6676 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
6677
6678 StringRef Tok = Op.getToken();
6679 unsigned NumOperands = Operands.size();
6680
6681 if (NumOperands == 4 && Tok == "lsl") {
6682 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6683 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6684 if (Op2.isScalarReg() && Op3.isImm()) {
6685 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Val: Op3.getImm());
6686 if (Op3CE) {
6687 uint64_t Op3Val = Op3CE->getValue();
6688 uint64_t NewOp3Val = 0;
6689 uint64_t NewOp4Val = 0;
6690 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
6691 Reg: Op2.getReg())) {
6692 NewOp3Val = (32 - Op3Val) & 0x1f;
6693 NewOp4Val = 31 - Op3Val;
6694 } else {
6695 NewOp3Val = (64 - Op3Val) & 0x3f;
6696 NewOp4Val = 63 - Op3Val;
6697 }
6698
6699 const MCExpr *NewOp3 = MCConstantExpr::create(Value: NewOp3Val, Ctx&: getContext());
6700 const MCExpr *NewOp4 = MCConstantExpr::create(Value: NewOp4Val, Ctx&: getContext());
6701
6702 Operands[0] =
6703 AArch64Operand::CreateToken(Str: "ubfm", S: Op.getStartLoc(), Ctx&: getContext());
6704 Operands.push_back(Elt: AArch64Operand::CreateImm(
6705 Val: NewOp4, S: Op3.getStartLoc(), E: Op3.getEndLoc(), Ctx&: getContext()));
6706 Operands[3] = AArch64Operand::CreateImm(Val: NewOp3, S: Op3.getStartLoc(),
6707 E: Op3.getEndLoc(), Ctx&: getContext());
6708 }
6709 }
6710 } else if (NumOperands == 4 && Tok == "bfc") {
6711 // FIXME: Horrible hack to handle BFC->BFM alias.
6712 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6713 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
6714 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
6715
6716 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
6717 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(Val: LSBOp.getImm());
6718 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(Val: WidthOp.getImm());
6719
6720 if (LSBCE && WidthCE) {
6721 uint64_t LSB = LSBCE->getValue();
6722 uint64_t Width = WidthCE->getValue();
6723
6724 uint64_t RegWidth = 0;
6725 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6726 Reg: Op1.getReg()))
6727 RegWidth = 64;
6728 else
6729 RegWidth = 32;
6730
6731 if (LSB >= RegWidth)
6732 return Error(L: LSBOp.getStartLoc(),
6733 Msg: "expected integer in range [0, 31]");
6734 if (Width < 1 || Width > RegWidth)
6735 return Error(L: WidthOp.getStartLoc(),
6736 Msg: "expected integer in range [1, 32]");
6737
6738 uint64_t ImmR = 0;
6739 if (RegWidth == 32)
6740 ImmR = (32 - LSB) & 0x1f;
6741 else
6742 ImmR = (64 - LSB) & 0x3f;
6743
6744 uint64_t ImmS = Width - 1;
6745
6746 if (ImmR != 0 && ImmS >= ImmR)
6747 return Error(L: WidthOp.getStartLoc(),
6748 Msg: "requested insert overflows register");
6749
6750 const MCExpr *ImmRExpr = MCConstantExpr::create(Value: ImmR, Ctx&: getContext());
6751 const MCExpr *ImmSExpr = MCConstantExpr::create(Value: ImmS, Ctx&: getContext());
6752 Operands[0] =
6753 AArch64Operand::CreateToken(Str: "bfm", S: Op.getStartLoc(), Ctx&: getContext());
6754 Operands[2] = AArch64Operand::CreateReg(
6755 Reg: RegWidth == 32 ? AArch64::WZR : AArch64::XZR, Kind: RegKind::Scalar,
6756 S: SMLoc(), E: SMLoc(), Ctx&: getContext());
6757 Operands[3] = AArch64Operand::CreateImm(
6758 Val: ImmRExpr, S: LSBOp.getStartLoc(), E: LSBOp.getEndLoc(), Ctx&: getContext());
6759 Operands.emplace_back(
6760 Args: AArch64Operand::CreateImm(Val: ImmSExpr, S: WidthOp.getStartLoc(),
6761 E: WidthOp.getEndLoc(), Ctx&: getContext()));
6762 }
6763 }
6764 } else if (NumOperands == 5) {
6765 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
6766 // UBFIZ -> UBFM aliases.
6767 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
6768 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6769 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6770 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6771
6772 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6773 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Val: Op3.getImm());
6774 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Val: Op4.getImm());
6775
6776 if (Op3CE && Op4CE) {
6777 uint64_t Op3Val = Op3CE->getValue();
6778 uint64_t Op4Val = Op4CE->getValue();
6779
6780 uint64_t RegWidth = 0;
6781 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6782 Reg: Op1.getReg()))
6783 RegWidth = 64;
6784 else
6785 RegWidth = 32;
6786
6787 if (Op3Val >= RegWidth)
6788 return Error(L: Op3.getStartLoc(),
6789 Msg: "expected integer in range [0, 31]");
6790 if (Op4Val < 1 || Op4Val > RegWidth)
6791 return Error(L: Op4.getStartLoc(),
6792 Msg: "expected integer in range [1, 32]");
6793
6794 uint64_t NewOp3Val = 0;
6795 if (RegWidth == 32)
6796 NewOp3Val = (32 - Op3Val) & 0x1f;
6797 else
6798 NewOp3Val = (64 - Op3Val) & 0x3f;
6799
6800 uint64_t NewOp4Val = Op4Val - 1;
6801
6802 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
6803 return Error(L: Op4.getStartLoc(),
6804 Msg: "requested insert overflows register");
6805
6806 const MCExpr *NewOp3 =
6807 MCConstantExpr::create(Value: NewOp3Val, Ctx&: getContext());
6808 const MCExpr *NewOp4 =
6809 MCConstantExpr::create(Value: NewOp4Val, Ctx&: getContext());
6810 Operands[3] = AArch64Operand::CreateImm(
6811 Val: NewOp3, S: Op3.getStartLoc(), E: Op3.getEndLoc(), Ctx&: getContext());
6812 Operands[4] = AArch64Operand::CreateImm(
6813 Val: NewOp4, S: Op4.getStartLoc(), E: Op4.getEndLoc(), Ctx&: getContext());
6814 if (Tok == "bfi")
6815 Operands[0] = AArch64Operand::CreateToken(Str: "bfm", S: Op.getStartLoc(),
6816 Ctx&: getContext());
6817 else if (Tok == "sbfiz")
6818 Operands[0] = AArch64Operand::CreateToken(Str: "sbfm", S: Op.getStartLoc(),
6819 Ctx&: getContext());
6820 else if (Tok == "ubfiz")
6821 Operands[0] = AArch64Operand::CreateToken(Str: "ubfm", S: Op.getStartLoc(),
6822 Ctx&: getContext());
6823 else
6824 llvm_unreachable("No valid mnemonic for alias?");
6825 }
6826 }
6827
6828 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
6829 // UBFX -> UBFM aliases.
6830 } else if (NumOperands == 5 &&
6831 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
6832 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6833 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6834 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6835
6836 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6837 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Val: Op3.getImm());
6838 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Val: Op4.getImm());
6839
6840 if (Op3CE && Op4CE) {
6841 uint64_t Op3Val = Op3CE->getValue();
6842 uint64_t Op4Val = Op4CE->getValue();
6843
6844 uint64_t RegWidth = 0;
6845 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6846 Reg: Op1.getReg()))
6847 RegWidth = 64;
6848 else
6849 RegWidth = 32;
6850
6851 if (Op3Val >= RegWidth)
6852 return Error(L: Op3.getStartLoc(),
6853 Msg: "expected integer in range [0, 31]");
6854 if (Op4Val < 1 || Op4Val > RegWidth)
6855 return Error(L: Op4.getStartLoc(),
6856 Msg: "expected integer in range [1, 32]");
6857
6858 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
6859
6860 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
6861 return Error(L: Op4.getStartLoc(),
6862 Msg: "requested extract overflows register");
6863
6864 const MCExpr *NewOp4 =
6865 MCConstantExpr::create(Value: NewOp4Val, Ctx&: getContext());
6866 Operands[4] = AArch64Operand::CreateImm(
6867 Val: NewOp4, S: Op4.getStartLoc(), E: Op4.getEndLoc(), Ctx&: getContext());
6868 if (Tok == "bfxil")
6869 Operands[0] = AArch64Operand::CreateToken(Str: "bfm", S: Op.getStartLoc(),
6870 Ctx&: getContext());
6871 else if (Tok == "sbfx")
6872 Operands[0] = AArch64Operand::CreateToken(Str: "sbfm", S: Op.getStartLoc(),
6873 Ctx&: getContext());
6874 else if (Tok == "ubfx")
6875 Operands[0] = AArch64Operand::CreateToken(Str: "ubfm", S: Op.getStartLoc(),
6876 Ctx&: getContext());
6877 else
6878 llvm_unreachable("No valid mnemonic for alias?");
6879 }
6880 }
6881 }
6882 }
6883
6884 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
6885 // instruction for FP registers correctly in some rare circumstances. Convert
6886 // it to a safe instruction and warn (because silently changing someone's
6887 // assembly is rude).
6888 if (getSTI().hasFeature(Feature: AArch64::FeatureZCZeroingFPWorkaround) &&
6889 NumOperands == 4 && Tok == "movi") {
6890 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6891 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6892 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6893 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
6894 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
6895 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
6896 if (Suffix.lower() == ".2d" &&
6897 cast<MCConstantExpr>(Val: Op3.getImm())->getValue() == 0) {
6898 Warning(L: IDLoc, Msg: "instruction movi.2d with immediate #0 may not function"
6899 " correctly on this CPU, converting to equivalent movi.16b");
6900 // Switch the suffix to .16b.
6901 unsigned Idx = Op1.isToken() ? 1 : 2;
6902 Operands[Idx] =
6903 AArch64Operand::CreateToken(Str: ".16b", S: IDLoc, Ctx&: getContext());
6904 }
6905 }
6906 }
6907
6908 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
6909 // InstAlias can't quite handle this since the reg classes aren't
6910 // subclasses.
6911 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
6912 // The source register can be Wn here, but the matcher expects a
6913 // GPR64. Twiddle it here if necessary.
6914 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6915 if (Op.isScalarReg()) {
6916 MCRegister Reg = getXRegFromWReg(Reg: Op.getReg());
6917 Operands[2] = AArch64Operand::CreateReg(Reg, Kind: RegKind::Scalar,
6918 S: Op.getStartLoc(), E: Op.getEndLoc(),
6919 Ctx&: getContext());
6920 }
6921 }
6922 // FIXME: Likewise for sxt[bh] with a Xd dst operand
6923 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
6924 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6925 if (Op.isScalarReg() &&
6926 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6927 Reg: Op.getReg())) {
6928 // The source register can be Wn here, but the matcher expects a
6929 // GPR64. Twiddle it here if necessary.
6930 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6931 if (Op.isScalarReg()) {
6932 MCRegister Reg = getXRegFromWReg(Reg: Op.getReg());
6933 Operands[2] = AArch64Operand::CreateReg(Reg, Kind: RegKind::Scalar,
6934 S: Op.getStartLoc(),
6935 E: Op.getEndLoc(), Ctx&: getContext());
6936 }
6937 }
6938 }
6939 // FIXME: Likewise for uxt[bh] with a Xd dst operand
6940 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
6941 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6942 if (Op.isScalarReg() &&
6943 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6944 Reg: Op.getReg())) {
6945 // The source register can be Wn here, but the matcher expects a
6946 // GPR32. Twiddle it here if necessary.
6947 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6948 if (Op.isScalarReg()) {
6949 MCRegister Reg = getWRegFromXReg(Reg: Op.getReg());
6950 Operands[1] = AArch64Operand::CreateReg(Reg, Kind: RegKind::Scalar,
6951 S: Op.getStartLoc(),
6952 E: Op.getEndLoc(), Ctx&: getContext());
6953 }
6954 }
6955 }
6956
6957 MCInst Inst;
6958 FeatureBitset MissingFeatures;
6959 // First try to match against the secondary set of tables containing the
6960 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
6961 unsigned MatchResult =
6962 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6963 matchingInlineAsm: MatchingInlineAsm, VariantID: 1);
6964
6965 // If that fails, try against the alternate table containing long-form NEON:
6966 // "fadd v0.2s, v1.2s, v2.2s"
6967 if (MatchResult != Match_Success) {
6968 // But first, save the short-form match result: we can use it in case the
6969 // long-form match also fails.
6970 auto ShortFormNEONErrorInfo = ErrorInfo;
6971 auto ShortFormNEONMatchResult = MatchResult;
6972 auto ShortFormNEONMissingFeatures = MissingFeatures;
6973
6974 MatchResult =
6975 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6976 matchingInlineAsm: MatchingInlineAsm, VariantID: 0);
6977
6978 // Now, both matches failed, and the long-form match failed on the mnemonic
6979 // suffix token operand. The short-form match failure is probably more
6980 // relevant: use it instead.
6981 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
6982 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
6983 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
6984 MatchResult = ShortFormNEONMatchResult;
6985 ErrorInfo = ShortFormNEONErrorInfo;
6986 MissingFeatures = ShortFormNEONMissingFeatures;
6987 }
6988 }
6989
6990 switch (MatchResult) {
6991 case Match_Success: {
6992 // Perform range checking and other semantic validations
6993 SmallVector<SMLoc, 8> OperandLocs;
6994 NumOperands = Operands.size();
6995 for (unsigned i = 1; i < NumOperands; ++i)
6996 OperandLocs.push_back(Elt: Operands[i]->getStartLoc());
6997 if (validateInstruction(Inst, IDLoc, Loc&: OperandLocs))
6998 return true;
6999
7000 Inst.setLoc(IDLoc);
7001 Out.emitInstruction(Inst, STI: getSTI());
7002 return false;
7003 }
7004 case Match_MissingFeature: {
7005 assert(MissingFeatures.any() && "Unknown missing feature!");
7006 // Special case the error message for the very common case where only
7007 // a single subtarget feature is missing (neon, e.g.).
7008 std::string Msg = "instruction requires:";
7009 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
7010 if (MissingFeatures[i]) {
7011 Msg += " ";
7012 Msg += getSubtargetFeatureName(Val: i);
7013 }
7014 }
7015 return Error(L: IDLoc, Msg);
7016 }
7017 case Match_MnemonicFail:
7018 return showMatchError(Loc: IDLoc, ErrCode: MatchResult, ErrorInfo, Operands);
7019 case Match_InvalidOperand: {
7020 SMLoc ErrorLoc = IDLoc;
7021
7022 if (ErrorInfo != ~0ULL) {
7023 if (ErrorInfo >= Operands.size())
7024 return Error(L: IDLoc, Msg: "too few operands for instruction",
7025 Range: SMRange(IDLoc, getTok().getLoc()));
7026
7027 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
7028 if (ErrorLoc == SMLoc())
7029 ErrorLoc = IDLoc;
7030 }
7031 // If the match failed on a suffix token operand, tweak the diagnostic
7032 // accordingly.
7033 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
7034 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
7035 MatchResult = Match_InvalidSuffix;
7036
7037 return showMatchError(Loc: ErrorLoc, ErrCode: MatchResult, ErrorInfo, Operands);
7038 }
7039 case Match_InvalidTiedOperand:
7040 case Match_InvalidMemoryIndexed1:
7041 case Match_InvalidMemoryIndexed2:
7042 case Match_InvalidMemoryIndexed4:
7043 case Match_InvalidMemoryIndexed8:
7044 case Match_InvalidMemoryIndexed16:
7045 case Match_InvalidCondCode:
7046 case Match_AddSubLSLImm3ShiftLarge:
7047 case Match_AddSubRegExtendSmall:
7048 case Match_AddSubRegExtendLarge:
7049 case Match_AddSubSecondSource:
7050 case Match_LogicalSecondSource:
7051 case Match_AddSubRegShift32:
7052 case Match_AddSubRegShift64:
7053 case Match_InvalidMovImm32Shift:
7054 case Match_InvalidMovImm64Shift:
7055 case Match_InvalidFPImm:
7056 case Match_InvalidMemoryWExtend8:
7057 case Match_InvalidMemoryWExtend16:
7058 case Match_InvalidMemoryWExtend32:
7059 case Match_InvalidMemoryWExtend64:
7060 case Match_InvalidMemoryWExtend128:
7061 case Match_InvalidMemoryXExtend8:
7062 case Match_InvalidMemoryXExtend16:
7063 case Match_InvalidMemoryXExtend32:
7064 case Match_InvalidMemoryXExtend64:
7065 case Match_InvalidMemoryXExtend128:
7066 case Match_InvalidMemoryIndexed1SImm4:
7067 case Match_InvalidMemoryIndexed2SImm4:
7068 case Match_InvalidMemoryIndexed3SImm4:
7069 case Match_InvalidMemoryIndexed4SImm4:
7070 case Match_InvalidMemoryIndexed1SImm6:
7071 case Match_InvalidMemoryIndexed16SImm4:
7072 case Match_InvalidMemoryIndexed32SImm4:
7073 case Match_InvalidMemoryIndexed4SImm7:
7074 case Match_InvalidMemoryIndexed8SImm7:
7075 case Match_InvalidMemoryIndexed16SImm7:
7076 case Match_InvalidMemoryIndexed8UImm5:
7077 case Match_InvalidMemoryIndexed8UImm3:
7078 case Match_InvalidMemoryIndexed4UImm5:
7079 case Match_InvalidMemoryIndexed2UImm5:
7080 case Match_InvalidMemoryIndexed1UImm6:
7081 case Match_InvalidMemoryIndexed2UImm6:
7082 case Match_InvalidMemoryIndexed4UImm6:
7083 case Match_InvalidMemoryIndexed8UImm6:
7084 case Match_InvalidMemoryIndexed16UImm6:
7085 case Match_InvalidMemoryIndexedSImm6:
7086 case Match_InvalidMemoryIndexedSImm5:
7087 case Match_InvalidMemoryIndexedSImm8:
7088 case Match_InvalidMemoryIndexedSImm9:
7089 case Match_InvalidMemoryIndexed16SImm9:
7090 case Match_InvalidMemoryIndexed8SImm10:
7091 case Match_InvalidImm0_0:
7092 case Match_InvalidImm0_1:
7093 case Match_InvalidImm0_3:
7094 case Match_InvalidImm0_7:
7095 case Match_InvalidImm0_15:
7096 case Match_InvalidImm0_31:
7097 case Match_InvalidImm0_63:
7098 case Match_InvalidImm0_127:
7099 case Match_InvalidImm0_255:
7100 case Match_InvalidImm0_65535:
7101 case Match_InvalidImm1_8:
7102 case Match_InvalidImm1_16:
7103 case Match_InvalidImm1_32:
7104 case Match_InvalidImm1_64:
7105 case Match_InvalidImmM1_62:
7106 case Match_InvalidMemoryIndexedRange2UImm0:
7107 case Match_InvalidMemoryIndexedRange2UImm1:
7108 case Match_InvalidMemoryIndexedRange2UImm2:
7109 case Match_InvalidMemoryIndexedRange2UImm3:
7110 case Match_InvalidMemoryIndexedRange4UImm0:
7111 case Match_InvalidMemoryIndexedRange4UImm1:
7112 case Match_InvalidMemoryIndexedRange4UImm2:
7113 case Match_InvalidSVEAddSubImm8:
7114 case Match_InvalidSVEAddSubImm16:
7115 case Match_InvalidSVEAddSubImm32:
7116 case Match_InvalidSVEAddSubImm64:
7117 case Match_InvalidSVECpyImm8:
7118 case Match_InvalidSVECpyImm16:
7119 case Match_InvalidSVECpyImm32:
7120 case Match_InvalidSVECpyImm64:
7121 case Match_InvalidIndexRange0_0:
7122 case Match_InvalidIndexRange1_1:
7123 case Match_InvalidIndexRange0_15:
7124 case Match_InvalidIndexRange0_7:
7125 case Match_InvalidIndexRange0_3:
7126 case Match_InvalidIndexRange0_1:
7127 case Match_InvalidSVEIndexRange0_63:
7128 case Match_InvalidSVEIndexRange0_31:
7129 case Match_InvalidSVEIndexRange0_15:
7130 case Match_InvalidSVEIndexRange0_7:
7131 case Match_InvalidSVEIndexRange0_3:
7132 case Match_InvalidLabel:
7133 case Match_InvalidComplexRotationEven:
7134 case Match_InvalidComplexRotationOdd:
7135 case Match_InvalidGPR64shifted8:
7136 case Match_InvalidGPR64shifted16:
7137 case Match_InvalidGPR64shifted32:
7138 case Match_InvalidGPR64shifted64:
7139 case Match_InvalidGPR64shifted128:
7140 case Match_InvalidGPR64NoXZRshifted8:
7141 case Match_InvalidGPR64NoXZRshifted16:
7142 case Match_InvalidGPR64NoXZRshifted32:
7143 case Match_InvalidGPR64NoXZRshifted64:
7144 case Match_InvalidGPR64NoXZRshifted128:
7145 case Match_InvalidZPR32UXTW8:
7146 case Match_InvalidZPR32UXTW16:
7147 case Match_InvalidZPR32UXTW32:
7148 case Match_InvalidZPR32UXTW64:
7149 case Match_InvalidZPR32SXTW8:
7150 case Match_InvalidZPR32SXTW16:
7151 case Match_InvalidZPR32SXTW32:
7152 case Match_InvalidZPR32SXTW64:
7153 case Match_InvalidZPR64UXTW8:
7154 case Match_InvalidZPR64SXTW8:
7155 case Match_InvalidZPR64UXTW16:
7156 case Match_InvalidZPR64SXTW16:
7157 case Match_InvalidZPR64UXTW32:
7158 case Match_InvalidZPR64SXTW32:
7159 case Match_InvalidZPR64UXTW64:
7160 case Match_InvalidZPR64SXTW64:
7161 case Match_InvalidZPR32LSL8:
7162 case Match_InvalidZPR32LSL16:
7163 case Match_InvalidZPR32LSL32:
7164 case Match_InvalidZPR32LSL64:
7165 case Match_InvalidZPR64LSL8:
7166 case Match_InvalidZPR64LSL16:
7167 case Match_InvalidZPR64LSL32:
7168 case Match_InvalidZPR64LSL64:
7169 case Match_InvalidZPR0:
7170 case Match_InvalidZPR8:
7171 case Match_InvalidZPR16:
7172 case Match_InvalidZPR32:
7173 case Match_InvalidZPR64:
7174 case Match_InvalidZPR128:
7175 case Match_InvalidZPR_3b8:
7176 case Match_InvalidZPR_3b16:
7177 case Match_InvalidZPR_3b32:
7178 case Match_InvalidZPR_4b8:
7179 case Match_InvalidZPR_4b16:
7180 case Match_InvalidZPR_4b32:
7181 case Match_InvalidZPR_4b64:
7182 case Match_InvalidSVEPPRorPNRAnyReg:
7183 case Match_InvalidSVEPPRorPNRBReg:
7184 case Match_InvalidSVEPredicateAnyReg:
7185 case Match_InvalidSVEPattern:
7186 case Match_InvalidSVEVecLenSpecifier:
7187 case Match_InvalidSVEPredicateBReg:
7188 case Match_InvalidSVEPredicateHReg:
7189 case Match_InvalidSVEPredicateSReg:
7190 case Match_InvalidSVEPredicateDReg:
7191 case Match_InvalidSVEPredicate3bAnyReg:
7192 case Match_InvalidSVEPNPredicateB_p8to15Reg:
7193 case Match_InvalidSVEPNPredicateH_p8to15Reg:
7194 case Match_InvalidSVEPNPredicateS_p8to15Reg:
7195 case Match_InvalidSVEPNPredicateD_p8to15Reg:
7196 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
7197 case Match_InvalidSVEPNPredicateBReg:
7198 case Match_InvalidSVEPNPredicateHReg:
7199 case Match_InvalidSVEPNPredicateSReg:
7200 case Match_InvalidSVEPNPredicateDReg:
7201 case Match_InvalidSVEPredicateListMul2x8:
7202 case Match_InvalidSVEPredicateListMul2x16:
7203 case Match_InvalidSVEPredicateListMul2x32:
7204 case Match_InvalidSVEPredicateListMul2x64:
7205 case Match_InvalidSVEExactFPImmOperandHalfOne:
7206 case Match_InvalidSVEExactFPImmOperandHalfTwo:
7207 case Match_InvalidSVEExactFPImmOperandZeroOne:
7208 case Match_InvalidMatrixTile16:
7209 case Match_InvalidMatrixTile32:
7210 case Match_InvalidMatrixTile64:
7211 case Match_InvalidMatrix:
7212 case Match_InvalidMatrix8:
7213 case Match_InvalidMatrix16:
7214 case Match_InvalidMatrix32:
7215 case Match_InvalidMatrix64:
7216 case Match_InvalidMatrixTileVectorH8:
7217 case Match_InvalidMatrixTileVectorH16:
7218 case Match_InvalidMatrixTileVectorH32:
7219 case Match_InvalidMatrixTileVectorH64:
7220 case Match_InvalidMatrixTileVectorH128:
7221 case Match_InvalidMatrixTileVectorV8:
7222 case Match_InvalidMatrixTileVectorV16:
7223 case Match_InvalidMatrixTileVectorV32:
7224 case Match_InvalidMatrixTileVectorV64:
7225 case Match_InvalidMatrixTileVectorV128:
7226 case Match_InvalidSVCR:
7227 case Match_InvalidMatrixIndexGPR32_12_15:
7228 case Match_InvalidMatrixIndexGPR32_8_11:
7229 case Match_InvalidLookupTable:
7230 case Match_InvalidZPRMul2_Lo8:
7231 case Match_InvalidZPRMul2_Hi8:
7232 case Match_InvalidZPRMul2_Lo16:
7233 case Match_InvalidZPRMul2_Hi16:
7234 case Match_InvalidZPRMul2_Lo32:
7235 case Match_InvalidZPRMul2_Hi32:
7236 case Match_InvalidZPRMul2_Lo64:
7237 case Match_InvalidZPRMul2_Hi64:
7238 case Match_InvalidZPR_K0:
7239 case Match_InvalidSVEVectorList2x8Mul2:
7240 case Match_InvalidSVEVectorList2x16Mul2:
7241 case Match_InvalidSVEVectorList2x32Mul2:
7242 case Match_InvalidSVEVectorList2x64Mul2:
7243 case Match_InvalidSVEVectorList2x128Mul2:
7244 case Match_InvalidSVEVectorList4x8Mul4:
7245 case Match_InvalidSVEVectorList4x16Mul4:
7246 case Match_InvalidSVEVectorList4x32Mul4:
7247 case Match_InvalidSVEVectorList4x64Mul4:
7248 case Match_InvalidSVEVectorList4x128Mul4:
7249 case Match_InvalidSVEVectorList2x8Mul2_Lo:
7250 case Match_InvalidSVEVectorList2x16Mul2_Lo:
7251 case Match_InvalidSVEVectorList2x32Mul2_Lo:
7252 case Match_InvalidSVEVectorList2x64Mul2_Lo:
7253 case Match_InvalidSVEVectorList2x8Mul2_Hi:
7254 case Match_InvalidSVEVectorList2x16Mul2_Hi:
7255 case Match_InvalidSVEVectorList2x32Mul2_Hi:
7256 case Match_InvalidSVEVectorList2x64Mul2_Hi:
7257 case Match_InvalidSVEVectorListStrided2x8:
7258 case Match_InvalidSVEVectorListStrided2x16:
7259 case Match_InvalidSVEVectorListStrided2x32:
7260 case Match_InvalidSVEVectorListStrided2x64:
7261 case Match_InvalidSVEVectorListStrided4x8:
7262 case Match_InvalidSVEVectorListStrided4x16:
7263 case Match_InvalidSVEVectorListStrided4x32:
7264 case Match_InvalidSVEVectorListStrided4x64:
7265 case Match_MSR:
7266 case Match_MRS: {
7267 if (ErrorInfo >= Operands.size())
7268 return Error(L: IDLoc, Msg: "too few operands for instruction", Range: SMRange(IDLoc, (*Operands.back()).getEndLoc()));
7269 // Any time we get here, there's nothing fancy to do. Just get the
7270 // operand SMLoc and display the diagnostic.
7271 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
7272 if (ErrorLoc == SMLoc())
7273 ErrorLoc = IDLoc;
7274 return showMatchError(Loc: ErrorLoc, ErrCode: MatchResult, ErrorInfo, Operands);
7275 }
7276 }
7277
7278 llvm_unreachable("Implement any new match types added!");
7279}
7280
/// ParseDirective parses the AArch64-specific assembler directives.
///
/// Returns false when the directive name was recognized here (the per-directive
/// parsers report their own errors via diagnostics, so their return values are
/// intentionally ignored), and true when the directive is unknown and should be
/// handled by the generic parser instead.
bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
  // Several directive families are only meaningful for a particular object
  // file format (MachO LOH hints, COFF SEH unwind, ELF build attributes).
  const MCContext::Environment Format = getContext().getObjectFileType();
  bool IsMachO = Format == MCContext::IsMachO;
  bool IsCOFF = Format == MCContext::IsCOFF;
  bool IsELF = Format == MCContext::IsELF;

  // Directive names are matched case-insensitively.
  auto IDVal = DirectiveID.getIdentifier().lower();
  SMLoc Loc = DirectiveID.getLoc();
  // Format-independent directives first.
  if (IDVal == ".arch")
    parseDirectiveArch(L: Loc);
  else if (IDVal == ".cpu")
    parseDirectiveCPU(L: Loc);
  else if (IDVal == ".tlsdesccall")
    parseDirectiveTLSDescCall(L: Loc);
  else if (IDVal == ".ltorg" || IDVal == ".pool")
    parseDirectiveLtorg(L: Loc);
  else if (IDVal == ".unreq")
    parseDirectiveUnreq(L: Loc);
  else if (IDVal == ".inst")
    parseDirectiveInst(L: Loc);
  else if (IDVal == ".cfi_negate_ra_state")
    parseDirectiveCFINegateRAState();
  else if (IDVal == ".cfi_negate_ra_state_with_pc")
    parseDirectiveCFINegateRAStateWithPC();
  else if (IDVal == ".cfi_b_key_frame")
    parseDirectiveCFIBKeyFrame();
  else if (IDVal == ".cfi_mte_tagged_frame")
    parseDirectiveCFIMTETaggedFrame();
  else if (IDVal == ".arch_extension")
    parseDirectiveArchExtension(L: Loc);
  else if (IDVal == ".variant_pcs")
    parseDirectiveVariantPCS(L: Loc);
  else if (IsMachO) {
    // MachO: linker optimization hints only.
    if (IDVal == MCLOHDirectiveName())
      parseDirectiveLOH(LOH: IDVal, L: Loc);
    else
      return true;
  } else if (IsCOFF) {
    // COFF: structured exception handling (SEH) unwind directives.
    if (IDVal == ".seh_stackalloc")
      parseDirectiveSEHAllocStack(L: Loc);
    else if (IDVal == ".seh_endprologue")
      parseDirectiveSEHPrologEnd(L: Loc);
    else if (IDVal == ".seh_save_r19r20_x")
      parseDirectiveSEHSaveR19R20X(L: Loc);
    else if (IDVal == ".seh_save_fplr")
      parseDirectiveSEHSaveFPLR(L: Loc);
    else if (IDVal == ".seh_save_fplr_x")
      parseDirectiveSEHSaveFPLRX(L: Loc);
    else if (IDVal == ".seh_save_reg")
      parseDirectiveSEHSaveReg(L: Loc);
    else if (IDVal == ".seh_save_reg_x")
      parseDirectiveSEHSaveRegX(L: Loc);
    else if (IDVal == ".seh_save_regp")
      parseDirectiveSEHSaveRegP(L: Loc);
    else if (IDVal == ".seh_save_regp_x")
      parseDirectiveSEHSaveRegPX(L: Loc);
    else if (IDVal == ".seh_save_lrpair")
      parseDirectiveSEHSaveLRPair(L: Loc);
    else if (IDVal == ".seh_save_freg")
      parseDirectiveSEHSaveFReg(L: Loc);
    else if (IDVal == ".seh_save_freg_x")
      parseDirectiveSEHSaveFRegX(L: Loc);
    else if (IDVal == ".seh_save_fregp")
      parseDirectiveSEHSaveFRegP(L: Loc);
    else if (IDVal == ".seh_save_fregp_x")
      parseDirectiveSEHSaveFRegPX(L: Loc);
    else if (IDVal == ".seh_set_fp")
      parseDirectiveSEHSetFP(L: Loc);
    else if (IDVal == ".seh_add_fp")
      parseDirectiveSEHAddFP(L: Loc);
    else if (IDVal == ".seh_nop")
      parseDirectiveSEHNop(L: Loc);
    else if (IDVal == ".seh_save_next")
      parseDirectiveSEHSaveNext(L: Loc);
    else if (IDVal == ".seh_startepilogue")
      parseDirectiveSEHEpilogStart(L: Loc);
    else if (IDVal == ".seh_endepilogue")
      parseDirectiveSEHEpilogEnd(L: Loc);
    else if (IDVal == ".seh_trap_frame")
      parseDirectiveSEHTrapFrame(L: Loc);
    else if (IDVal == ".seh_pushframe")
      parseDirectiveSEHMachineFrame(L: Loc);
    else if (IDVal == ".seh_context")
      parseDirectiveSEHContext(L: Loc);
    else if (IDVal == ".seh_ec_context")
      parseDirectiveSEHECContext(L: Loc);
    else if (IDVal == ".seh_clear_unwound_to_call")
      parseDirectiveSEHClearUnwoundToCall(L: Loc);
    else if (IDVal == ".seh_pac_sign_lr")
      parseDirectiveSEHPACSignLR(L: Loc);
    // The four save_any_reg variants share one parser, selected by the
    // Paired/Writeback flags.
    else if (IDVal == ".seh_save_any_reg")
      parseDirectiveSEHSaveAnyReg(L: Loc, Paired: false, Writeback: false);
    else if (IDVal == ".seh_save_any_reg_p")
      parseDirectiveSEHSaveAnyReg(L: Loc, Paired: true, Writeback: false);
    else if (IDVal == ".seh_save_any_reg_x")
      parseDirectiveSEHSaveAnyReg(L: Loc, Paired: false, Writeback: true);
    else if (IDVal == ".seh_save_any_reg_px")
      parseDirectiveSEHSaveAnyReg(L: Loc, Paired: true, Writeback: true);
    else if (IDVal == ".seh_allocz")
      parseDirectiveSEHAllocZ(L: Loc);
    else if (IDVal == ".seh_save_zreg")
      parseDirectiveSEHSaveZReg(L: Loc);
    else if (IDVal == ".seh_save_preg")
      parseDirectiveSEHSavePReg(L: Loc);
    else
      return true;
  } else if (IsELF) {
    // ELF: AArch64 build attribute directives.
    if (IDVal == ".aeabi_subsection")
      parseDirectiveAeabiSubSectionHeader(L: Loc);
    else if (IDVal == ".aeabi_attribute")
      parseDirectiveAeabiAArch64Attr(L: Loc);
    else
      return true;
  } else
    return true;
  return false;
}
7399
// Expand the pseudo-extensions "crypto"/"nocrypto" into the concrete crypto
// extensions they imply for the given architecture. "nocrypto" takes
// precedence when both appear in the request list.
static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo,
                            SmallVector<StringRef, 4> &RequestedExtensions) {
  const bool NoCrypto = llvm::is_contained(Range&: RequestedExtensions, Element: "nocrypto");
  const bool Crypto = llvm::is_contained(Range&: RequestedExtensions, Element: "crypto");

  if (!NoCrypto && Crypto) {
    // Map 'generic' (and others) to sha2 and aes, because
    // that was the traditional meaning of crypto.
    if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
        ArchInfo == AArch64::ARMV8_3A) {
      RequestedExtensions.push_back(Elt: "sha2");
      RequestedExtensions.push_back(Elt: "aes");
    }
    // From v8.4-a onwards (and for all v9 and v8-R) "crypto" additionally
    // implies sm4 and sha3.
    if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
        ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
        ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
        ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
        ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
        ArchInfo == AArch64::ARMV9_4A || ArchInfo == AArch64::ARMV8R) {
      RequestedExtensions.push_back(Elt: "sm4");
      RequestedExtensions.push_back(Elt: "sha3");
      RequestedExtensions.push_back(Elt: "sha2");
      RequestedExtensions.push_back(Elt: "aes");
    }
  } else if (NoCrypto) {
    // Map 'nocrypto' to the negative forms of the same per-architecture sets
    // as the "crypto" branch above.
    if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
        ArchInfo == AArch64::ARMV8_3A) {
      RequestedExtensions.push_back(Elt: "nosha2");
      RequestedExtensions.push_back(Elt: "noaes");
    }
    // NOTE(review): ARMV8R is in the "crypto" list above but absent here, so
    // "nocrypto" on v8-R does not clear sm4/sha3/sha2/aes — confirm whether
    // this asymmetry is intentional.
    if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
        ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
        ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
        ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
        ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
        ArchInfo == AArch64::ARMV9_4A) {
      RequestedExtensions.push_back(Elt: "nosm4");
      RequestedExtensions.push_back(Elt: "nosha3");
      RequestedExtensions.push_back(Elt: "nosha2");
      RequestedExtensions.push_back(Elt: "noaes");
    }
  }
}
7445
7446static SMLoc incrementLoc(SMLoc L, int Offset) {
7447 return SMLoc::getFromPointer(Ptr: L.getPointer() + Offset);
7448}
7449
/// parseDirectiveArch
/// ::= .arch token
bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
  SMLoc CurLoc = getLoc();

  // The operand has the shape "<arch>[+ext[+ext...]]".
  StringRef Name = getParser().parseStringToEndOfStatement().trim();
  StringRef Arch, ExtensionString;
  std::tie(args&: Arch, args&: ExtensionString) = Name.split(Separator: '+');

  const AArch64::ArchInfo *ArchInfo = AArch64::parseArch(Arch);
  if (!ArchInfo)
    return Error(L: CurLoc, Msg: "unknown arch name");

  if (parseToken(T: AsmToken::EndOfStatement))
    return true;

  // Get the architecture and extension features.
  std::vector<StringRef> AArch64Features;
  AArch64Features.push_back(x: ArchInfo->ArchFeature);
  AArch64::getExtensionFeatures(Extensions: ArchInfo->DefaultExts, Features&: AArch64Features);

  // Reset the subtarget to a generic CPU carrying exactly the new arch's
  // default feature set; the previously active features are discarded.
  MCSubtargetInfo &STI = copySTI();
  std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
  STI.setDefaultFeatures(CPU: "generic", /*TuneCPU*/ "generic",
                         FS: join(Begin: ArchFeatures.begin(), End: ArchFeatures.end(), Separator: ","));

  SmallVector<StringRef, 4> RequestedExtensions;
  if (!ExtensionString.empty())
    ExtensionString.split(A&: RequestedExtensions, Separator: '+');

  // Expand the "crypto"/"nocrypto" pseudo-extensions before applying.
  ExpandCryptoAEK(ArchInfo: *ArchInfo, RequestedExtensions);
  // Track the source position of each extension token so diagnostics can
  // point at the offending extension rather than the whole directive.
  CurLoc = incrementLoc(L: CurLoc, Offset: Arch.size());

  for (auto Name : RequestedExtensions) {
    // Advance source location past '+'.
    CurLoc = incrementLoc(L: CurLoc, Offset: 1);

    // A leading "no" (case-insensitive) requests clearing the feature.
    bool EnableFeature = !Name.consume_front_insensitive(Prefix: "no");

    auto It = llvm::find_if(Range: ExtensionMap, P: [&Name](const auto &Extension) {
      return Extension.Name == Name;
    });

    if (It == std::end(arr: ExtensionMap))
      return Error(L: CurLoc, Msg: "unsupported architectural extension: " + Name);

    if (EnableFeature)
      STI.SetFeatureBitsTransitively(It->Features);
    else
      STI.ClearFeatureBitsTransitively(FB: It->Features);
    CurLoc = incrementLoc(L: CurLoc, Offset: Name.size());
  }
  FeatureBitset Features = ComputeAvailableFeatures(FB: STI.getFeatureBits());
  setAvailableFeatures(Features);

  // 'Name' here is the outer variable: the full operand string, including
  // any "+ext" suffixes (the loop variable of the same name is out of scope).
  getTargetStreamer().emitDirectiveArch(Name);
  return false;
}
7508
7509/// parseDirectiveArchExtension
7510/// ::= .arch_extension [no]feature
7511bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
7512 SMLoc ExtLoc = getLoc();
7513
7514 StringRef FullName = getParser().parseStringToEndOfStatement().trim();
7515
7516 if (parseEOL())
7517 return true;
7518
7519 bool EnableFeature = true;
7520 StringRef Name = FullName;
7521 if (Name.starts_with_insensitive(Prefix: "no")) {
7522 EnableFeature = false;
7523 Name = Name.substr(Start: 2);
7524 }
7525
7526 auto It = llvm::find_if(Range: ExtensionMap, P: [&Name](const auto &Extension) {
7527 return Extension.Name == Name;
7528 });
7529
7530 if (It == std::end(arr: ExtensionMap))
7531 return Error(L: ExtLoc, Msg: "unsupported architectural extension: " + Name);
7532
7533 MCSubtargetInfo &STI = copySTI();
7534 if (EnableFeature)
7535 STI.SetFeatureBitsTransitively(It->Features);
7536 else
7537 STI.ClearFeatureBitsTransitively(FB: It->Features);
7538 FeatureBitset Features = ComputeAvailableFeatures(FB: STI.getFeatureBits());
7539 setAvailableFeatures(Features);
7540
7541 getTargetStreamer().emitDirectiveArchExtension(Name: FullName);
7542 return false;
7543}
7544
/// parseDirectiveCPU
/// ::= .cpu id
bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
  SMLoc CurLoc = getLoc();

  // The operand has the shape "<cpu>[+ext[+ext...]]".
  StringRef CPU, ExtensionString;
  std::tie(args&: CPU, args&: ExtensionString) =
      getParser().parseStringToEndOfStatement().trim().split(Separator: '+');

  if (parseToken(T: AsmToken::EndOfStatement))
    return true;

  SmallVector<StringRef, 4> RequestedExtensions;
  if (!ExtensionString.empty())
    ExtensionString.split(A&: RequestedExtensions, Separator: '+');

  const llvm::AArch64::ArchInfo *CpuArch = llvm::AArch64::getArchForCpu(CPU);
  if (!CpuArch) {
    // NOTE(review): unlike parseDirectiveArch, this reports the error but
    // returns false (i.e. "no further action") — confirm the asymmetry with
    // the .arch handler is intentional.
    Error(L: CurLoc, Msg: "unknown CPU name");
    return false;
  }
  // Expand the "crypto"/"nocrypto" pseudo-extensions before applying.
  ExpandCryptoAEK(ArchInfo: *CpuArch, RequestedExtensions);

  // Reset to the named CPU's default features, then apply each +ext/+noext.
  MCSubtargetInfo &STI = copySTI();
  STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, FS: "");
  CurLoc = incrementLoc(L: CurLoc, Offset: CPU.size());

  for (auto Name : RequestedExtensions) {
    // Advance source location past '+'.
    CurLoc = incrementLoc(L: CurLoc, Offset: 1);

    // A leading "no" (case-insensitive) requests clearing the feature.
    bool EnableFeature = !Name.consume_front_insensitive(Prefix: "no");

    auto It = llvm::find_if(Range: ExtensionMap, P: [&Name](const auto &Extension) {
      return Extension.Name == Name;
    });

    if (It == std::end(arr: ExtensionMap))
      return Error(L: CurLoc, Msg: "unsupported architectural extension: " + Name);

    if (EnableFeature)
      STI.SetFeatureBitsTransitively(It->Features);
    else
      STI.ClearFeatureBitsTransitively(FB: It->Features);
    CurLoc = incrementLoc(L: CurLoc, Offset: Name.size());
  }
  FeatureBitset Features = ComputeAvailableFeatures(FB: STI.getFeatureBits());
  setAvailableFeatures(Features);
  return false;
}
7595
7596/// parseDirectiveInst
7597/// ::= .inst opcode [, ...]
7598bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
7599 if (getLexer().is(K: AsmToken::EndOfStatement))
7600 return Error(L: Loc, Msg: "expected expression following '.inst' directive");
7601
7602 auto parseOp = [&]() -> bool {
7603 SMLoc L = getLoc();
7604 const MCExpr *Expr = nullptr;
7605 if (check(P: getParser().parseExpression(Res&: Expr), Loc: L, Msg: "expected expression"))
7606 return true;
7607 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Val: Expr);
7608 if (check(P: !Value, Loc: L, Msg: "expected constant expression"))
7609 return true;
7610 getTargetStreamer().emitInst(Inst: Value->getValue());
7611 return false;
7612 };
7613
7614 return parseMany(parseOne: parseOp);
7615}
7616
// parseDirectiveTLSDescCall:
//   ::= .tlsdesccall symbol
bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
  StringRef Name;
  if (check(P: getParser().parseIdentifier(Res&: Name), Loc: L, Msg: "expected symbol") ||
      parseToken(T: AsmToken::EndOfStatement))
    return true;

  // Wrap the symbol reference in a TLSDESC specifier so the operand carries
  // TLS-descriptor semantics.
  MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
  const MCExpr *Expr = MCSymbolRefExpr::create(Symbol: Sym, Ctx&: getContext());
  Expr = MCSpecifierExpr::create(Expr, S: AArch64::S_TLSDESC, Ctx&: getContext());

  // Emit the TLSDESCCALL pseudo with the annotated symbol as its operand —
  // presumably a marker for the TLS-descriptor call sequence; confirm its
  // exact lowering in the target streamer/backend.
  MCInst Inst;
  Inst.setOpcode(AArch64::TLSDESCCALL);
  Inst.addOperand(Op: MCOperand::createExpr(Val: Expr));

  getParser().getStreamer().emitInstruction(Inst, STI: getSTI());
  return false;
}
7636
/// ::= .loh <lohName | lohId> label1, ..., labelN
/// The number of arguments depends on the loh identifier.
bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
  MCLOHType Kind;
  if (getTok().isNot(K: AsmToken::Identifier)) {
    if (getTok().isNot(K: AsmToken::Integer))
      return TokError(Msg: "expected an identifier or a number in directive");
    // We successfully get a numeric value for the identifier.
    // Check if it is valid.
    int64_t Id = getTok().getIntVal();
    // NOTE(review): '-1U' promotes to 4294967295 in this signed/unsigned
    // comparison, so ids above UINT32_MAX bypass the validity check — looks
    // suspicious; confirm the intended range check.
    if (Id <= -1U && !isValidMCLOHType(Kind: Id))
      return TokError(Msg: "invalid numeric identifier in directive");
    Kind = (MCLOHType)Id;
  } else {
    StringRef Name = getTok().getIdentifier();
    // We successfully parse an identifier.
    // Check if it is a recognized one.
    int Id = MCLOHNameToId(Name);

    if (Id == -1)
      return TokError(Msg: "invalid identifier in directive");
    Kind = (MCLOHType)Id;
  }
  // Consume the identifier.
  Lex();
  // Get the number of arguments of this LOH.
  int NbArgs = MCLOHIdToNbArgs(Kind);

  assert(NbArgs != -1 && "Invalid number of arguments");

  // Parse exactly NbArgs comma-separated label arguments.
  SmallVector<MCSymbol *, 3> Args;
  for (int Idx = 0; Idx < NbArgs; ++Idx) {
    StringRef Name;
    if (getParser().parseIdentifier(Res&: Name))
      return TokError(Msg: "expected identifier in directive");
    Args.push_back(Elt: getContext().getOrCreateSymbol(Name));

    // No trailing comma after the final argument.
    if (Idx + 1 == NbArgs)
      break;
    if (parseComma())
      return true;
  }
  if (parseEOL())
    return true;

  getStreamer().emitLOHDirective(Kind, Args);
  return false;
}
7685
/// parseDirectiveLtorg
/// ::= .ltorg | .pool
bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
  if (parseEOL())
    return true;
  // Flush the pending constant pool at the current position.
  getTargetStreamer().emitCurrentConstantPool();
  return false;
}
7694
/// parseDirectiveReq
/// ::= name .req registername
bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
  Lex(); // Eat the '.req' token.
  SMLoc SRegLoc = getLoc();
  // Try register classes in order: scalar, NEON vector, SVE data vector,
  // SVE predicate. The first successful parse determines the alias's kind.
  RegKind RegisterKind = RegKind::Scalar;
  MCRegister RegNum;
  ParseStatus ParseRes = tryParseScalarRegister(RegNum);

  if (!ParseRes.isSuccess()) {
    StringRef Kind;
    RegisterKind = RegKind::NeonVector;
    ParseRes = tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RegKind::NeonVector);

    if (ParseRes.isFailure())
      return true;

    // An alias must name the bare register, not a typed form like "v0.8b".
    if (ParseRes.isSuccess() && !Kind.empty())
      return Error(L: SRegLoc, Msg: "vector register without type specifier expected");
  }

  if (!ParseRes.isSuccess()) {
    StringRef Kind;
    RegisterKind = RegKind::SVEDataVector;
    ParseRes =
        tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RegKind::SVEDataVector);

    if (ParseRes.isFailure())
      return true;

    if (ParseRes.isSuccess() && !Kind.empty())
      return Error(L: SRegLoc,
                   Msg: "sve vector register without type specifier expected");
  }

  if (!ParseRes.isSuccess()) {
    StringRef Kind;
    RegisterKind = RegKind::SVEPredicateVector;
    ParseRes = tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RegKind::SVEPredicateVector);

    if (ParseRes.isFailure())
      return true;

    if (ParseRes.isSuccess() && !Kind.empty())
      return Error(L: SRegLoc,
                   Msg: "sve predicate register without type specifier expected");
  }

  if (!ParseRes.isSuccess())
    return Error(L: SRegLoc, Msg: "register name or alias expected");

  // Shouldn't be anything else.
  if (parseEOL())
    return true;

  // First definition wins: a conflicting redefinition is ignored with only a
  // warning, keeping the original alias binding.
  auto pair = std::make_pair(x&: RegisterKind, y&: RegNum);
  if (RegisterReqs.insert(KV: std::make_pair(x&: Name, y&: pair)).first->second != pair)
    Warning(L, Msg: "ignoring redefinition of register alias '" + Name + "'");

  return false;
}
7756
7757/// parseDirectiveUneq
7758/// ::= .unreq registername
7759bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
7760 if (getTok().isNot(K: AsmToken::Identifier))
7761 return TokError(Msg: "unexpected input in .unreq directive.");
7762 RegisterReqs.erase(Key: getTok().getIdentifier().lower());
7763 Lex(); // Eat the identifier.
7764 return parseToken(T: AsmToken::EndOfStatement);
7765}
7766
// Parse .cfi_negate_ra_state: no operands; forwards to the streamer.
bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
  if (parseEOL())
    return true;
  getStreamer().emitCFINegateRAState();
  return false;
}
7773
// Parse .cfi_negate_ra_state_with_pc: no operands; forwards to the streamer.
bool AArch64AsmParser::parseDirectiveCFINegateRAStateWithPC() {
  if (parseEOL())
    return true;
  getStreamer().emitCFINegateRAStateWithPC();
  return false;
}
7780
/// parseDirectiveCFIBKeyFrame
/// ::= .cfi_b_key
bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
  if (parseEOL())
    return true;
  // No operands; forwards directly to the streamer.
  getStreamer().emitCFIBKeyFrame();
  return false;
}
7789
/// parseDirectiveCFIMTETaggedFrame
/// ::= .cfi_mte_tagged_frame
bool AArch64AsmParser::parseDirectiveCFIMTETaggedFrame() {
  if (parseEOL())
    return true;
  // No operands; forwards directly to the streamer.
  getStreamer().emitCFIMTETaggedFrame();
  return false;
}
7798
/// parseDirectiveVariantPCS
/// ::= .variant_pcs symbolname
bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
  StringRef Name;
  if (getParser().parseIdentifier(Res&: Name))
    return TokError(Msg: "expected symbol name");
  if (parseEOL())
    return true;
  // Mark the (possibly forward-declared) symbol as using a variant PCS.
  getTargetStreamer().emitDirectiveVariantPCS(
      Symbol: getContext().getOrCreateSymbol(Name));
  return false;
}
7811
/// parseDirectiveSEHAllocStack
/// ::= .seh_stackalloc
bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) {
  // Single immediate operand: the allocation size.
  int64_t Size;
  if (parseImmExpr(Out&: Size))
    return true;
  getTargetStreamer().emitARM64WinCFIAllocStack(Size);
  return false;
}
7821
/// parseDirectiveSEHPrologEnd
/// ::= .seh_endprologue
bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) {
  // No operands; forwards directly to the streamer.
  getTargetStreamer().emitARM64WinCFIPrologEnd();
  return false;
}
7828
/// parseDirectiveSEHSaveR19R20X
/// ::= .seh_save_r19r20_x
bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) {
  // Single immediate operand: the pre-decrement offset.
  int64_t Offset;
  if (parseImmExpr(Out&: Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveR19R20X(Offset);
  return false;
}
7838
/// parseDirectiveSEHSaveFPLR
/// ::= .seh_save_fplr
bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) {
  // Single immediate operand: the stack offset of the fp/lr pair.
  int64_t Offset;
  if (parseImmExpr(Out&: Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveFPLR(Offset);
  return false;
}
7848
/// parseDirectiveSEHSaveFPLRX
/// ::= .seh_save_fplr_x
bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) {
  // Single immediate operand: the pre-decrement offset.
  int64_t Offset;
  if (parseImmExpr(Out&: Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveFPLRX(Offset);
  return false;
}
7858
/// parseDirectiveSEHSaveReg
/// ::= .seh_save_reg
bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) {
  // Operands: a register in x19..lr, then an immediate offset.
  unsigned Reg;
  int64_t Offset;
  if (parseRegisterInRange(Out&: Reg, Base: AArch64::X0, First: AArch64::X19, Last: AArch64::LR) ||
      parseComma() || parseImmExpr(Out&: Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveReg(Reg, Offset);
  return false;
}
7870
/// parseDirectiveSEHSaveRegX
/// ::= .seh_save_reg_x
bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) {
  // Operands: a register in x19..lr, then an immediate offset.
  unsigned Reg;
  int64_t Offset;
  if (parseRegisterInRange(Out&: Reg, Base: AArch64::X0, First: AArch64::X19, Last: AArch64::LR) ||
      parseComma() || parseImmExpr(Out&: Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveRegX(Reg, Offset);
  return false;
}
7882
/// parseDirectiveSEHSaveRegP
/// ::= .seh_save_regp
bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) {
  // Operands: the first register of the pair (x19..fp), then an offset.
  unsigned Reg;
  int64_t Offset;
  if (parseRegisterInRange(Out&: Reg, Base: AArch64::X0, First: AArch64::X19, Last: AArch64::FP) ||
      parseComma() || parseImmExpr(Out&: Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveRegP(Reg, Offset);
  return false;
}
7894
/// parseDirectiveSEHSaveRegPX
/// ::= .seh_save_regp_x
bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) {
  // Operands: the first register of the pair (x19..fp), then an offset.
  unsigned Reg;
  int64_t Offset;
  if (parseRegisterInRange(Out&: Reg, Base: AArch64::X0, First: AArch64::X19, Last: AArch64::FP) ||
      parseComma() || parseImmExpr(Out&: Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveRegPX(Reg, Offset);
  return false;
}
7906
/// parseDirectiveSEHSaveLRPair
/// ::= .seh_save_lrpair
bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) {
  unsigned Reg;
  int64_t Offset;
  // Re-anchor the diagnostic location at the register operand.
  L = getLoc();
  if (parseRegisterInRange(Out&: Reg, Base: AArch64::X0, First: AArch64::X19, Last: AArch64::LR) ||
      parseComma() || parseImmExpr(Out&: Offset))
    return true;
  // Only x19, x21, x23, ... may start an lr pair (Reg is the register index
  // relative to x0, so 19 means x19).
  if (check(P: ((Reg - 19) % 2 != 0), Loc: L,
            Msg: "expected register with even offset from x19"))
    return true;
  getTargetStreamer().emitARM64WinCFISaveLRPair(Reg, Offset);
  return false;
}
7922
/// parseDirectiveSEHSaveFReg
/// ::= .seh_save_freg
bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) {
  // Operands: a register in d8..d15, then an immediate offset.
  unsigned Reg;
  int64_t Offset;
  if (parseRegisterInRange(Out&: Reg, Base: AArch64::D0, First: AArch64::D8, Last: AArch64::D15) ||
      parseComma() || parseImmExpr(Out&: Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveFReg(Reg, Offset);
  return false;
}
7934
/// parseDirectiveSEHSaveFRegX
/// ::= .seh_save_freg_x
bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) {
  // Operands: a register in d8..d15, then an immediate offset.
  unsigned Reg;
  int64_t Offset;
  if (parseRegisterInRange(Out&: Reg, Base: AArch64::D0, First: AArch64::D8, Last: AArch64::D15) ||
      parseComma() || parseImmExpr(Out&: Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveFRegX(Reg, Offset);
  return false;
}
7946
/// parseDirectiveSEHSaveFRegP
/// ::= .seh_save_fregp
bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) {
  // Operands: the first register of the pair (d8..d14), then an offset.
  unsigned Reg;
  int64_t Offset;
  if (parseRegisterInRange(Out&: Reg, Base: AArch64::D0, First: AArch64::D8, Last: AArch64::D14) ||
      parseComma() || parseImmExpr(Out&: Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveFRegP(Reg, Offset);
  return false;
}
7958
/// parseDirectiveSEHSaveFRegPX
/// ::= .seh_save_fregp_x
bool AArch64AsmParser::parseDirectiveSEHSaveFRegPX(SMLoc L) {
  // Operands: the first register of the pair (d8..d14), then an offset.
  unsigned Reg;
  int64_t Offset;
  if (parseRegisterInRange(Out&: Reg, Base: AArch64::D0, First: AArch64::D8, Last: AArch64::D14) ||
      parseComma() || parseImmExpr(Out&: Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveFRegPX(Reg, Offset);
  return false;
}
7970
/// parseDirectiveSEHSetFP
/// ::= .seh_set_fp
bool AArch64AsmParser::parseDirectiveSEHSetFP(SMLoc L) {
  // No operands; forwards directly to the streamer.
  getTargetStreamer().emitARM64WinCFISetFP();
  return false;
}
7977
/// parseDirectiveSEHAddFP
/// ::= .seh_add_fp
bool AArch64AsmParser::parseDirectiveSEHAddFP(SMLoc L) {
  // Single immediate operand: the offset added to sp to form fp.
  int64_t Size;
  if (parseImmExpr(Out&: Size))
    return true;
  getTargetStreamer().emitARM64WinCFIAddFP(Size);
  return false;
}
7987
/// parseDirectiveSEHNop
/// ::= .seh_nop
bool AArch64AsmParser::parseDirectiveSEHNop(SMLoc L) {
  // No operands; forwards directly to the streamer.
  getTargetStreamer().emitARM64WinCFINop();
  return false;
}
7994
/// parseDirectiveSEHSaveNext
/// ::= .seh_save_next
bool AArch64AsmParser::parseDirectiveSEHSaveNext(SMLoc L) {
  // No operands; forwards directly to the streamer.
  getTargetStreamer().emitARM64WinCFISaveNext();
  return false;
}
8001
/// parseDirectiveSEHEpilogStart
/// ::= .seh_startepilogue
bool AArch64AsmParser::parseDirectiveSEHEpilogStart(SMLoc L) {
  // No operands; forwards directly to the streamer.
  getTargetStreamer().emitARM64WinCFIEpilogStart();
  return false;
}
8008
/// parseDirectiveSEHEpilogEnd
/// ::= .seh_endepilogue
bool AArch64AsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
  // No operands; forwards directly to the streamer.
  getTargetStreamer().emitARM64WinCFIEpilogEnd();
  return false;
}
8015
/// parseDirectiveSEHTrapFrame
/// ::= .seh_trap_frame
bool AArch64AsmParser::parseDirectiveSEHTrapFrame(SMLoc L) {
  // No operands; forwards directly to the streamer.
  getTargetStreamer().emitARM64WinCFITrapFrame();
  return false;
}
8022
/// parseDirectiveSEHMachineFrame
/// ::= .seh_pushframe
bool AArch64AsmParser::parseDirectiveSEHMachineFrame(SMLoc L) {
  // No operands; forwards directly to the streamer.
  getTargetStreamer().emitARM64WinCFIMachineFrame();
  return false;
}
8029
/// parseDirectiveSEHContext
/// ::= .seh_context
bool AArch64AsmParser::parseDirectiveSEHContext(SMLoc L) {
  // No operands; forwards directly to the streamer.
  getTargetStreamer().emitARM64WinCFIContext();
  return false;
}
8036
/// parseDirectiveSEHECContext
/// ::= .seh_ec_context
bool AArch64AsmParser::parseDirectiveSEHECContext(SMLoc L) {
  // No operands; forwards directly to the streamer.
  getTargetStreamer().emitARM64WinCFIECContext();
  return false;
}
8043
/// parseDirectiveSEHClearUnwoundToCall
/// ::= .seh_clear_unwound_to_call
bool AArch64AsmParser::parseDirectiveSEHClearUnwoundToCall(SMLoc L) {
  // No operands; forwards directly to the streamer.
  getTargetStreamer().emitARM64WinCFIClearUnwoundToCall();
  return false;
}
8050
/// parseDirectiveSEHPACSignLR
/// ::= .seh_pac_sign_lr
bool AArch64AsmParser::parseDirectiveSEHPACSignLR(SMLoc L) {
  // No operands; forwards directly to the streamer.
  getTargetStreamer().emitARM64WinCFIPACSignLR();
  return false;
}
8057
/// parseDirectiveSEHSaveAnyReg
/// ::= .seh_save_any_reg
/// ::= .seh_save_any_reg_p
/// ::= .seh_save_any_reg_x
/// ::= .seh_save_any_reg_px
bool AArch64AsmParser::parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired,
                                                   bool Writeback) {
  // Operands: any x/d/q register, then an immediate offset. The register
  // class selects which of the twelve emit variants is used.
  MCRegister Reg;
  SMLoc Start, End;
  int64_t Offset;
  if (check(P: parseRegister(Reg, StartLoc&: Start, EndLoc&: End), Loc: getLoc(), Msg: "expected register") ||
      parseComma() || parseImmExpr(Out&: Offset))
    return true;

  // Integer registers: x0-x28 plus fp (encoded 29) and lr (encoded 30).
  if (Reg == AArch64::FP || Reg == AArch64::LR ||
      (Reg >= AArch64::X0 && Reg <= AArch64::X28)) {
    // Paired or writeback forms require 16-byte alignment, otherwise 8.
    if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
      return Error(L, Msg: "invalid save_any_reg offset");
    unsigned EncodedReg;
    if (Reg == AArch64::FP)
      EncodedReg = 29;
    else if (Reg == AArch64::LR)
      EncodedReg = 30;
    else
      EncodedReg = Reg - AArch64::X0;
    if (Paired) {
      // lr is the last integer register, so it has no pair partner.
      if (Reg == AArch64::LR)
        return Error(L: Start, Msg: "lr cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegIPX(Reg: EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegIP(Reg: EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegIX(Reg: EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegI(Reg: EncodedReg, Offset);
    }
  } else if (Reg >= AArch64::D0 && Reg <= AArch64::D31) {
    unsigned EncodedReg = Reg - AArch64::D0;
    // Same alignment rule as the integer case.
    if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
      return Error(L, Msg: "invalid save_any_reg offset");
    if (Paired) {
      if (Reg == AArch64::D31)
        return Error(L: Start, Msg: "d31 cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegDPX(Reg: EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegDP(Reg: EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegDX(Reg: EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegD(Reg: EncodedReg, Offset);
    }
  } else if (Reg >= AArch64::Q0 && Reg <= AArch64::Q31) {
    unsigned EncodedReg = Reg - AArch64::Q0;
    // q registers always require 16-byte alignment.
    if (Offset < 0 || Offset % 16)
      return Error(L, Msg: "invalid save_any_reg offset");
    if (Paired) {
      if (Reg == AArch64::Q31)
        return Error(L: Start, Msg: "q31 cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegQPX(Reg: EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegQP(Reg: EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegQX(Reg: EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegQ(Reg: EncodedReg, Offset);
    }
  } else {
    return Error(L: Start, Msg: "save_any_reg register must be x, q or d register");
  }
  return false;
}
8135
/// parseDirectiveSEHAllocZ
/// ::= .seh_allocz
bool AArch64AsmParser::parseDirectiveSEHAllocZ(SMLoc L) {
  // Single immediate operand.
  int64_t Offset;
  if (parseImmExpr(Out&: Offset))
    return true;
  getTargetStreamer().emitARM64WinCFIAllocZ(Offset);
  return false;
}
8145
/// parseDirectiveSEHSaveZReg
/// ::= .seh_save_zreg
bool AArch64AsmParser::parseDirectiveSEHSaveZReg(SMLoc L) {
  MCRegister RegNum;
  StringRef Kind;
  int64_t Offset;
  ParseStatus Res =
      tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RegKind::SVEDataVector);
  // NOTE(review): no diagnostic is emitted on this path — confirm that
  // tryParseVectorRegister reports one itself.
  if (!Res.isSuccess())
    return true;
  if (check(P: RegNum < AArch64::Z8 || RegNum > AArch64::Z23, Loc: L,
            Msg: "expected register in range z8 to z23"))
    return true;
  if (parseComma() || parseImmExpr(Out&: Offset))
    return true;
  // The unwind code encodes the register as an index relative to z0.
  getTargetStreamer().emitARM64WinCFISaveZReg(Reg: RegNum - AArch64::Z0, Offset);
  return false;
}
8164
/// parseDirectiveSEHSavePReg
/// ::= .seh_save_preg
bool AArch64AsmParser::parseDirectiveSEHSavePReg(SMLoc L) {
  MCRegister RegNum;
  StringRef Kind;
  int64_t Offset;
  ParseStatus Res =
      tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RegKind::SVEPredicateVector);
  // NOTE(review): no diagnostic is emitted on this path — confirm that
  // tryParseVectorRegister reports one itself.
  if (!Res.isSuccess())
    return true;
  if (check(P: RegNum < AArch64::P4 || RegNum > AArch64::P15, Loc: L,
            Msg: "expected register in range p4 to p15"))
    return true;
  if (parseComma() || parseImmExpr(Out&: Offset))
    return true;
  // The unwind code encodes the register as an index relative to p0.
  getTargetStreamer().emitARM64WinCFISavePReg(Reg: RegNum - AArch64::P0, Offset);
  return false;
}
8183
bool AArch64AsmParser::parseDirectiveAeabiSubSectionHeader(SMLoc L) {
  // Handle parsing of .aeabi_subsection directives
  // - On first declaration of a subsection, expect exactly three identifiers
  //   after `.aeabi_subsection`: the subsection name and two parameters.
  // - When switching to an existing subsection, it is valid to provide only
  //   the subsection name, or the name together with the two parameters.
  MCAsmParser &Parser = getParser();

  // Consume the name (subsection name)
  StringRef SubsectionName;
  AArch64BuildAttributes::VendorID SubsectionNameID;
  if (Parser.getTok().is(K: AsmToken::Identifier)) {
    SubsectionName = Parser.getTok().getIdentifier();
    SubsectionNameID = AArch64BuildAttributes::getVendorID(Vendor: SubsectionName);
  } else {
    Error(L: Parser.getTok().getLoc(), Msg: "subsection name not found");
    return true;
  }
  Parser.Lex();

  // Look up any previous declaration: both parameters must match it if the
  // subsection is being re-declared below.
  std::unique_ptr<MCELFStreamer::AttributeSubSection> SubsectionExists =
      getTargetStreamer().getAttributesSubsectionByName(Name: SubsectionName);
  // Check whether only the subsection name was provided.
  // If so, the user is trying to switch to a subsection that should have been
  // declared before.
  if (Parser.getTok().is(K: llvm::AsmToken::EndOfStatement)) {
    if (SubsectionExists) {
      // Re-activate the subsection with its previously recorded parameters.
      getTargetStreamer().emitAttributesSubsection(
          VendorName: SubsectionName,
          IsOptional: static_cast<AArch64BuildAttributes::SubsectionOptional>(
              SubsectionExists->IsOptional),
          ParameterType: static_cast<AArch64BuildAttributes::SubsectionType>(
              SubsectionExists->ParameterType));
      return false;
    }
    // If subsection does not exists, report error.
    else {
      Error(L: Parser.getTok().getLoc(),
            Msg: "Could not switch to subsection '" + SubsectionName +
                "' using subsection name, subsection has not been defined");
      return true;
    }
  }

  // Otherwise, expecting 2 more parameters: consume a comma
  // parseComma() return *false* on success, and call Lex(), no need to call
  // Lex() again.
  if (Parser.parseComma()) {
    return true;
  }

  // Consume the first parameter (optionality parameter)
  AArch64BuildAttributes::SubsectionOptional IsOptional;
  // options: optional/required
  if (Parser.getTok().is(K: AsmToken::Identifier)) {
    StringRef Optionality = Parser.getTok().getIdentifier();
    IsOptional = AArch64BuildAttributes::getOptionalID(Optional: Optionality);
    if (AArch64BuildAttributes::OPTIONAL_NOT_FOUND == IsOptional) {
      Error(L: Parser.getTok().getLoc(),
            Msg: AArch64BuildAttributes::getSubsectionOptionalUnknownError());
      return true;
    }
    // A re-declaration must agree with the original optionality.
    if (SubsectionExists) {
      if (IsOptional != SubsectionExists->IsOptional) {
        Error(L: Parser.getTok().getLoc(),
              Msg: "optionality mismatch! subsection '" + SubsectionName +
                  "' already exists with optionality defined as '" +
                  AArch64BuildAttributes::getOptionalStr(
                      Optional: SubsectionExists->IsOptional) +
                  "' and not '" +
                  AArch64BuildAttributes::getOptionalStr(Optional: IsOptional) + "'");
        return true;
      }
    }
  } else {
    Error(L: Parser.getTok().getLoc(),
          Msg: "optionality parameter not found, expected required|optional");
    return true;
  }
  // Check for possible IsOptional unaccepted values for known subsections
  if (AArch64BuildAttributes::AEABI_FEATURE_AND_BITS == SubsectionNameID) {
    if (AArch64BuildAttributes::REQUIRED == IsOptional) {
      Error(L: Parser.getTok().getLoc(),
            Msg: "aeabi_feature_and_bits must be marked as optional");
      return true;
    }
  }
  if (AArch64BuildAttributes::AEABI_PAUTHABI == SubsectionNameID) {
    if (AArch64BuildAttributes::OPTIONAL == IsOptional) {
      Error(L: Parser.getTok().getLoc(),
            Msg: "aeabi_pauthabi must be marked as required");
      return true;
    }
  }
  Parser.Lex();
  // consume a comma
  if (Parser.parseComma()) {
    return true;
  }

  // Consume the second parameter (type parameter)
  AArch64BuildAttributes::SubsectionType Type;
  if (Parser.getTok().is(K: AsmToken::Identifier)) {
    StringRef Name = Parser.getTok().getIdentifier();
    Type = AArch64BuildAttributes::getTypeID(Type: Name);
    if (AArch64BuildAttributes::TYPE_NOT_FOUND == Type) {
      Error(L: Parser.getTok().getLoc(),
            Msg: AArch64BuildAttributes::getSubsectionTypeUnknownError());
      return true;
    }
    // A re-declaration must agree with the original parameter type.
    if (SubsectionExists) {
      if (Type != SubsectionExists->ParameterType) {
        Error(L: Parser.getTok().getLoc(),
              Msg: "type mismatch! subsection '" + SubsectionName +
                  "' already exists with type defined as '" +
                  AArch64BuildAttributes::getTypeStr(
                      Type: SubsectionExists->ParameterType) +
                  "' and not '" + AArch64BuildAttributes::getTypeStr(Type) +
                  "'");
        return true;
      }
    }
  } else {
    Error(L: Parser.getTok().getLoc(),
          Msg: "type parameter not found, expected uleb128|ntbs");
    return true;
  }
  // Check for possible unaccepted 'type' values for known subsections
  if (AArch64BuildAttributes::AEABI_FEATURE_AND_BITS == SubsectionNameID ||
      AArch64BuildAttributes::AEABI_PAUTHABI == SubsectionNameID) {
    if (AArch64BuildAttributes::NTBS == Type) {
      Error(L: Parser.getTok().getLoc(),
            Msg: SubsectionName + " must be marked as ULEB128");
      return true;
    }
  }
  Parser.Lex();

  // Parsing finished, check for trailing tokens.
  if (Parser.getTok().isNot(K: llvm::AsmToken::EndOfStatement)) {
    Error(L: Parser.getTok().getLoc(), Msg: "unexpected token for AArch64 build "
                                     "attributes subsection header directive");
    return true;
  }

  getTargetStreamer().emitAttributesSubsection(VendorName: SubsectionName, IsOptional, ParameterType: Type);

  return false;
}
8333
8334bool AArch64AsmParser::parseDirectiveAeabiAArch64Attr(SMLoc L) {
8335 // Expecting 2 Tokens: after '.aeabi_attribute', e.g.:
8336 // .aeabi_attribute (1)Tag_Feature_BTI, (2)[uleb128|ntbs]
8337 // separated by a comma.
8338 MCAsmParser &Parser = getParser();
8339
8340 std::unique_ptr<MCELFStreamer::AttributeSubSection> ActiveSubsection =
8341 getTargetStreamer().getActiveAttributesSubsection();
8342 if (nullptr == ActiveSubsection) {
8343 Error(L: Parser.getTok().getLoc(),
8344 Msg: "no active subsection, build attribute can not be added");
8345 return true;
8346 }
8347 StringRef ActiveSubsectionName = ActiveSubsection->VendorName;
8348 unsigned ActiveSubsectionType = ActiveSubsection->ParameterType;
8349
8350 unsigned ActiveSubsectionID = AArch64BuildAttributes::VENDOR_UNKNOWN;
8351 if (AArch64BuildAttributes::getVendorName(
8352 Vendor: AArch64BuildAttributes::AEABI_PAUTHABI) == ActiveSubsectionName)
8353 ActiveSubsectionID = AArch64BuildAttributes::AEABI_PAUTHABI;
8354 if (AArch64BuildAttributes::getVendorName(
8355 Vendor: AArch64BuildAttributes::AEABI_FEATURE_AND_BITS) ==
8356 ActiveSubsectionName)
8357 ActiveSubsectionID = AArch64BuildAttributes::AEABI_FEATURE_AND_BITS;
8358
8359 StringRef TagStr = "";
8360 unsigned Tag;
8361 if (Parser.getTok().is(K: AsmToken::Integer)) {
8362 Tag = getTok().getIntVal();
8363 } else if (Parser.getTok().is(K: AsmToken::Identifier)) {
8364 TagStr = Parser.getTok().getIdentifier();
8365 switch (ActiveSubsectionID) {
8366 case AArch64BuildAttributes::VENDOR_UNKNOWN:
8367 // Tag was provided as an unrecognized string instead of an unsigned
8368 // integer
8369 Error(L: Parser.getTok().getLoc(), Msg: "unrecognized Tag: '" + TagStr +
8370 "' \nExcept for public subsections, "
8371 "tags have to be an unsigned int.");
8372 return true;
8373 break;
8374 case AArch64BuildAttributes::AEABI_PAUTHABI:
8375 Tag = AArch64BuildAttributes::getPauthABITagsID(PauthABITag: TagStr);
8376 if (AArch64BuildAttributes::PAUTHABI_TAG_NOT_FOUND == Tag) {
8377 Error(L: Parser.getTok().getLoc(), Msg: "unknown AArch64 build attribute '" +
8378 TagStr + "' for subsection '" +
8379 ActiveSubsectionName + "'");
8380 return true;
8381 }
8382 break;
8383 case AArch64BuildAttributes::AEABI_FEATURE_AND_BITS:
8384 Tag = AArch64BuildAttributes::getFeatureAndBitsTagsID(FeatureAndBitsTag: TagStr);
8385 if (AArch64BuildAttributes::FEATURE_AND_BITS_TAG_NOT_FOUND == Tag) {
8386 Error(L: Parser.getTok().getLoc(), Msg: "unknown AArch64 build attribute '" +
8387 TagStr + "' for subsection '" +
8388 ActiveSubsectionName + "'");
8389 return true;
8390 }
8391 break;
8392 }
8393 } else {
8394 Error(L: Parser.getTok().getLoc(), Msg: "AArch64 build attributes tag not found");
8395 return true;
8396 }
8397 Parser.Lex();
8398 // consume a comma
8399 // parseComma() return *false* on success, and call Lex(), no need to call
8400 // Lex() again.
8401 if (Parser.parseComma()) {
8402 return true;
8403 }
8404
8405 // Consume the second parameter (attribute value)
8406 unsigned ValueInt = unsigned(-1);
8407 std::string ValueStr = "";
8408 if (Parser.getTok().is(K: AsmToken::Integer)) {
8409 if (AArch64BuildAttributes::NTBS == ActiveSubsectionType) {
8410 Error(
8411 L: Parser.getTok().getLoc(),
8412 Msg: "active subsection type is NTBS (string), found ULEB128 (unsigned)");
8413 return true;
8414 }
8415 ValueInt = getTok().getIntVal();
8416 } else if (Parser.getTok().is(K: AsmToken::Identifier)) {
8417 if (AArch64BuildAttributes::ULEB128 == ActiveSubsectionType) {
8418 Error(
8419 L: Parser.getTok().getLoc(),
8420 Msg: "active subsection type is ULEB128 (unsigned), found NTBS (string)");
8421 return true;
8422 }
8423 ValueStr = Parser.getTok().getIdentifier();
8424 } else if (Parser.getTok().is(K: AsmToken::String)) {
8425 if (AArch64BuildAttributes::ULEB128 == ActiveSubsectionType) {
8426 Error(
8427 L: Parser.getTok().getLoc(),
8428 Msg: "active subsection type is ULEB128 (unsigned), found NTBS (string)");
8429 return true;
8430 }
8431 ValueStr = Parser.getTok().getString();
8432 } else {
8433 Error(L: Parser.getTok().getLoc(), Msg: "AArch64 build attributes value not found");
8434 return true;
8435 }
8436 // Check for possible unaccepted values for known tags
8437 // (AEABI_FEATURE_AND_BITS)
8438 if (ActiveSubsectionID == AArch64BuildAttributes::AEABI_FEATURE_AND_BITS) {
8439 if (0 != ValueInt && 1 != ValueInt) {
8440 Error(L: Parser.getTok().getLoc(),
8441 Msg: "unknown AArch64 build attributes Value for Tag '" + TagStr +
8442 "' options are 0|1");
8443 return true;
8444 }
8445 }
8446 Parser.Lex();
8447
8448 // Parsing finished. Check for trailing tokens.
8449 if (Parser.getTok().isNot(K: llvm::AsmToken::EndOfStatement)) {
8450 Error(L: Parser.getTok().getLoc(),
8451 Msg: "unexpected token for AArch64 build attributes tag and value "
8452 "attribute directive");
8453 return true;
8454 }
8455
8456 if (unsigned(-1) != ValueInt) {
8457 getTargetStreamer().emitAttribute(VendorName: ActiveSubsectionName, Tag, Value: ValueInt, String: "");
8458 }
8459 if ("" != ValueStr) {
8460 getTargetStreamer().emitAttribute(VendorName: ActiveSubsectionName, Tag, Value: unsigned(-1),
8461 String: ValueStr);
8462 }
8463 return false;
8464}
8465
/// Parse an expression for a data directive, optionally followed by an '@'
/// relocation specifier (e.g. sym@plt) and a trailing +/- constant chain.
/// Returns true on error (MCAsmParser convention).
bool AArch64AsmParser::parseDataExpr(const MCExpr *&Res) {
  SMLoc EndLoc;

  // Parse the base expression first.
  if (getParser().parseExpression(Res))
    return true;
  MCAsmParser &Parser = getParser();
  // Without a trailing '@', the plain expression is the whole operand.
  if (!parseOptionalToken(T: AsmToken::At))
    return false;
  if (getLexer().getKind() != AsmToken::Identifier)
    return Error(L: getLoc(), Msg: "expected relocation specifier");

  // Specifier names are matched case-insensitively.
  std::string Identifier = Parser.getTok().getIdentifier().lower();
  SMLoc Loc = getLoc();
  Lex();
  // '@auth(...)' has its own grammar, handled separately.
  if (Identifier == "auth")
    return parseAuthExpr(Res, EndLoc);

  auto Spec = AArch64::S_None;
  // The accepted specifier set depends on the object file format.
  if (STI->getTargetTriple().isOSBinFormatMachO()) {
    if (Identifier == "got")
      Spec = AArch64::S_MACHO_GOT;
  } else {
    // Unofficial, experimental syntax that will be changed.
    if (Identifier == "gotpcrel")
      Spec = AArch64::S_GOTPCREL;
    else if (Identifier == "plt")
      Spec = AArch64::S_PLT;
    else if (Identifier == "funcinit")
      Spec = AArch64::S_FUNCINIT;
  }
  if (Spec == AArch64::S_None)
    return Error(L: Loc, Msg: "invalid relocation specifier");
  // Re-create the symbol reference with the parsed specifier attached; a
  // specifier is only meaningful directly on a symbol.
  if (auto *SRE = dyn_cast<MCSymbolRefExpr>(Val: Res))
    Res = MCSymbolRefExpr::create(Symbol: &SRE->getSymbol(), specifier: Spec, Ctx&: getContext(),
                                  Loc: SRE->getLoc());
  else
    return Error(L: Loc, Msg: "@ specifier only allowed after a symbol");

  // Fold any trailing '+'/'-' primary-expression terms (e.g. 'sym@plt + 4')
  // into a left-nested binary expression tree.
  for (;;) {
    std::optional<MCBinaryExpr::Opcode> Opcode;
    if (parseOptionalToken(T: AsmToken::Plus))
      Opcode = MCBinaryExpr::Add;
    else if (parseOptionalToken(T: AsmToken::Minus))
      Opcode = MCBinaryExpr::Sub;
    else
      break;
    const MCExpr *Term;
    if (getParser().parsePrimaryExpr(Res&: Term, EndLoc, TypeInfo: nullptr))
      return true;
    Res = MCBinaryExpr::create(Op: *Opcode, LHS: Res, RHS: Term, Ctx&: getContext(), Loc: Res->getLoc());
  }
  return false;
}
8519
/// parseAuthExpr
/// ::= _sym@AUTH(ib,123[,addr])
/// ::= (_sym + 5)@AUTH(ib,123[,addr])
/// ::= (_sym - 5)@AUTH(ib,123[,addr])
///
/// On entry, Res holds the already-parsed pointer expression and the "@AUTH"
/// prefix has been consumed; on success Res is wrapped in an
/// AArch64AuthMCExpr. Returns true on error (MCAsmParser convention).
bool AArch64AsmParser::parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc) {
  MCAsmParser &Parser = getParser();
  MCContext &Ctx = getContext();
  AsmToken Tok = Parser.getTok();

  // At this point, we encountered "<id>@AUTH". There is no fallback anymore.
  if (parseToken(T: AsmToken::LParen, Msg: "expected '('"))
    return true;

  // First argument: the PAC key name (e.g. "ib").
  if (Parser.getTok().isNot(K: AsmToken::Identifier))
    return TokError(Msg: "expected key name");

  StringRef KeyStr = Parser.getTok().getIdentifier();
  auto KeyIDOrNone = AArch64StringToPACKeyID(Name: KeyStr);
  if (!KeyIDOrNone)
    return TokError(Msg: "invalid key '" + KeyStr + "'");
  Parser.Lex();

  if (parseToken(T: AsmToken::Comma, Msg: "expected ','"))
    return true;

  // Second argument: a 16-bit integer discriminator.
  if (Parser.getTok().isNot(K: AsmToken::Integer))
    return TokError(Msg: "expected integer discriminator");
  int64_t Discriminator = Parser.getTok().getIntVal();

  if (!isUInt<16>(x: Discriminator))
    return TokError(Msg: "integer discriminator " + Twine(Discriminator) +
                        " out of range [0, 0xFFFF]");
  Parser.Lex();

  // Optional third argument: the literal 'addr' enables address diversity.
  bool UseAddressDiversity = false;
  if (Parser.getTok().is(K: AsmToken::Comma)) {
    Parser.Lex();
    if (Parser.getTok().isNot(K: AsmToken::Identifier) ||
        Parser.getTok().getIdentifier() != "addr")
      return TokError(Msg: "expected 'addr'");
    UseAddressDiversity = true;
    Parser.Lex();
  }

  EndLoc = Parser.getTok().getEndLoc();
  if (parseToken(T: AsmToken::RParen, Msg: "expected ')'"))
    return true;

  Res = AArch64AuthMCExpr::create(Expr: Res, Discriminator, Key: *KeyIDOrNone,
                                  HasAddressDiversity: UseAddressDiversity, Ctx, Loc: Res->getLoc());
  return false;
}
8572
8573bool AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
8574 AArch64::Specifier &ELFSpec,
8575 AArch64::Specifier &DarwinSpec,
8576 int64_t &Addend) {
8577 ELFSpec = AArch64::S_INVALID;
8578 DarwinSpec = AArch64::S_None;
8579 Addend = 0;
8580
8581 if (auto *AE = dyn_cast<MCSpecifierExpr>(Val: Expr)) {
8582 ELFSpec = AE->getSpecifier();
8583 Expr = AE->getSubExpr();
8584 }
8585
8586 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Val: Expr);
8587 if (SE) {
8588 // It's a simple symbol reference with no addend.
8589 DarwinSpec = AArch64::Specifier(SE->getKind());
8590 return true;
8591 }
8592
8593 // Check that it looks like a symbol + an addend
8594 MCValue Res;
8595 bool Relocatable = Expr->evaluateAsRelocatable(Res, Asm: nullptr);
8596 if (!Relocatable || Res.getSubSym())
8597 return false;
8598
8599 // Treat expressions with an ELFSpec (like ":abs_g1:3", or
8600 // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
8601 if (!Res.getAddSym() && ELFSpec == AArch64::S_INVALID)
8602 return false;
8603
8604 if (Res.getAddSym())
8605 DarwinSpec = AArch64::Specifier(Res.getSpecifier());
8606 Addend = Res.getConstant();
8607
8608 // It's some symbol reference + a constant addend, but really
8609 // shouldn't use both Darwin and ELF syntax.
8610 return ELFSpec == AArch64::S_INVALID || DarwinSpec == AArch64::S_None;
8611}
8612
8613/// Force static initialization.
8614extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void
8615LLVMInitializeAArch64AsmParser() {
8616 RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
8617 RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
8618 RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
8619 RegisterMCAsmParser<AArch64AsmParser> W(getTheARM64_32Target());
8620 RegisterMCAsmParser<AArch64AsmParser> V(getTheAArch64_32Target());
8621}
8622
8623#define GET_REGISTER_MATCHER
8624#define GET_SUBTARGET_FEATURE_NAME
8625#define GET_MATCHER_IMPLEMENTATION
8626#define GET_MNEMONIC_SPELL_CHECKER
8627#include "AArch64GenAsmMatcher.inc"
8628
// Define this matcher function after the auto-generated include so we
// have the match class enum definitions.
//
// Gives the target a final say on whether an operand matches a given match
// class kind; returns Match_Success or Match_InvalidOperand.
unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
                                                      unsigned Kind) {
  AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);

  // Helper: succeed only if Op is a constant immediate equal to ExpectedVal.
  auto MatchesOpImmediate = [&](int64_t ExpectedVal) -> MatchResultTy {
    if (!Op.isImm())
      return Match_InvalidOperand;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Op.getImm());
    if (!CE)
      return Match_InvalidOperand;
    if (CE->getValue() == ExpectedVal)
      return Match_Success;
    return Match_InvalidOperand;
  };

  switch (Kind) {
  default:
    return Match_InvalidOperand;
  case MCK_MPR:
    // If the Kind is a token for the MPR register class which has the "za"
    // register (SME accumulator array), check if the asm is a literal "za"
    // token. This is for the "smstart za" alias that defines the register
    // as a literal token.
    if (Op.isTokenEqual(Str: "za"))
      return Match_Success;
    return Match_InvalidOperand;

    // If the kind is a token for a literal immediate, check if our asm operand
    // matches. This is for InstAliases which have a fixed-value immediate in
    // the asm string, such as hints which are parsed into a specific
    // instruction definition.
#define MATCH_HASH(N)                                                          \
  case MCK__HASH_##N:                                                          \
    return MatchesOpImmediate(N);
    MATCH_HASH(0)
    MATCH_HASH(1)
    MATCH_HASH(2)
    MATCH_HASH(3)
    MATCH_HASH(4)
    MATCH_HASH(6)
    MATCH_HASH(7)
    MATCH_HASH(8)
    MATCH_HASH(10)
    MATCH_HASH(12)
    MATCH_HASH(14)
    MATCH_HASH(16)
    MATCH_HASH(24)
    MATCH_HASH(25)
    MATCH_HASH(26)
    MATCH_HASH(27)
    MATCH_HASH(28)
    MATCH_HASH(29)
    MATCH_HASH(30)
    MATCH_HASH(31)
    MATCH_HASH(32)
    MATCH_HASH(40)
    MATCH_HASH(48)
    MATCH_HASH(64)
#undef MATCH_HASH
    // Same idea for fixed-value negative immediates (e.g. "#-8").
#define MATCH_HASH_MINUS(N)                                                    \
  case MCK__HASH__MINUS_##N:                                                   \
    return MatchesOpImmediate(-N);
    MATCH_HASH_MINUS(4)
    MATCH_HASH_MINUS(8)
    MATCH_HASH_MINUS(16)
#undef MATCH_HASH_MINUS
  }
}
8699
/// Parse a consecutive even/odd GPR pair (e.g. "x0, x1" or "w2, w3") as used
/// by CASP-style instructions, producing a single sequence-pair register
/// operand.
ParseStatus AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {

  SMLoc S = getLoc();

  if (getTok().isNot(K: AsmToken::Identifier))
    return Error(L: S, Msg: "expected register");

  MCRegister FirstReg;
  ParseStatus Res = tryParseScalarRegister(RegNum&: FirstReg);
  if (!Res.isSuccess())
    return Error(L: S, Msg: "expected first even register of a consecutive same-size "
                        "even/odd register pair");

  const MCRegisterClass &WRegClass =
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
  const MCRegisterClass &XRegClass =
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID];

  // The first register decides the width (w vs x) the second must match.
  bool isXReg = XRegClass.contains(Reg: FirstReg),
       isWReg = WRegClass.contains(Reg: FirstReg);
  if (!isXReg && !isWReg)
    return Error(L: S, Msg: "expected first even register of a consecutive same-size "
                        "even/odd register pair");

  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  unsigned FirstEncoding = RI->getEncodingValue(Reg: FirstReg);

  // The pair must start on an even-numbered register.
  if (FirstEncoding & 0x1)
    return Error(L: S, Msg: "expected first even register of a consecutive same-size "
                        "even/odd register pair");

  if (getTok().isNot(K: AsmToken::Comma))
    return Error(L: getLoc(), Msg: "expected comma");
  // Eat the comma
  Lex();

  SMLoc E = getLoc();
  MCRegister SecondReg;
  Res = tryParseScalarRegister(RegNum&: SecondReg);
  if (!Res.isSuccess())
    return Error(L: E, Msg: "expected second odd register of a consecutive same-size "
                        "even/odd register pair");

  // Second register must be the next encoding and the same width.
  if (RI->getEncodingValue(Reg: SecondReg) != FirstEncoding + 1 ||
      (isXReg && !XRegClass.contains(Reg: SecondReg)) ||
      (isWReg && !WRegClass.contains(Reg: SecondReg)))
    return Error(L: E, Msg: "expected second odd register of a consecutive same-size "
                        "even/odd register pair");

  // Fold the two scalar registers into the covering sequence-pair register.
  MCRegister Pair;
  if (isXReg) {
    Pair = RI->getMatchingSuperReg(Reg: FirstReg, SubIdx: AArch64::sube64,
           RC: &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
  } else {
    Pair = RI->getMatchingSuperReg(Reg: FirstReg, SubIdx: AArch64::sube32,
           RC: &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
  }

  Operands.push_back(Elt: AArch64Operand::CreateReg(Reg: Pair, Kind: RegKind::Scalar, S,
                                                 E: getLoc(), Ctx&: getContext()));

  return ParseStatus::Success;
}
8763
/// Parse an SVE data vector register (e.g. "z0" or "z0.s"), optionally
/// followed by a vector index or a shift/extend, depending on the template
/// parameters. ParseSuffix requires an element-size suffix; ParseShiftExtend
/// allows a trailing ", <shift/extend>".
template <bool ParseShiftExtend, bool ParseSuffix>
ParseStatus AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
  const SMLoc S = getLoc();
  // Check for a SVE vector register specifier first.
  MCRegister RegNum;
  StringRef Kind;

  ParseStatus Res =
      tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RegKind::SVEDataVector);

  if (!Res.isSuccess())
    return Res;

  // When a suffix is mandatory (e.g. "z0.s"), a bare register is no match.
  if (ParseSuffix && Kind.empty())
    return ParseStatus::NoMatch;

  const auto &KindRes = parseVectorKind(Suffix: Kind, VectorKind: RegKind::SVEDataVector);
  if (!KindRes)
    return ParseStatus::NoMatch;

  unsigned ElementWidth = KindRes->second;

  // No shift/extend is the default.
  if (!ParseShiftExtend || getTok().isNot(K: AsmToken::Comma)) {
    Operands.push_back(Elt: AArch64Operand::CreateVectorReg(
        Reg: RegNum, Kind: RegKind::SVEDataVector, ElementWidth, S, E: S, Ctx&: getContext()));

    // An optional vector index (e.g. "z0.s[1]") may still follow.
    ParseStatus Res = tryParseVectorIndex(Operands);
    if (Res.isFailure())
      return ParseStatus::Failure;
    return ParseStatus::Success;
  }

  // Eat the comma
  Lex();

  // Match the shift
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
  Res = tryParseOptionalShiftExtend(Operands&: ExtOpnd);
  if (!Res.isSuccess())
    return Res;

  // Fold the parsed shift/extend into the vector register operand itself.
  auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
  Operands.push_back(Elt: AArch64Operand::CreateVectorReg(
      Reg: RegNum, Kind: RegKind::SVEDataVector, ElementWidth, S, E: Ext->getEndLoc(),
      Ctx&: getContext(), ExtTy: Ext->getShiftExtendType(), ShiftAmount: Ext->getShiftExtendAmount(),
      HasExplicitAmount: Ext->hasShiftExtendAmount()));

  return ParseStatus::Success;
}
8814
/// Parse an SVE predicate pattern operand, given either as a named pattern
/// (e.g. "vl4", "all") or as an explicit immediate ("#31").
ParseStatus AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();

  SMLoc SS = getLoc();
  const AsmToken &TokE = getTok();
  bool IsHash = TokE.is(K: AsmToken::Hash);

  if (!IsHash && TokE.isNot(K: AsmToken::Identifier))
    return ParseStatus::NoMatch;

  int64_t Pattern;
  if (IsHash) {
    Lex(); // Eat hash

    // Parse the immediate operand.
    const MCExpr *ImmVal;
    SS = getLoc();
    if (Parser.parseExpression(Res&: ImmVal))
      return ParseStatus::Failure;

    // Only constant immediates are meaningful as a pattern encoding.
    auto *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
    if (!MCE)
      return TokError(Msg: "invalid operand for instruction");

    Pattern = MCE->getValue();
  } else {
    // Parse the pattern
    auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(Name: TokE.getString());
    if (!Pat)
      return ParseStatus::NoMatch;

    Lex();
    Pattern = Pat->Encoding;
    assert(Pattern >= 0 && Pattern < 32);
  }

  Operands.push_back(
      Elt: AArch64Operand::CreateImm(Val: MCConstantExpr::create(Value: Pattern, Ctx&: getContext()),
                                 S: SS, E: getLoc(), Ctx&: getContext()));

  return ParseStatus::Success;
}
8857
8858ParseStatus
8859AArch64AsmParser::tryParseSVEVecLenSpecifier(OperandVector &Operands) {
8860 int64_t Pattern;
8861 SMLoc SS = getLoc();
8862 const AsmToken &TokE = getTok();
8863 // Parse the pattern
8864 auto Pat = AArch64SVEVecLenSpecifier::lookupSVEVECLENSPECIFIERByName(
8865 Name: TokE.getString());
8866 if (!Pat)
8867 return ParseStatus::NoMatch;
8868
8869 Lex();
8870 Pattern = Pat->Encoding;
8871 assert(Pattern >= 0 && Pattern <= 1 && "Pattern does not exist");
8872
8873 Operands.push_back(
8874 Elt: AArch64Operand::CreateImm(Val: MCConstantExpr::create(Value: Pattern, Ctx&: getContext()),
8875 S: SS, E: getLoc(), Ctx&: getContext()));
8876
8877 return ParseStatus::Success;
8878}
8879
8880ParseStatus AArch64AsmParser::tryParseGPR64x8(OperandVector &Operands) {
8881 SMLoc SS = getLoc();
8882
8883 MCRegister XReg;
8884 if (!tryParseScalarRegister(RegNum&: XReg).isSuccess())
8885 return ParseStatus::NoMatch;
8886
8887 MCContext &ctx = getContext();
8888 const MCRegisterInfo *RI = ctx.getRegisterInfo();
8889 MCRegister X8Reg = RI->getMatchingSuperReg(
8890 Reg: XReg, SubIdx: AArch64::x8sub_0,
8891 RC: &AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]);
8892 if (!X8Reg)
8893 return Error(L: SS,
8894 Msg: "expected an even-numbered x-register in the range [x0,x22]");
8895
8896 Operands.push_back(
8897 Elt: AArch64Operand::CreateReg(Reg: X8Reg, Kind: RegKind::Scalar, S: SS, E: getLoc(), Ctx&: ctx));
8898 return ParseStatus::Success;
8899}
8900
8901ParseStatus AArch64AsmParser::tryParseImmRange(OperandVector &Operands) {
8902 SMLoc S = getLoc();
8903
8904 if (getTok().isNot(K: AsmToken::Integer))
8905 return ParseStatus::NoMatch;
8906
8907 if (getLexer().peekTok().isNot(K: AsmToken::Colon))
8908 return ParseStatus::NoMatch;
8909
8910 const MCExpr *ImmF;
8911 if (getParser().parseExpression(Res&: ImmF))
8912 return ParseStatus::NoMatch;
8913
8914 if (getTok().isNot(K: AsmToken::Colon))
8915 return ParseStatus::NoMatch;
8916
8917 Lex(); // Eat ':'
8918 if (getTok().isNot(K: AsmToken::Integer))
8919 return ParseStatus::NoMatch;
8920
8921 SMLoc E = getTok().getLoc();
8922 const MCExpr *ImmL;
8923 if (getParser().parseExpression(Res&: ImmL))
8924 return ParseStatus::NoMatch;
8925
8926 unsigned ImmFVal = cast<MCConstantExpr>(Val: ImmF)->getValue();
8927 unsigned ImmLVal = cast<MCConstantExpr>(Val: ImmL)->getValue();
8928
8929 Operands.push_back(
8930 Elt: AArch64Operand::CreateImmRange(First: ImmFVal, Last: ImmLVal, S, E, Ctx&: getContext()));
8931 return ParseStatus::Success;
8932}
8933
8934template <int Adj>
8935ParseStatus AArch64AsmParser::tryParseAdjImm0_63(OperandVector &Operands) {
8936 SMLoc S = getLoc();
8937
8938 parseOptionalToken(T: AsmToken::Hash);
8939 bool IsNegative = parseOptionalToken(T: AsmToken::Minus);
8940
8941 if (getTok().isNot(K: AsmToken::Integer))
8942 return ParseStatus::NoMatch;
8943
8944 const MCExpr *Ex;
8945 if (getParser().parseExpression(Res&: Ex))
8946 return ParseStatus::NoMatch;
8947
8948 int64_t Imm = dyn_cast<MCConstantExpr>(Val: Ex)->getValue();
8949 if (IsNegative)
8950 Imm = -Imm;
8951
8952 // We want an adjusted immediate in the range [0, 63]. If we don't have one,
8953 // return a value, which is certain to trigger a error message about invalid
8954 // immediate range instead of a non-descriptive invalid operand error.
8955 static_assert(Adj == 1 || Adj == -1, "Unsafe immediate adjustment");
8956 if (Imm == INT64_MIN || Imm == INT64_MAX || Imm + Adj < 0 || Imm + Adj > 63)
8957 Imm = -2;
8958 else
8959 Imm += Adj;
8960
8961 SMLoc E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
8962 Operands.push_back(Elt: AArch64Operand::CreateImm(
8963 Val: MCConstantExpr::create(Value: Imm, Ctx&: getContext()), S, E, Ctx&: getContext()));
8964
8965 return ParseStatus::Success;
8966}
8967