1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "AArch64InstrInfo.h"
10#include "MCTargetDesc/AArch64AddressingModes.h"
11#include "MCTargetDesc/AArch64InstPrinter.h"
12#include "MCTargetDesc/AArch64MCAsmInfo.h"
13#include "MCTargetDesc/AArch64MCTargetDesc.h"
14#include "MCTargetDesc/AArch64TargetStreamer.h"
15#include "TargetInfo/AArch64TargetInfo.h"
16#include "Utils/AArch64BaseInfo.h"
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallSet.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/StringExtras.h"
24#include "llvm/ADT/StringMap.h"
25#include "llvm/ADT/StringRef.h"
26#include "llvm/ADT/StringSwitch.h"
27#include "llvm/ADT/Twine.h"
28#include "llvm/MC/MCAsmInfo.h"
29#include "llvm/MC/MCContext.h"
30#include "llvm/MC/MCExpr.h"
31#include "llvm/MC/MCInst.h"
32#include "llvm/MC/MCLinkerOptimizationHint.h"
33#include "llvm/MC/MCObjectFileInfo.h"
34#include "llvm/MC/MCParser/AsmLexer.h"
35#include "llvm/MC/MCParser/MCAsmParser.h"
36#include "llvm/MC/MCParser/MCAsmParserExtension.h"
37#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
38#include "llvm/MC/MCParser/MCTargetAsmParser.h"
39#include "llvm/MC/MCRegisterInfo.h"
40#include "llvm/MC/MCStreamer.h"
41#include "llvm/MC/MCSubtargetInfo.h"
42#include "llvm/MC/MCSymbol.h"
43#include "llvm/MC/MCTargetOptions.h"
44#include "llvm/MC/MCValue.h"
45#include "llvm/MC/TargetRegistry.h"
46#include "llvm/Support/AArch64BuildAttributes.h"
47#include "llvm/Support/Compiler.h"
48#include "llvm/Support/ErrorHandling.h"
49#include "llvm/Support/MathExtras.h"
50#include "llvm/Support/SMLoc.h"
51#include "llvm/Support/raw_ostream.h"
52#include "llvm/TargetParser/AArch64TargetParser.h"
53#include "llvm/TargetParser/SubtargetFeature.h"
54#include <cassert>
55#include <cctype>
56#include <cstdint>
57#include <cstdio>
58#include <optional>
59#include <string>
60#include <tuple>
61#include <utility>
62#include <vector>
63
64using namespace llvm;
65
66namespace {
67
// The broad categories of registers the parser distinguishes while matching
// register names and vector-list suffixes.
enum class RegKind {
  Scalar,
  NeonVector,
  SVEDataVector,
  SVEPredicateAsCounter,
  SVEPredicateVector,
  Matrix,
  LookupTable
};
77
// Kind of matrix register operand: the whole array, a tile, or a row/column
// slice of a tile (see MatrixRegOp below).
enum class MatrixKind { Array, Tile, Row, Col };
79
// How a parsed register may relate to the register class an instruction
// expects: exactly equal, or equal to a super-/sub-register of it
// (e.g. for GPR64as32 or GPR32as64 operands — see RegOp::EqualityTy).
enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};
85
86class AArch64AsmParser : public MCTargetAsmParser {
87private:
88 StringRef Mnemonic; ///< Instruction mnemonic.
89
90 // Map of register aliases registers via the .req directive.
91 StringMap<std::pair<RegKind, MCRegister>> RegisterReqs;
92
93 class PrefixInfo {
94 public:
95 static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
96 PrefixInfo Prefix;
97 switch (Inst.getOpcode()) {
98 case AArch64::MOVPRFX_ZZ:
99 Prefix.Active = true;
100 Prefix.Dst = Inst.getOperand(i: 0).getReg();
101 break;
102 case AArch64::MOVPRFX_ZPmZ_B:
103 case AArch64::MOVPRFX_ZPmZ_H:
104 case AArch64::MOVPRFX_ZPmZ_S:
105 case AArch64::MOVPRFX_ZPmZ_D:
106 Prefix.Active = true;
107 Prefix.Predicated = true;
108 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
109 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
110 "No destructive element size set for movprfx");
111 Prefix.Dst = Inst.getOperand(i: 0).getReg();
112 Prefix.Pg = Inst.getOperand(i: 2).getReg();
113 break;
114 case AArch64::MOVPRFX_ZPzZ_B:
115 case AArch64::MOVPRFX_ZPzZ_H:
116 case AArch64::MOVPRFX_ZPzZ_S:
117 case AArch64::MOVPRFX_ZPzZ_D:
118 Prefix.Active = true;
119 Prefix.Predicated = true;
120 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
121 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
122 "No destructive element size set for movprfx");
123 Prefix.Dst = Inst.getOperand(i: 0).getReg();
124 Prefix.Pg = Inst.getOperand(i: 1).getReg();
125 break;
126 default:
127 break;
128 }
129
130 return Prefix;
131 }
132
133 PrefixInfo() = default;
134 bool isActive() const { return Active; }
135 bool isPredicated() const { return Predicated; }
136 unsigned getElementSize() const {
137 assert(Predicated);
138 return ElementSize;
139 }
140 MCRegister getDstReg() const { return Dst; }
141 MCRegister getPgReg() const {
142 assert(Predicated);
143 return Pg;
144 }
145
146 private:
147 bool Active = false;
148 bool Predicated = false;
149 unsigned ElementSize;
150 MCRegister Dst;
151 MCRegister Pg;
152 } NextPrefix;
153
154 AArch64TargetStreamer &getTargetStreamer() {
155 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
156 return static_cast<AArch64TargetStreamer &>(TS);
157 }
158
159 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
160
161 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
162 bool parseSyslAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
163 bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
164 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
165 AArch64CC::CondCode parseCondCodeString(StringRef Cond,
166 std::string &Suggestion);
167 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
168 MCRegister matchRegisterNameAlias(StringRef Name, RegKind Kind);
169 bool parseRegister(OperandVector &Operands);
170 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
171 bool parseNeonVectorList(OperandVector &Operands);
172 bool parseOptionalMulOperand(OperandVector &Operands);
173 bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
174 bool parseKeywordOperand(OperandVector &Operands);
175 bool parseOperand(OperandVector &Operands, bool isCondCode,
176 bool invertCondCode);
177 bool parseImmExpr(int64_t &Out);
178 bool parseComma();
179 bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
180 unsigned Last);
181
182 bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
183 OperandVector &Operands);
184
185 bool parseDataExpr(const MCExpr *&Res) override;
186 bool parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc);
187
188 bool parseDirectiveArch(SMLoc L);
189 bool parseDirectiveArchExtension(SMLoc L);
190 bool parseDirectiveCPU(SMLoc L);
191 bool parseDirectiveInst(SMLoc L);
192
193 bool parseDirectiveTLSDescCall(SMLoc L);
194
195 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
196 bool parseDirectiveLtorg(SMLoc L);
197
198 bool parseDirectiveReq(StringRef Name, SMLoc L);
199 bool parseDirectiveUnreq(SMLoc L);
200 bool parseDirectiveCFINegateRAState();
201 bool parseDirectiveCFINegateRAStateWithPC();
202 bool parseDirectiveCFIBKeyFrame();
203 bool parseDirectiveCFIMTETaggedFrame();
204
205 bool parseDirectiveVariantPCS(SMLoc L);
206
207 bool parseDirectiveSEHAllocStack(SMLoc L);
208 bool parseDirectiveSEHPrologEnd(SMLoc L);
209 bool parseDirectiveSEHSaveR19R20X(SMLoc L);
210 bool parseDirectiveSEHSaveFPLR(SMLoc L);
211 bool parseDirectiveSEHSaveFPLRX(SMLoc L);
212 bool parseDirectiveSEHSaveReg(SMLoc L);
213 bool parseDirectiveSEHSaveRegX(SMLoc L);
214 bool parseDirectiveSEHSaveRegP(SMLoc L);
215 bool parseDirectiveSEHSaveRegPX(SMLoc L);
216 bool parseDirectiveSEHSaveLRPair(SMLoc L);
217 bool parseDirectiveSEHSaveFReg(SMLoc L);
218 bool parseDirectiveSEHSaveFRegX(SMLoc L);
219 bool parseDirectiveSEHSaveFRegP(SMLoc L);
220 bool parseDirectiveSEHSaveFRegPX(SMLoc L);
221 bool parseDirectiveSEHSetFP(SMLoc L);
222 bool parseDirectiveSEHAddFP(SMLoc L);
223 bool parseDirectiveSEHNop(SMLoc L);
224 bool parseDirectiveSEHSaveNext(SMLoc L);
225 bool parseDirectiveSEHEpilogStart(SMLoc L);
226 bool parseDirectiveSEHEpilogEnd(SMLoc L);
227 bool parseDirectiveSEHTrapFrame(SMLoc L);
228 bool parseDirectiveSEHMachineFrame(SMLoc L);
229 bool parseDirectiveSEHContext(SMLoc L);
230 bool parseDirectiveSEHECContext(SMLoc L);
231 bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
232 bool parseDirectiveSEHPACSignLR(SMLoc L);
233 bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);
234 bool parseDirectiveSEHAllocZ(SMLoc L);
235 bool parseDirectiveSEHSaveZReg(SMLoc L);
236 bool parseDirectiveSEHSavePReg(SMLoc L);
237 bool parseDirectiveAeabiSubSectionHeader(SMLoc L);
238 bool parseDirectiveAeabiAArch64Attr(SMLoc L);
239
240 bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
241 SmallVectorImpl<SMLoc> &Loc);
242 unsigned getNumRegsForRegKind(RegKind K);
243 bool matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
244 OperandVector &Operands, MCStreamer &Out,
245 uint64_t &ErrorInfo,
246 bool MatchingInlineAsm) override;
247 /// @name Auto-generated Match Functions
248 /// {
249
250#define GET_ASSEMBLER_HEADER
251#include "AArch64GenAsmMatcher.inc"
252
253 /// }
254
255 ParseStatus tryParseScalarRegister(MCRegister &Reg);
256 ParseStatus tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
257 RegKind MatchKind);
258 ParseStatus tryParseMatrixRegister(OperandVector &Operands);
259 ParseStatus tryParseSVCR(OperandVector &Operands);
260 ParseStatus tryParseOptionalShiftExtend(OperandVector &Operands);
261 ParseStatus tryParseBarrierOperand(OperandVector &Operands);
262 ParseStatus tryParseBarriernXSOperand(OperandVector &Operands);
263 ParseStatus tryParseSysReg(OperandVector &Operands);
264 ParseStatus tryParseSysCROperand(OperandVector &Operands);
265 template <bool IsSVEPrefetch = false>
266 ParseStatus tryParsePrefetch(OperandVector &Operands);
267 ParseStatus tryParseRPRFMOperand(OperandVector &Operands);
268 ParseStatus tryParsePSBHint(OperandVector &Operands);
269 ParseStatus tryParseBTIHint(OperandVector &Operands);
270 ParseStatus tryParseCMHPriorityHint(OperandVector &Operands);
271 ParseStatus tryParseTIndexHint(OperandVector &Operands);
272 ParseStatus tryParseAdrpLabel(OperandVector &Operands);
273 ParseStatus tryParseAdrLabel(OperandVector &Operands);
274 template <bool AddFPZeroAsLiteral>
275 ParseStatus tryParseFPImm(OperandVector &Operands);
276 ParseStatus tryParseImmWithOptionalShift(OperandVector &Operands);
277 ParseStatus tryParseGPR64sp0Operand(OperandVector &Operands);
278 bool tryParseNeonVectorRegister(OperandVector &Operands);
279 ParseStatus tryParseVectorIndex(OperandVector &Operands);
280 ParseStatus tryParseGPRSeqPair(OperandVector &Operands);
281 ParseStatus tryParseSyspXzrPair(OperandVector &Operands);
282 template <bool ParseShiftExtend,
283 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
284 ParseStatus tryParseGPROperand(OperandVector &Operands);
285 ParseStatus tryParseZTOperand(OperandVector &Operands);
286 template <bool ParseShiftExtend, bool ParseSuffix>
287 ParseStatus tryParseSVEDataVector(OperandVector &Operands);
288 template <RegKind RK>
289 ParseStatus tryParseSVEPredicateVector(OperandVector &Operands);
290 ParseStatus
291 tryParseSVEPredicateOrPredicateAsCounterVector(OperandVector &Operands);
292 template <RegKind VectorKind>
293 ParseStatus tryParseVectorList(OperandVector &Operands,
294 bool ExpectMatch = false);
295 ParseStatus tryParseMatrixTileList(OperandVector &Operands);
296 ParseStatus tryParseSVEPattern(OperandVector &Operands);
297 ParseStatus tryParseSVEVecLenSpecifier(OperandVector &Operands);
298 ParseStatus tryParseGPR64x8(OperandVector &Operands);
299 ParseStatus tryParseImmRange(OperandVector &Operands);
300 template <int> ParseStatus tryParseAdjImm0_63(OperandVector &Operands);
301 ParseStatus tryParsePHintInstOperand(OperandVector &Operands);
302
303public:
304 enum AArch64MatchResultTy {
305 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
306#define GET_OPERAND_DIAGNOSTIC_TYPES
307#include "AArch64GenAsmMatcher.inc"
308 };
309 bool IsILP32;
310 bool IsWindowsArm64EC;
311
312 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
313 const MCInstrInfo &MII, const MCTargetOptions &Options)
314 : MCTargetAsmParser(Options, STI, MII) {
315 IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
316 IsWindowsArm64EC = STI.getTargetTriple().isWindowsArm64EC();
317 MCAsmParserExtension::Initialize(Parser);
318 MCStreamer &S = getParser().getStreamer();
319 if (S.getTargetStreamer() == nullptr)
320 new AArch64TargetStreamer(S);
321
322 // Alias .hword/.word/.[dx]word to the target-independent
323 // .2byte/.4byte/.8byte directives as they have the same form and
324 // semantics:
325 /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
326 Parser.addAliasForDirective(Directive: ".hword", Alias: ".2byte");
327 Parser.addAliasForDirective(Directive: ".word", Alias: ".4byte");
328 Parser.addAliasForDirective(Directive: ".dword", Alias: ".8byte");
329 Parser.addAliasForDirective(Directive: ".xword", Alias: ".8byte");
330
331 // Initialize the set of available features.
332 setAvailableFeatures(ComputeAvailableFeatures(FB: getSTI().getFeatureBits()));
333 }
334
335 bool areEqualRegs(const MCParsedAsmOperand &Op1,
336 const MCParsedAsmOperand &Op2) const override;
337 bool parseInstruction(ParseInstructionInfo &Info, StringRef Name,
338 SMLoc NameLoc, OperandVector &Operands) override;
339 bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
340 ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
341 SMLoc &EndLoc) override;
342 bool ParseDirective(AsmToken DirectiveID) override;
343 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
344 unsigned Kind) override;
345
346 static bool classifySymbolRef(const MCExpr *Expr, AArch64::Specifier &ELFSpec,
347 AArch64::Specifier &DarwinSpec,
348 int64_t &Addend);
349};
350
351/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
352/// instruction.
353class AArch64Operand : public MCParsedAsmOperand {
354private:
  // Discriminator selecting which member of the payload union below is
  // active for this operand.
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_ImmRange,
    k_CondCode,
    k_Register,
    k_MatrixRegister,
    k_MatrixTileList,
    k_SVCR,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier,
    k_PSBHint,
    k_PHint,
    k_BTIHint,
    k_CMHPriorityHint,
    k_TIndexHint,
  } Kind;
379
  // Source range of this operand, for diagnostics.
  SMLoc StartLoc, EndLoc;

  // A raw token operand; the text is not owned (points into the source).
  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  // Separate shift/extend operand.
  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount;
  };
394
  // A register operand, optionally carrying an attached shift/extend.
  struct RegOp {
    MCRegister Reg;
    RegKind Kind;
    int ElementWidth;

    // The register may be allowed as a different register class,
    // e.g. for GPR64as32 or GPR32as64.
    RegConstraintEqualityTy EqualityTy;

    // In some cases the shift/extend needs to be explicitly parsed together
    // with the register, rather than as a separate operand. This is needed
    // for addressing modes where the instruction as a whole dictates the
    // scaling/extend, rather than specific bits in the instruction.
    // By parsing them as a single operand, we avoid the need to pass an
    // extra operand in all CodeGen patterns (because all operands need to
    // have an associated value), and we avoid the need to update TableGen to
    // accept operands that have no associated bits in the instruction.
    //
    // An added benefit of parsing them together is that the assembler
    // can give a sensible diagnostic if the scaling is not correct.
    //
    // The default is 'lsl #0' (HasExplicitAmount = false) if no
    // ShiftExtend is specified.
    ShiftExtendOp ShiftExtend;
  };
420
  // An SME matrix register (array/tile/row/col — see MatrixKind).
  struct MatrixRegOp {
    MCRegister Reg;
    unsigned ElementWidth;
    MatrixKind Kind;
  };

  // A list of matrix tiles, encoded as a bit mask.
  struct MatrixTileListOp {
    unsigned RegMask = 0;
  };

  // A vector register list, e.g. "{ z0.s, z4.s, z8.s }": first register,
  // element count, stride between registers, and element shape.
  struct VectorListOp {
    MCRegister Reg;
    unsigned Count;
    unsigned Stride;
    unsigned NumElements;
    unsigned ElementWidth;
    RegKind RegisterKind;
  };

  // A vector lane index, e.g. the "[1]" in "v0.s[1]".
  struct VectorIndexOp {
    int Val;
  };

  // A plain immediate expression.
  struct ImmOp {
    const MCExpr *Val;
  };

  // An immediate with an explicit left-shift amount.
  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  // An inclusive immediate range "first:last".
  struct ImmRangeOp {
    unsigned First;
    unsigned Last;
  };

  // A condition code operand (eq, ne, ...).
  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };
461
  // A floating-point immediate.
  struct FPImmOp {
    uint64_t Val; // APFloat value bitcasted to uint64_t.
    bool IsExact; // describes whether parsed value was exact.
  };

  // A barrier operand (dsb/dmb/isb option), possibly with an nXS modifier.
  struct BarrierOp {
    const char *Data; // Name text; not owned.
    unsigned Length;
    unsigned Val; // Not the enum since not all values have names.
    bool HasnXSModifier;
  };

  // A system register operand with its MRS/MSR/PState encodings.
  struct SysRegOp {
    const char *Data; // Name text; not owned.
    unsigned Length;
    uint32_t MRSReg;
    uint32_t MSRReg;
    uint32_t PStateField;
  };

  // A system control register immediate (Cn/Cm field).
  struct SysCRImmOp {
    unsigned Val;
  };

  // A prefetch operand (prfm/prfum), named or numeric.
  struct PrefetchOp {
    const char *Data; // Name text; not owned.
    unsigned Length;
    unsigned Val;
  };

  // A PSB hint operand.
  struct PSBHintOp {
    const char *Data; // Name text; not owned.
    unsigned Length;
    unsigned Val;
  };
  // A PHint operand.
  struct PHintOp {
    const char *Data; // Name text; not owned.
    unsigned Length;
    unsigned Val;
  };
  // A BTI hint operand.
  struct BTIHintOp {
    const char *Data; // Name text; not owned.
    unsigned Length;
    unsigned Val;
  };
  // A CMH priority hint operand.
  struct CMHPriorityHintOp {
    const char *Data; // Name text; not owned.
    unsigned Length;
    unsigned Val;
  };
  // A TIndex hint operand.
  struct TIndexHintOp {
    const char *Data; // Name text; not owned.
    unsigned Length;
    unsigned Val;
  };

  // An SVCR (streaming-mode control) operand.
  struct SVCROp {
    const char *Data; // Name text; not owned.
    unsigned Length;
    unsigned PStateField;
  };
523
  // Payload storage; the active member is selected by Kind. All members are
  // trivially copyable, so the copy constructor can copy by assignment.
  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct MatrixRegOp MatrixReg;
    struct MatrixTileListOp MatrixTileList;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct ImmRangeOp ImmRange;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct PHintOp PHint;
    struct BTIHintOp BTIHint;
    struct CMHPriorityHintOp CMHPriorityHint;
    struct TIndexHintOp TIndexHint;
    struct ShiftExtendOp ShiftExtend;
    struct SVCROp SVCR;
  };

  // Keep the MCContext around as the MCExprs may need manipulated during
  // the add<>Operands() calls.
  MCContext &Ctx;
552
public:
  // Construct an empty operand of kind K; the caller fills in the payload.
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}

  // Copy constructor: copies only the union member selected by o.Kind.
  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_ImmRange:
      ImmRange = o.ImmRange;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_MatrixRegister:
      MatrixReg = o.MatrixReg;
      break;
    case k_MatrixTileList:
      MatrixTileList = o.MatrixTileList;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_PHint:
      PHint = o.PHint;
      break;
    case k_BTIHint:
      BTIHint = o.BTIHint;
      break;
    case k_CMHPriorityHint:
      CMHPriorityHint = o.CMHPriorityHint;
      break;
    case k_TIndexHint:
      TIndexHint = o.TIndexHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    case k_SVCR:
      SVCR = o.SVCR;
      break;
    }
  }
629
  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  // The accessors below assert that the requested union member is the one
  // selected by Kind before returning it.

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  unsigned getFirstImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.First;
  }

  unsigned getLastImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.Last;
  }

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  // Reconstruct the APFloat from the stored bit pattern.
  APFloat getFPImm() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
  }

  bool getFPImmIsExact() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }

  bool getBarriernXSModifier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.HasnXSModifier;
  }

  MCRegister getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.Reg;
  }
704
  // Accessors for the matrix, vector-list, vector-index, sysreg, syscr and
  // prefetch payloads; each asserts the matching Kind.

  MCRegister getMatrixReg() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.Reg;
  }

  unsigned getMatrixElementWidth() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.ElementWidth;
  }

  MatrixKind getMatrixKind() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.Kind;
  }

  unsigned getMatrixTileListRegMask() const {
    assert(isMatrixTileList() && "Invalid access!");
    return MatrixTileList.RegMask;
  }

  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;
  }

  MCRegister getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Reg;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorListStride() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Stride;
  }

  int getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }
764
  // Accessors for the hint, SVCR and shift/extend payloads. The shift/extend
  // accessors also work for k_Register, since a register operand may carry
  // an attached ShiftExtend (see RegOp).

  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return PSBHint.Val;
  }

  unsigned getPHint() const {
    assert(Kind == k_PHint && "Invalid access!");
    return PHint.Val;
  }

  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);
  }

  StringRef getPHintName() const {
    assert(Kind == k_PHint && "Invalid access!");
    return StringRef(PHint.Data, PHint.Length);
  }

  unsigned getBTIHint() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return BTIHint.Val;
  }

  StringRef getBTIHintName() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return StringRef(BTIHint.Data, BTIHint.Length);
  }

  unsigned getCMHPriorityHint() const {
    assert(Kind == k_CMHPriorityHint && "Invalid access!");
    return CMHPriorityHint.Val;
  }

  StringRef getCMHPriorityHintName() const {
    assert(Kind == k_CMHPriorityHint && "Invalid access!");
    return StringRef(CMHPriorityHint.Data, CMHPriorityHint.Length);
  }

  unsigned getTIndexHint() const {
    assert(Kind == k_TIndexHint && "Invalid access!");
    return TIndexHint.Val;
  }

  StringRef getTIndexHintName() const {
    assert(Kind == k_TIndexHint && "Invalid access!");
    return StringRef(TIndexHint.Data, TIndexHint.Length);
  }

  StringRef getSVCR() const {
    assert(Kind == k_SVCR && "Invalid access!");
    return StringRef(SVCR.Data, SVCR.Length);
  }

  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return StringRef(Prefetch.Data, Prefetch.Length);
  }

  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Type;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Type;
    llvm_unreachable("Invalid access!");
  }

  unsigned getShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Amount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Amount;
    llvm_unreachable("Invalid access!");
  }

  bool hasShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.HasExplicitAmount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.HasExplicitAmount;
    llvm_unreachable("Invalid access!");
  }
848
  bool isImm() const override { return Kind == k_Immediate; }
  // AArch64 operands never use the generic memory-operand kind.
  bool isMem() const override { return false; }
851
852 bool isUImm6() const {
853 if (!isImm())
854 return false;
855 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
856 if (!MCE)
857 return false;
858 int64_t Val = MCE->getValue();
859 return (Val >= 0 && Val < 64);
860 }
861
862 template <int Width> bool isSImm() const {
863 return bool(isSImmScaled<Width, 1>());
864 }
865
866 template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
867 return isImmScaled<Bits, Scale>(true);
868 }
869
870 template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
871 DiagnosticPredicate isUImmScaled() const {
872 if (IsRange && isImmRange() &&
873 (getLastImmVal() != getFirstImmVal() + Offset))
874 return DiagnosticPredicate::NoMatch;
875
876 return isImmScaled<Bits, Scale, IsRange>(false);
877 }
878
879 template <int Bits, int Scale, bool IsRange = false>
880 DiagnosticPredicate isImmScaled(bool Signed) const {
881 if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
882 (isImmRange() && !IsRange))
883 return DiagnosticPredicate::NoMatch;
884
885 int64_t Val;
886 if (isImmRange())
887 Val = getFirstImmVal();
888 else {
889 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
890 if (!MCE)
891 return DiagnosticPredicate::NoMatch;
892 Val = MCE->getValue();
893 }
894
895 int64_t MinVal, MaxVal;
896 if (Signed) {
897 int64_t Shift = Bits - 1;
898 MinVal = (int64_t(1) << Shift) * -Scale;
899 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
900 } else {
901 MinVal = 0;
902 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
903 }
904
905 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
906 return DiagnosticPredicate::Match;
907
908 return DiagnosticPredicate::NearMatch;
909 }
910
911 DiagnosticPredicate isSVEPattern() const {
912 if (!isImm())
913 return DiagnosticPredicate::NoMatch;
914 auto *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
915 if (!MCE)
916 return DiagnosticPredicate::NoMatch;
917 int64_t Val = MCE->getValue();
918 if (Val >= 0 && Val < 32)
919 return DiagnosticPredicate::Match;
920 return DiagnosticPredicate::NearMatch;
921 }
922
923 DiagnosticPredicate isSVEVecLenSpecifier() const {
924 if (!isImm())
925 return DiagnosticPredicate::NoMatch;
926 auto *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
927 if (!MCE)
928 return DiagnosticPredicate::NoMatch;
929 int64_t Val = MCE->getValue();
930 if (Val >= 0 && Val <= 1)
931 return DiagnosticPredicate::Match;
932 return DiagnosticPredicate::NearMatch;
933 }
934
935 bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
936 AArch64::Specifier ELFSpec;
937 AArch64::Specifier DarwinSpec;
938 int64_t Addend;
939 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFSpec, DarwinSpec,
940 Addend)) {
941 // If we don't understand the expression, assume the best and
942 // let the fixup and relocation code deal with it.
943 return true;
944 }
945
946 if (DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
947 llvm::is_contained(
948 Set: {AArch64::S_LO12, AArch64::S_GOT_LO12, AArch64::S_GOT_AUTH_LO12,
949 AArch64::S_DTPREL_LO12, AArch64::S_DTPREL_LO12_NC,
950 AArch64::S_TPREL_LO12, AArch64::S_TPREL_LO12_NC,
951 AArch64::S_GOTTPREL_LO12_NC, AArch64::S_TLSDESC_LO12,
952 AArch64::S_TLSDESC_AUTH_LO12, AArch64::S_SECREL_LO12,
953 AArch64::S_SECREL_HI12, AArch64::S_GOT_PAGE_LO15},
954 Element: ELFSpec)) {
955 // Note that we don't range-check the addend. It's adjusted modulo page
956 // size when converted, so there is no "out of range" condition when using
957 // @pageoff.
958 return true;
959 } else if (DarwinSpec == AArch64::S_MACHO_GOTPAGEOFF ||
960 DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF) {
961 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
962 return Addend == 0;
963 }
964
965 return false;
966 }
967
968 template <int Scale> bool isUImm12Offset() const {
969 if (!isImm())
970 return false;
971
972 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
973 if (!MCE)
974 return isSymbolicUImm12Offset(Expr: getImm());
975
976 int64_t Val = MCE->getValue();
977 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
978 }
979
980 template <int N, int M>
981 bool isImmInRange() const {
982 if (!isImm())
983 return false;
984 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
985 if (!MCE)
986 return false;
987 int64_t Val = MCE->getValue();
988 return (Val >= N && Val <= M);
989 }
990
991 // NOTE: Also used for isLogicalImmNot as anything that can be represented as
992 // a logical immediate can always be represented when inverted.
993 template <typename T>
994 bool isLogicalImm() const {
995 if (!isImm())
996 return false;
997 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
998 if (!MCE)
999 return false;
1000
1001 int64_t Val = MCE->getValue();
1002 // Avoid left shift by 64 directly.
1003 uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
1004 // Allow all-0 or all-1 in top bits to permit bitwise NOT.
1005 if ((Val & Upper) && (Val & Upper) != Upper)
1006 return false;
1007
1008 return AArch64_AM::isLogicalImmediate(imm: Val & ~Upper, regSize: sizeof(T) * 8);
1009 }
1010
  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  bool isImmRange() const { return Kind == k_ImmRange; }
1014
  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
  /// immediate that can be shifted by 'Shift'.
  /// Returns std::nullopt for non-constant expressions or mismatched shifts.
  template <unsigned Width>
  std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(Val: getShiftedImmVal()))
        return std::make_pair(x: CE->getValue(), y: Width);

    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(Val: getImm())) {
        int64_t Val = CE->getValue();
        // A plain non-zero constant whose low Width bits are clear can be
        // re-expressed as (Val >> Width, Width); otherwise report it unshifted.
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(x: Val >> Width, y: Width);
        else
          return std::make_pair(x&: Val, y: 0u);
      }

    return {};
  }
1035
  // True if the operand is usable as an ADD/SUB immediate: either a symbolic
  // reference with a page-offset/low-12-bits style specifier, or a constant
  // in [0, 0xfff] optionally shifted left by 12.
  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    AArch64::Specifier ELFSpec;
    AArch64::Specifier DarwinSpec;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFSpec, DarwinSpec,
                                            Addend)) {
      // Symbolic reference: accept the MachO page-offset specifiers and the
      // ELF :lo12:-style relocation specifiers.
      return DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
             DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF ||
             (DarwinSpec == AArch64::S_MACHO_GOTPAGEOFF && Addend == 0) ||
             llvm::is_contained(
                 Set: {AArch64::S_LO12, AArch64::S_GOT_AUTH_LO12,
                      AArch64::S_DTPREL_HI12, AArch64::S_DTPREL_LO12,
                      AArch64::S_DTPREL_LO12_NC, AArch64::S_TPREL_HI12,
                      AArch64::S_TPREL_LO12, AArch64::S_TPREL_LO12_NC,
                      AArch64::S_TLSDESC_LO12, AArch64::S_TLSDESC_AUTH_LO12,
                      AArch64::S_SECREL_HI12, AArch64::S_SECREL_LO12},
                 Element: ELFSpec);
    }

    // If it's a constant, it should be a real immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

    // If it's an expression, we hope for the best and let the fixup/relocation
    // code deal with it.
    return true;
  }
1078
  // True if the operand is a negative constant whose magnitude fits an
  // ADD/SUB immediate (so the instruction can be flipped to the opposite op).
  bool isAddSubImmNeg() const {
    if (!isShiftedImm() && !isImm())
      return false;

    // Otherwise it should be a real negative immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;

    return false;
  }
1089
  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For element-width of 8 bits a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed/unsigned.
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(Val: getImm())))
      return DiagnosticPredicate::NoMatch;

    // Byte elements cannot take the "LSL #8" form, hence the extra check.
    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))
        return DiagnosticPredicate::Match;

    return DiagnosticPredicate::NearMatch;
  }
1110
  // Unsigned value in the range 0 to 255. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range 0 to 65280.
  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(Val: getImm())))
      return DiagnosticPredicate::NoMatch;

    // Byte elements cannot take the "LSL #8" form, hence the extra check.
    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
                                        << ShiftedImm->second))
        return DiagnosticPredicate::Match;

    return DiagnosticPredicate::NearMatch;
  }
1128
1129 template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
1130 if (isLogicalImm<T>() && !isSVECpyImm<T>())
1131 return DiagnosticPredicate::Match;
1132 return DiagnosticPredicate::NoMatch;
1133 }
1134
1135 bool isCondCode() const { return Kind == k_CondCode; }
1136
  // True if the constant is encodable as an AdvSIMD modified immediate of
  // type 10 (per AArch64_AM::isAdvSIMDModImmType10).
  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isAdvSIMDModImmType10(Imm: MCE->getValue());
  }
1145
  // True if the operand is a valid branch target with an N-bit signed
  // word (4-byte aligned) displacement. Non-constant expressions are
  // accepted and left to the fixup code.
  template<int N>
  bool isBranchTarget() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    // Branch offsets are instruction (4-byte) aligned.
    if (Val & 0x3)
      return false;
    assert(N > 0 && "Branch target immediate cannot be 0 bits!");
    return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
  }
1159
  // True if the operand is a symbolic reference whose ELF relocation
  // specifier is one of AllowedModifiers. Darwin specifiers are rejected.
  bool isMovWSymbol(ArrayRef<AArch64::Specifier> AllowedModifiers) const {
    if (!isImm())
      return false;

    AArch64::Specifier ELFSpec;
    AArch64::Specifier DarwinSpec;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr: getImm(), ELFSpec, DarwinSpec,
                                             Addend)) {
      return false;
    }
    if (DarwinSpec != AArch64::S_None)
      return false;

    return llvm::is_contained(Range&: AllowedModifiers, Element: ELFSpec);
  }
1176
  // MOVW symbol with a :abs_g3:/:prel_g3: (bits 48-63) specifier.
  bool isMovWSymbolG3() const {
    return isMovWSymbol(AllowedModifiers: {AArch64::S_ABS_G3, AArch64::S_PREL_G3});
  }
1180
  // MOVW symbol with a g2 (bits 32-47) relocation specifier.
  bool isMovWSymbolG2() const {
    return isMovWSymbol(AllowedModifiers: {AArch64::S_ABS_G2, AArch64::S_ABS_G2_S,
                                          AArch64::S_ABS_G2_NC, AArch64::S_PREL_G2,
                                          AArch64::S_PREL_G2_NC, AArch64::S_TPREL_G2,
                                          AArch64::S_DTPREL_G2});
  }
1187
  // MOVW symbol with a g1 (bits 16-31) relocation specifier.
  bool isMovWSymbolG1() const {
    return isMovWSymbol(AllowedModifiers: {AArch64::S_ABS_G1, AArch64::S_ABS_G1_S,
                                          AArch64::S_ABS_G1_NC, AArch64::S_PREL_G1,
                                          AArch64::S_PREL_G1_NC, AArch64::S_GOTTPREL_G1,
                                          AArch64::S_TPREL_G1, AArch64::S_TPREL_G1_NC,
                                          AArch64::S_DTPREL_G1, AArch64::S_DTPREL_G1_NC});
  }
1195
  // MOVW symbol with a g0 (bits 0-15) relocation specifier.
  bool isMovWSymbolG0() const {
    return isMovWSymbol(AllowedModifiers: {AArch64::S_ABS_G0, AArch64::S_ABS_G0_S,
                                          AArch64::S_ABS_G0_NC, AArch64::S_PREL_G0,
                                          AArch64::S_PREL_G0_NC, AArch64::S_GOTTPREL_G0_NC,
                                          AArch64::S_TPREL_G0, AArch64::S_TPREL_G0_NC,
                                          AArch64::S_DTPREL_G0, AArch64::S_DTPREL_G0_NC});
  }
1203
  // True if the immediate can be materialized as a MOV alias of MOVZ with the
  // given Shift for a RegWidth-bit register.
  template<int RegWidth, int Shift>
  bool isMOVZMovAlias() const {
    if (!isImm()) return false;

    const MCExpr *E = getImm();
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: E)) {
      uint64_t Value = CE->getValue();

      return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
    }
    // Only supports the case of Shift being 0 if an expression is used as an
    // operand
    return !Shift && E;
  }
1218
1219 template<int RegWidth, int Shift>
1220 bool isMOVNMovAlias() const {
1221 if (!isImm()) return false;
1222
1223 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
1224 if (!CE) return false;
1225 uint64_t Value = CE->getValue();
1226
1227 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1228 }
1229
  // True if this is a floating-point immediate encodable in the 8-bit
  // AArch64 FP immediate format.
  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(Imm: getFPImm().bitcastToAPInt()) != -1;
  }
1234
  // Barrier operand without the nXS qualifier.
  bool isBarrier() const {
    return Kind == k_Barrier && !getBarriernXSModifier();
  }
  // Barrier operand carrying the nXS qualifier.
  bool isBarriernXS() const {
    return Kind == k_Barrier && getBarriernXSModifier();
  }
  bool isSysReg() const { return Kind == k_SysReg; }
1242
  // System register readable via MRS (-1U marks "no MRS encoding").
  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }
1248
  // System register writable via MSR (-1U marks "no MSR encoding").
  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }
1253
  // PSTATE field whose MSR-immediate form takes a 1-bit value.
  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return AArch64PState::lookupPStateImm0_1ByEncoding(Encoding: SysReg.PStateField);
  }
1258
  // PSTATE field whose MSR-immediate form takes a 4-bit value.
  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg())
      return false;
    return AArch64PState::lookupPStateImm0_15ByEncoding(Encoding: SysReg.PStateField);
  }
1264
  // SME SVCR operand with a valid PSTATE field encoding.
  bool isSVCR() const {
    if (Kind != k_SVCR)
      return false;
    return SVCR.PStateField != -1U;
  }
1270
  // Generic MCParsedAsmOperand register query.
  bool isReg() const override {
    return Kind == k_Register;
  }

  bool isVectorList() const { return Kind == k_VectorList; }
1276
  // Scalar (GPR/FPR) register, as opposed to a vector or predicate register.
  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }
1280
  // Any NEON vector register.
  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }
1284
  // NEON vector register restricted to the lower register banks
  // (FPR128_lo / FPR64_lo register classes).
  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
                Reg: Reg.Reg) ||
            AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
                Reg: Reg.Reg));
  }
1292
  // NEON vector register restricted to V0-V7.
  bool isNeonVectorReg0to7() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_0to7RegClassID].contains(
               Reg: Reg.Reg));
  }
1298
  // SME matrix (ZA) operand kinds.
  bool isMatrix() const { return Kind == k_MatrixRegister; }
  bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1301
  // True if this is a predicate-as-counter (PNn) register contained in the
  // given predicate register class.
  template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
    case AArch64::PPR_p8to15RegClassID:
    case AArch64::PNRRegClassID:
    case AArch64::PNR_p8to15RegClassID:
    case AArch64::PPRorPNRRegClassID:
      RK = RegKind::SVEPredicateAsCounter;
      break;
    default:
      llvm_unreachable("Unsupported register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(Reg: getReg());
  }
1320
  // True if this is an SVE data (Z) or predicate (P) register contained in
  // the given register class; the expected RegKind is derived from Class.
  template <unsigned Class> bool isSVEVectorReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::ZPRRegClassID:
    case AArch64::ZPR_3bRegClassID:
    case AArch64::ZPR_4bRegClassID:
    case AArch64::ZPRMul2_LoRegClassID:
    case AArch64::ZPRMul2_HiRegClassID:
    case AArch64::ZPR_KRegClassID:
      RK = RegKind::SVEDataVector;
      break;
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
    case AArch64::PPR_p8to15RegClassID:
    case AArch64::PNRRegClassID:
    case AArch64::PNR_p8to15RegClassID:
    case AArch64::PPRorPNRRegClassID:
      RK = RegKind::SVEPredicateVector;
      break;
    default:
      llvm_unreachable("Unsupported register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(Reg: getReg());
  }
1347
  // Scalar FPR register that will be re-encoded as the aliasing Z register.
  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(Reg: getReg());
  }
1352
  // SVE predicate register of the given class with a matching element width;
  // a predicate of the wrong width is a NearMatch (better diagnostics).
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
      return DiagnosticPredicate::NoMatch;

    if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicate::Match;

    return DiagnosticPredicate::NearMatch;
  }
1363
  // Accepts either a predicate vector or a predicate-as-counter register of
  // the given class and element width.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateOrPredicateAsCounterRegOfWidth() const {
    if (Kind != k_Register || (Reg.Kind != RegKind::SVEPredicateAsCounter &&
                               Reg.Kind != RegKind::SVEPredicateVector))
      return DiagnosticPredicate::NoMatch;

    if ((isSVEPredicateAsCounterReg<Class>() ||
         isSVEPredicateVectorRegOfWidth<ElementWidth, Class>()) &&
        Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicate::Match;

    return DiagnosticPredicate::NearMatch;
  }
1377
  // Predicate-as-counter register of the given class and element width.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
      return DiagnosticPredicate::NoMatch;

    if (isSVEPredicateAsCounterReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicate::Match;

    return DiagnosticPredicate::NearMatch;
  }
1388
  // SVE data (Z) register of the given class and element width.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
      return DiagnosticPredicate::NoMatch;

    if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicate::Match;

    return DiagnosticPredicate::NearMatch;
  }
1399
  // SVE data register of the given width/class carrying the expected
  // shift-extend (type and amount) for a scaled addressing mode.
  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicate::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(Value: ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicate::NoMatch;

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicate::Match;

    return DiagnosticPredicate::NearMatch;
  }
1422
  // 64-bit GPR written where a 32-bit register is expected (renamed later).
  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg: Reg.Reg);
  }
1427
  // 32-bit GPR written where a 64-bit register is expected (renamed later).
  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg: Reg.Reg);
  }
1432
  // Consecutive-octuple GPR64 tuple register (GPR64x8 class).
  bool isGPR64x8() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
               Reg: Reg.Reg);
  }
1438
  // Sequential W-register pair (e.g. for CASP).
  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg: Reg.Reg);
  }
1444
  // Sequential X-register pair (e.g. for CASP).
  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg: Reg.Reg);
  }
1450
  // The XZR operand of a SYSP instruction (must literally be xzr).
  bool isSyspXzrPair() const {
    return isGPR64<AArch64::GPR64RegClassID>() && Reg.Reg == AArch64::XZR;
  }
1454
1455 template<int64_t Angle, int64_t Remainder>
1456 DiagnosticPredicate isComplexRotation() const {
1457 if (!isImm())
1458 return DiagnosticPredicate::NoMatch;
1459
1460 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
1461 if (!CE)
1462 return DiagnosticPredicate::NoMatch;
1463 uint64_t Value = CE->getValue();
1464
1465 if (Value % Angle == Remainder && Value <= 270)
1466 return DiagnosticPredicate::Match;
1467 return DiagnosticPredicate::NearMatch;
1468 }
1469
  // Scalar register contained in the given 64-bit GPR register class.
  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(Reg: getReg());
  }
1474
  // GPR64 with an LSL shift-extend whose amount matches the element size
  // (log2 of ExtWidth bytes).
  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
      return DiagnosticPredicate::NoMatch;

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(Value: ExtWidth / 8))
      return DiagnosticPredicate::Match;
    return DiagnosticPredicate::NearMatch;
  }
1485
  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  /// NumElements == 0 marks "no explicit element type was written".
  template <RegKind VectorKind, unsigned NumRegs, bool IsConsecutive = false>
  bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           VectorList.NumElements == 0 &&
           VectorList.RegisterKind == VectorKind &&
           (!IsConsecutive || (VectorList.Stride == 1));
  }
1495
1496 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1497 unsigned ElementWidth, unsigned Stride = 1>
1498 bool isTypedVectorList() const {
1499 if (Kind != k_VectorList)
1500 return false;
1501 if (VectorList.Count != NumRegs)
1502 return false;
1503 if (VectorList.RegisterKind != VectorKind)
1504 return false;
1505 if (VectorList.ElementWidth != ElementWidth)
1506 return false;
1507 if (VectorList.Stride != Stride)
1508 return false;
1509 return VectorList.NumElements == NumElements;
1510 }
1511
  // Typed vector list whose first register additionally belongs to RegClass
  // (used for "multiple-of-N" register constraints).
  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth, unsigned RegClass>
  DiagnosticPredicate isTypedVectorListMultiple() const {
    bool Res =
        isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
    if (!Res)
      return DiagnosticPredicate::NoMatch;
    if (!AArch64MCRegisterClasses[RegClass].contains(Reg: VectorList.Reg))
      return DiagnosticPredicate::NearMatch;
    return DiagnosticPredicate::Match;
  }
1523
  // Strided vector list: the first register must lie in the first Stride
  // registers of either the Z0- or Z16-based bank.
  template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
            unsigned ElementWidth>
  DiagnosticPredicate isTypedVectorListStrided() const {
    bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
                                 ElementWidth, Stride>();
    if (!Res)
      return DiagnosticPredicate::NoMatch;
    if ((VectorList.Reg < (AArch64::Z0 + Stride)) ||
        ((VectorList.Reg >= AArch64::Z16) &&
         (VectorList.Reg < (AArch64::Z16 + Stride))))
      return DiagnosticPredicate::Match;
    return DiagnosticPredicate::NoMatch;
  }
1537
1538 template <int Min, int Max>
1539 DiagnosticPredicate isVectorIndex() const {
1540 if (Kind != k_VectorIndex)
1541 return DiagnosticPredicate::NoMatch;
1542 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1543 return DiagnosticPredicate::Match;
1544 return DiagnosticPredicate::NearMatch;
1545 }
1546
  bool isToken() const override { return Kind == k_Token; }

  // Token operand exactly equal to Str.
  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  // Simple operand-kind predicates.
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isPHint() const { return Kind == k_PHint; }
  bool isBTIHint() const { return Kind == k_BTIHint; }
  bool isCMHPriorityHint() const { return Kind == k_CMHPriorityHint; }
  bool isTIndexHint() const { return Kind == k_TIndexHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  // Shift-extend operand that is a plain shift (LSL/LSR/ASR/ROR/MSL)
  // rather than an extend.
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }
1569
  // FP immediate that bit-for-bit equals the table entry for ImmEnum.
  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicate::NoMatch;

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(Enum: ImmEnum);
      assert(Desc && "Unknown enum value");

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      auto StatusOrErr =
          RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
      if (errorToBool(Err: StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
        llvm_unreachable("FP immediate is not exact");

      if (getFPImm().bitwiseIsEqual(RHS: RealVal))
        return DiagnosticPredicate::Match;
    }

    return DiagnosticPredicate::NearMatch;
  }
1592
  // FP immediate matching either of two table entries.
  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    DiagnosticPredicate Res = DiagnosticPredicate::NoMatch;
    if ((Res = isExactFPImm<ImmA>()))
      return DiagnosticPredicate::Match;
    if ((Res = isExactFPImm<ImmB>()))
      return DiagnosticPredicate::Match;
    // Res keeps the best (NearMatch vs NoMatch) diagnostic from above.
    return Res;
  }
1602
  // Shift-extend operand usable as a register extend (UXTB..SXTX or LSL)
  // with an amount of at most 4.
  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }
1615
  bool isExtend64() const {
    if (!isExtend())
      return false;
    // Make sure the extend expects a 32-bit source register.
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
           ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
           ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
  }
1625
1626 bool isExtendLSL64() const {
1627 if (!isExtend())
1628 return false;
1629 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1630 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1631 ET == AArch64_AM::LSL) &&
1632 getShiftExtendAmount() <= 4;
1633 }
1634
  // LSL with a 3-bit shift amount (0-7).
  bool isLSLImm3Shift() const {
    if (!isShiftExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::LSL && getShiftExtendAmount() <= 7;
  }
1641
  // X-register memory extend (LSL/SXTX) whose amount is either 0 or the
  // log2 of the access size in bytes.
  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Value: Width / 8) ||
            getShiftExtendAmount() == 0);
  }
1650
  // W-register memory extend (UXTW/SXTW) whose amount is either 0 or the
  // log2 of the access size in bytes.
  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Value: Width / 8) ||
            getShiftExtendAmount() == 0);
  }
1659
  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }
1670
  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }
1682
  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    // (The previous comment listed 32/48 as well, but those only apply to the
    // 64-bit form below.)
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }
1694
  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    // (The previous comment listed only 0/16, which is the 32-bit form above.)
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }
1706
  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }
1716
  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector half-word shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }
1726
  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL (masking shift left) by 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }
1736
  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  template<int Width>
  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }
1746
1747 bool isAdrpLabel() const {
1748 // Validation was handled during parsing, so we just verify that
1749 // something didn't go haywire.
1750 if (!isImm())
1751 return false;
1752
1753 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Imm.Val)) {
1754 int64_t Val = CE->getValue();
1755 int64_t Min = - (4096 * (1LL << (21 - 1)));
1756 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1757 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1758 }
1759
1760 return true;
1761 }
1762
1763 bool isAdrLabel() const {
1764 // Validation was handled during parsing, so we just verify that
1765 // something didn't go haywire.
1766 if (!isImm())
1767 return false;
1768
1769 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Imm.Val)) {
1770 int64_t Val = CE->getValue();
1771 int64_t Min = - (1LL << (21 - 1));
1772 int64_t Max = ((1LL << (21 - 1)) - 1);
1773 return Val >= Min && Val <= Max;
1774 }
1775
1776 return true;
1777 }
1778
  // SME matrix register of the given kind, element size and register class.
  template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
  DiagnosticPredicate isMatrixRegOperand() const {
    if (!isMatrix())
      return DiagnosticPredicate::NoMatch;
    if (getMatrixKind() != Kind ||
        !AArch64MCRegisterClasses[RegClass].contains(Reg: getMatrixReg()) ||
        EltSize != getMatrixElementWidth())
      return DiagnosticPredicate::NearMatch;
    return DiagnosticPredicate::Match;
  }
1789
  bool isPAuthPCRelLabel16Operand() const {
    // PAuth PCRel16 operands are similar to regular branch targets, but only
    // negative values are allowed for concrete immediates as signing instr
    // should be in a lower address.
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    // Must be 4-byte aligned.
    if (Val & 0b11)
      return false;
    // Non-positive, fitting a signed 16-bit word offset (18 bits of bytes).
    return (Val <= 0) && (Val > -(1 << 18));
  }
1804
  // Append Expr to Inst, folding constants into immediate operands.
  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible. Null MCExpr = 0.
    if (!Expr)
      Inst.addOperand(Op: MCOperand::createImm(Val: 0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Expr))
      Inst.addOperand(Op: MCOperand::createImm(Val: CE->getValue()));
    else
      Inst.addOperand(Op: MCOperand::createExpr(Val: Expr));
  }
1814
  // Append this operand's register to Inst.
  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createReg(Reg: getReg()));
  }
1819
  // Append this operand's matrix register to Inst.
  void addMatrixOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createReg(Reg: getMatrixReg()));
  }
1824
  // Append the W register with the same encoding as the parsed X register.
  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    MCRegister Reg = RI->getRegClass(i: AArch64::GPR32RegClassID)
                         .getRegister(i: RI->getEncodingValue(Reg: getReg()));

    Inst.addOperand(Op: MCOperand::createReg(Reg));
  }
1836
  // Append the X register with the same encoding as the parsed W register.
  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    MCRegister Reg = RI->getRegClass(i: AArch64::GPR64RegClassID)
                         .getRegister(i: RI->getEncodingValue(Reg: getReg()));

    Inst.addOperand(Op: MCOperand::createReg(Reg));
  }
1848
1849 template <int Width>
1850 void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1851 unsigned Base;
1852 switch (Width) {
1853 case 8: Base = AArch64::B0; break;
1854 case 16: Base = AArch64::H0; break;
1855 case 32: Base = AArch64::S0; break;
1856 case 64: Base = AArch64::D0; break;
1857 case 128: Base = AArch64::Q0; break;
1858 default:
1859 llvm_unreachable("Unsupported width");
1860 }
1861 Inst.addOperand(Op: MCOperand::createReg(Reg: AArch64::Z0 + getReg() - Base));
1862 }
1863
  // Append a predicate register, normalising PN0-PN15 to P0-P15.
  void addPPRorPNRRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    MCRegister Reg = getReg();
    // Normalise to PPR
    if (Reg >= AArch64::PN0 && Reg <= AArch64::PN15)
      Reg = Reg - AArch64::PN0 + AArch64::P0;
    Inst.addOperand(Op: MCOperand::createReg(Reg));
  }
1872
  // Append the P register with the same index as this PN register.
  void addPNRasPPRRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(
        Op: MCOperand::createReg(Reg: (getReg() - AArch64::PN0) + AArch64::P0));
  }
1878
  // Append the D register aliasing the parsed Q register.
  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(Op: MCOperand::createReg(Reg: AArch64::D0 + getReg() - AArch64::Q0));
  }
1885
  // Append the parsed Q register unchanged.
  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(Op: MCOperand::createReg(Reg: getReg()));
  }
1892
  // Append the register as-is (class constraints were already checked by the
  // corresponding is* predicate).
  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createReg(Reg: getReg()));
  }

  void addVectorReg0to7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createReg(Reg: getReg()));
  }
1902
  // Row index into the FirstRegs table in addVectorListOperands: selects
  // which register bank a vector list is built from.
  enum VecListIndexType {
    VecListIdx_DReg = 0,
    VecListIdx_QReg = 1,
    VecListIdx_ZReg = 2,
    VecListIdx_PReg = 3,
  };
1909
  // Append the tuple register for a NumRegs-long vector list. The table maps
  // (bank, list length) to the tuple register starting at index 0; column 0
  // holds the base register the parsed start register is measured against.
  template <VecListIndexType RegTy, unsigned NumRegs,
            bool IsConsecutive = false>
  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert((!IsConsecutive || (getVectorListStride() == 1)) &&
           "Expected consecutive registers");
    static const unsigned FirstRegs[][5] = {
      /* DReg */ { AArch64::Q0,
                   AArch64::D0,       AArch64::D0_D1,
                   AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
      /* QReg */ { AArch64::Q0,
                   AArch64::Q0,       AArch64::Q0_Q1,
                   AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
      /* ZReg */ { AArch64::Z0,
                   AArch64::Z0,       AArch64::Z0_Z1,
                   AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
      /* PReg */ { AArch64::P0,
                   AArch64::P0,       AArch64::P0_P1 }
    };

    assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
           " NumRegs must be <= 4 for ZRegs");

    assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
           " NumRegs must be <= 2 for PRegs");

    unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
    Inst.addOperand(Op: MCOperand::createReg(Reg: FirstReg + getVectorListStart() -
                                          FirstRegs[(unsigned)RegTy][0]));
  }
1940
  /// Emit an SME2 strided vector list (stride 8 for pairs, stride 4 for
  /// quads) as the corresponding strided tuple register. The encoding space
  /// is split into two halves depending on whether the first register is
  /// below Z16, hence the two rebasing branches per list size.
  template <unsigned NumRegs>
  void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4");

    switch (NumRegs) {
    case 2:
      // Pairs {Zn, Zn+8}: start must be Z0-Z7 or Z16-Z23.
      if (getVectorListStart() < AArch64::Z16) {
        assert((getVectorListStart() < AArch64::Z8) &&
               (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
        Inst.addOperand(Op: MCOperand::createReg(
            Reg: AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
      } else {
        assert((getVectorListStart() < AArch64::Z24) &&
               (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
        Inst.addOperand(Op: MCOperand::createReg(
            Reg: AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
      }
      break;
    case 4:
      // Quads {Zn, Zn+4, Zn+8, Zn+12}: start must be Z0-Z3 or Z16-Z19.
      if (getVectorListStart() < AArch64::Z16) {
        assert((getVectorListStart() < AArch64::Z4) &&
               (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
        Inst.addOperand(Op: MCOperand::createReg(
            Reg: AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
      } else {
        assert((getVectorListStart() < AArch64::Z20) &&
               (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
        Inst.addOperand(Op: MCOperand::createReg(
            Reg: AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
      }
      break;
    default:
      llvm_unreachable("Unsupported number of registers for strided vec list");
    }
  }
1977
1978 void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
1979 assert(N == 1 && "Invalid number of operands!");
1980 unsigned RegMask = getMatrixTileListRegMask();
1981 assert(RegMask <= 0xFF && "Invalid mask!");
1982 Inst.addOperand(Op: MCOperand::createImm(Val: RegMask));
1983 }
1984
1985 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1986 assert(N == 1 && "Invalid number of operands!");
1987 Inst.addOperand(Op: MCOperand::createImm(Val: getVectorIndex()));
1988 }
1989
  /// Emit a one-bit immediate selecting between two exact FP constants:
  /// 0 if the operand equals the ImmIs0 constant, 1 if it equals ImmIs1.
  template <unsigned ImmIs0, unsigned ImmIs1>
  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
    Inst.addOperand(Op: MCOperand::createImm(Val: bool(isExactFPImm<ImmIs1>())));
  }
1996
  /// Emit the immediate expression; addExpr() folds constants to plain
  /// immediates and handles symbolic expressions.
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    // as-is.
    addExpr(Inst, Expr: getImm());
  }
2004
  /// Emit an (immediate, shift) operand pair for instructions taking an
  /// optionally LSL-#Shift-ed immediate (e.g. add/sub #imm {, lsl #12}).
  template <int Shift>
  void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      // Constant that can be represented as value + shift of Shift bits.
      Inst.addOperand(Op: MCOperand::createImm(Val: ShiftedVal->first));
      Inst.addOperand(Op: MCOperand::createImm(Val: ShiftedVal->second));
    } else if (isShiftedImm()) {
      // Explicitly written "#imm, lsl #N" form.
      addExpr(Inst, Expr: getShiftedImmVal());
      Inst.addOperand(Op: MCOperand::createImm(Val: getShiftedImmShift()));
    } else {
      // Plain (possibly symbolic) immediate: shift amount of zero.
      addExpr(Inst, Expr: getImm());
      Inst.addOperand(Op: MCOperand::createImm(Val: 0));
    }
  }
2019
  /// Emit the negated form of an optionally-shifted immediate, used when an
  /// add/sub alias is matched with the operand's sign flipped. The matcher
  /// guarantees the value is representable, hence the unreachable fallback.
  template <int Shift>
  void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(Op: MCOperand::createImm(Val: -ShiftedVal->first));
      Inst.addOperand(Op: MCOperand::createImm(Val: ShiftedVal->second));
    } else
      llvm_unreachable("Not a shifted negative immediate");
  }
2029
2030 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
2031 assert(N == 1 && "Invalid number of operands!");
2032 Inst.addOperand(Op: MCOperand::createImm(Val: getCondCode()));
2033 }
2034
  /// Emit an ADRP target. A constant byte offset is converted to a 4KiB
  /// page delta (>> 12); symbolic targets are kept as expressions to be
  /// resolved by relocation/fixup.
  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
    if (!MCE)
      addExpr(Inst, Expr: getImm());
    else
      Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 12));
  }
2043
  /// Emit an ADR target: byte-granular, so the immediate needs no scaling.
  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }
2047
  /// Emit an unsigned 12-bit scaled load/store offset: constants are divided
  /// by the access size Scale; symbolic offsets are left for a fixup.
  template<int Scale>
  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());

    if (!MCE) {
      Inst.addOperand(Op: MCOperand::createExpr(Val: getImm()));
      return;
    }
    Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() / Scale));
  }
2059
  /// Emit an unsigned 6-bit immediate. The matcher only accepts constants
  /// here, so the cast<> (rather than dyn_cast<>) is safe.
  void addUImm6Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
    Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue()));
  }
2065
  /// Emit a constant immediate divided by Scale (constant-only operand).
  template <int Scale>
  void addImmScaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
    Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() / Scale));
  }
2072
  /// Emit an immediate-range operand ("first:last"); only the scaled first
  /// value is encoded, the range width being implied by the instruction.
  template <int Scale>
  void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createImm(Val: getFirstImmVal() / Scale));
  }
2078
2079 template <typename T>
2080 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
2081 assert(N == 1 && "Invalid number of operands!");
2082 const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
2083 std::make_unsigned_t<T> Val = MCE->getValue();
2084 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(imm: Val, regSize: sizeof(T) * 8);
2085 Inst.addOperand(Op: MCOperand::createImm(Val: encoding));
2086 }
2087
2088 template <typename T>
2089 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
2090 assert(N == 1 && "Invalid number of operands!");
2091 const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
2092 std::make_unsigned_t<T> Val = ~MCE->getValue();
2093 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(imm: Val, regSize: sizeof(T) * 8);
2094 Inst.addOperand(Op: MCOperand::createImm(Val: encoding));
2095 }
2096
  /// Emit an AdvSIMD modified-immediate of type 10 (64-bit byte-mask form,
  /// e.g. for MOVI Dd/Vd.2d) in its 8-bit encoded form.
  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
    uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(Imm: MCE->getValue());
    Inst.addOperand(Op: MCOperand::createImm(Val: encoding));
  }
2103
2104 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
2105 // Branch operands don't encode the low bits, so shift them off
2106 // here. If it's a label, however, just put it on directly as there's
2107 // not enough information now to do anything.
2108 assert(N == 1 && "Invalid number of operands!");
2109 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
2110 if (!MCE) {
2111 addExpr(Inst, Expr: getImm());
2112 return;
2113 }
2114 assert(MCE && "Invalid constant immediate operand!");
2115 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 2));
2116 }
2117
  /// Emit a 16-bit PC-relative PAuth label operand (word-granular, so the
  /// low two bits are shifted off constant offsets).
  void addPAuthPCRelLabel16Operands(MCInst &Inst, unsigned N) const {
    // PC-relative operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
    if (!MCE) {
      addExpr(Inst, Expr: getImm());
      return;
    }
    Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 2));
  }
2130
2131 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
2132 // Branch operands don't encode the low bits, so shift them off
2133 // here. If it's a label, however, just put it on directly as there's
2134 // not enough information now to do anything.
2135 assert(N == 1 && "Invalid number of operands!");
2136 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
2137 if (!MCE) {
2138 addExpr(Inst, Expr: getImm());
2139 return;
2140 }
2141 assert(MCE && "Invalid constant immediate operand!");
2142 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 2));
2143 }
2144
2145 void addPCRelLabel9Operands(MCInst &Inst, unsigned N) const {
2146 // Branch operands don't encode the low bits, so shift them off
2147 // here. If it's a label, however, just put it on directly as there's
2148 // not enough information now to do anything.
2149 assert(N == 1 && "Invalid number of operands!");
2150 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
2151 if (!MCE) {
2152 addExpr(Inst, Expr: getImm());
2153 return;
2154 }
2155 assert(MCE && "Invalid constant immediate operand!");
2156 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 2));
2157 }
2158
2159 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
2160 // Branch operands don't encode the low bits, so shift them off
2161 // here. If it's a label, however, just put it on directly as there's
2162 // not enough information now to do anything.
2163 assert(N == 1 && "Invalid number of operands!");
2164 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
2165 if (!MCE) {
2166 addExpr(Inst, Expr: getImm());
2167 return;
2168 }
2169 assert(MCE && "Invalid constant immediate operand!");
2170 Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 2));
2171 }
2172
  /// Emit an FMOV-style floating-point immediate in its 8-bit encoded form.
  // NOTE(review): the operand is presumably validated as FP64-encodable by
  // the matcher before this runs; getFP64Imm would assert/misbehave
  // otherwise — confirm.
  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createImm(
        Val: AArch64_AM::getFP64Imm(Imm: getFPImm().bitcastToAPInt())));
  }
2178
  /// Emit a barrier option (DSB/DMB/ISB) as its immediate encoding.
  void addBarrierOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createImm(Val: getBarrier()));
  }
2183
  /// Emit a barrier option for the nXS (XS-attribute) barrier variants;
  /// the stored value already reflects the nXS encoding.
  void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createImm(Val: getBarrier()));
  }
2188
  /// Emit the system-register encoding used for reads (MRS).
  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(Op: MCOperand::createImm(Val: SysReg.MRSReg));
  }
2194
  /// Emit the system-register encoding used for writes (MSR).
  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(Op: MCOperand::createImm(Val: SysReg.MSRReg));
  }
2200
  /// Emit a PSTATE field for MSR-immediate forms taking a 0/1 operand.
  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(Op: MCOperand::createImm(Val: SysReg.PStateField));
  }
2206
  /// Emit the SME SVCR (streaming-mode control) PSTATE field.
  void addSVCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(Op: MCOperand::createImm(Val: SVCR.PStateField));
  }
2212
  /// Emit a PSTATE field for MSR-immediate forms taking a 0-15 operand.
  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(Op: MCOperand::createImm(Val: SysReg.PStateField));
  }
2218
  /// Emit a system control-register index ("Cn") for SYS/SYSL instructions.
  void addSysCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createImm(Val: getSysCR()));
  }
2223
  /// Emit a prefetch operation (prfop) as its immediate encoding.
  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createImm(Val: getPrefetch()));
  }
2228
  /// Emit a PSB hint (e.g. "csync") as its immediate encoding.
  void addPSBHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createImm(Val: getPSBHint()));
  }
2233
  /// Emit a PHint operand as its immediate encoding.
  void addPHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createImm(Val: getPHint()));
  }
2238
  /// Emit a BTI target hint as its immediate encoding (already offset at
  /// operand-creation time; see CreateBTIHint).
  void addBTIHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createImm(Val: getBTIHint()));
  }
2243
  /// Emit a CMH priority hint as its immediate encoding.
  void addCMHPriorityHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createImm(Val: getCMHPriorityHint()));
  }
2248
  /// Emit a TIndex hint as its immediate encoding.
  void addTIndexHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createImm(Val: getTIndexHint()));
  }
2253
  /// Emit a register-shift modifier (type + amount) packed into a single
  /// immediate via getShifterImm.
  void addShifterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Imm =
        AArch64_AM::getShifterImm(ST: getShiftExtendType(), Imm: getShiftExtendAmount());
    Inst.addOperand(Op: MCOperand::createImm(Val: Imm));
  }
2260
  /// Emit a bare 3-bit LSL amount (shift type is implied by the
  /// instruction, so only the amount is encoded).
  void addLSLImm3ShifterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Imm = getShiftExtendAmount();
    Inst.addOperand(Op: MCOperand::createImm(Val: Imm));
  }
2266
  /// Emit the XZR half of a SYSP register pair. The operand must be a
  /// scalar register whose 64-bit class equivalent is XZR.
  void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    // NOTE(review): a non-scalar operand silently adds *no* operand to
    // Inst; presumably the matcher guarantees a scalar register here,
    // otherwise the MCInst would be malformed — confirm.
    if (!isScalarReg())
      return;

    // Map whatever form the register was written in onto the GPR64 class,
    // then insist it is XZR (SYSP only pairs with XZR here).
    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    MCRegister Reg = RI->getRegClass(i: AArch64::GPR64RegClassID)
                         .getRegister(i: RI->getEncodingValue(Reg: getReg()));
    if (Reg != AArch64::XZR)
      llvm_unreachable("wrong register");

    Inst.addOperand(Op: MCOperand::createReg(Reg: AArch64::XZR));
  }
2281
2282 void addExtendOperands(MCInst &Inst, unsigned N) const {
2283 assert(N == 1 && "Invalid number of operands!");
2284 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2285 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
2286 unsigned Imm = AArch64_AM::getArithExtendImm(ET, Imm: getShiftExtendAmount());
2287 Inst.addOperand(Op: MCOperand::createImm(Val: Imm));
2288 }
2289
2290 void addExtend64Operands(MCInst &Inst, unsigned N) const {
2291 assert(N == 1 && "Invalid number of operands!");
2292 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2293 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
2294 unsigned Imm = AArch64_AM::getArithExtendImm(ET, Imm: getShiftExtendAmount());
2295 Inst.addOperand(Op: MCOperand::createImm(Val: Imm));
2296 }
2297
  /// Emit a load/store register-offset extend as two immediates:
  /// (is-signed-extend, is-shifted). The shift amount itself is implied by
  /// the access size, so only its presence is encoded.
  void addMemExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(Op: MCOperand::createImm(Val: IsSigned));
    Inst.addOperand(Op: MCOperand::createImm(Val: getShiftExtendAmount() != 0));
  }
2305
  // For 8-bit load/store instructions with a register offset, both the
  // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
  // they're disambiguated by whether the shift was explicit or implicit rather
  // than its size.
  /// \see addMemExtendOperands for the general (non-8-bit) form.
  void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(Op: MCOperand::createImm(Val: IsSigned));
    Inst.addOperand(Op: MCOperand::createImm(Val: hasShiftExtendAmount()));
  }
2317
2318 template<int Shift>
2319 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
2320 assert(N == 1 && "Invalid number of operands!");
2321
2322 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
2323 if (CE) {
2324 uint64_t Value = CE->getValue();
2325 Inst.addOperand(Op: MCOperand::createImm(Val: (Value >> Shift) & 0xffff));
2326 } else {
2327 addExpr(Inst, Expr: getImm());
2328 }
2329 }
2330
2331 template<int Shift>
2332 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
2333 assert(N == 1 && "Invalid number of operands!");
2334
2335 const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
2336 uint64_t Value = CE->getValue();
2337 Inst.addOperand(Op: MCOperand::createImm(Val: (~Value >> Shift) & 0xffff));
2338 }
2339
  /// Emit an even complex rotation (0/90/180/270 degrees) encoded as 0-3.
  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
    Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() / 90));
  }
2345
  /// Emit an odd complex rotation (90/270 degrees) encoded as 0/1.
  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
    Inst.addOperand(Op: MCOperand::createImm(Val: (MCE->getValue() - 90) / 180));
  }
2351
2352 void print(raw_ostream &OS, const MCAsmInfo &MAI) const override;
2353
  /// Build a k_Token operand referencing (not copying) Str's storage; the
  /// caller must keep the underlying text alive for the operand's lifetime.
  /// IsSuffix marks tokens that are mnemonic suffixes (e.g. ".4s").
  static std::unique_ptr<AArch64Operand>
  CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
    auto Op = std::make_unique<AArch64Operand>(args: k_Token, args&: Ctx);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->Tok.IsSuffix = IsSuffix;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2364
  /// Build a k_Register operand. EqTy controls how the register must relate
  /// to a tied register during matching; ExtTy/ShiftAmount record an
  /// attached shift/extend modifier (defaulting to an implicit LSL #0).
  static std::unique_ptr<AArch64Operand>
  CreateReg(MCRegister Reg, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
            AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
            unsigned ShiftAmount = 0, unsigned HasExplicitAmount = false) {
    auto Op = std::make_unique<AArch64Operand>(args: k_Register, args&: Ctx);
    Op->Reg.Reg = Reg;
    Op->Reg.Kind = Kind;
    Op->Reg.ElementWidth = 0;
    Op->Reg.EqualityTy = EqTy;
    Op->Reg.ShiftExtend.Type = ExtTy;
    Op->Reg.ShiftExtend.Amount = ShiftAmount;
    Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2382
  /// Build a vector-register operand: a k_Register restricted to vector
  /// kinds, additionally carrying the element width implied by the suffix
  /// (0 when no suffix was written).
  static std::unique_ptr<AArch64Operand> CreateVectorReg(
      MCRegister Reg, RegKind Kind, unsigned ElementWidth, SMLoc S, SMLoc E,
      MCContext &Ctx, AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
      unsigned ShiftAmount = 0, unsigned HasExplicitAmount = false) {
    assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
            Kind == RegKind::SVEPredicateVector ||
            Kind == RegKind::SVEPredicateAsCounter) &&
           "Invalid vector kind");
    auto Op = CreateReg(Reg, Kind, S, E, Ctx, EqTy: EqualsReg, ExtTy, ShiftAmount,
                        HasExplicitAmount);
    Op->Reg.ElementWidth = ElementWidth;
    return Op;
  }
2396
  /// Build a k_VectorList operand: Count registers starting at Reg with the
  /// given register Stride (1 for consecutive lists), plus the per-element
  /// geometry implied by the written suffix.
  static std::unique_ptr<AArch64Operand>
  CreateVectorList(MCRegister Reg, unsigned Count, unsigned Stride,
                   unsigned NumElements, unsigned ElementWidth,
                   RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(args: k_VectorList, args&: Ctx);
    Op->VectorList.Reg = Reg;
    Op->VectorList.Count = Count;
    Op->VectorList.Stride = Stride;
    Op->VectorList.NumElements = NumElements;
    Op->VectorList.ElementWidth = ElementWidth;
    Op->VectorList.RegisterKind = RegisterKind;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2412
  /// Build a k_VectorIndex operand (a lane index such as "[2]").
  static std::unique_ptr<AArch64Operand>
  CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(args: k_VectorIndex, args&: Ctx);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2421
  /// Build a k_MatrixTileList operand from an 8-bit ZA-tile mask.
  static std::unique_ptr<AArch64Operand>
  CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(args: k_MatrixTileList, args&: Ctx);
    Op->MatrixTileList.RegMask = RegMask;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2430
2431 static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2432 const unsigned ElementWidth) {
2433 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2434 RegMap = {
2435 {{0, AArch64::ZAB0},
2436 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2437 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2438 {{8, AArch64::ZAB0},
2439 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2440 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2441 {{16, AArch64::ZAH0},
2442 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2443 {{16, AArch64::ZAH1},
2444 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2445 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2446 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2447 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2448 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2449 };
2450
2451 if (ElementWidth == 64)
2452 OutRegs.insert(V: Reg);
2453 else {
2454 std::vector<unsigned> Regs = RegMap[std::make_pair(x: ElementWidth, y&: Reg)];
2455 assert(!Regs.empty() && "Invalid tile or element width!");
2456 OutRegs.insert_range(R&: Regs);
2457 }
2458 }
2459
  /// Build a k_Immediate operand wrapping an arbitrary MCExpr.
  static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
                                                   SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(args: k_Immediate, args&: Ctx);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2468
2469 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2470 unsigned ShiftAmount,
2471 SMLoc S, SMLoc E,
2472 MCContext &Ctx) {
2473 auto Op = std::make_unique<AArch64Operand>(args: k_ShiftedImm, args&: Ctx);
2474 Op->ShiftedImm .Val = Val;
2475 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2476 Op->StartLoc = S;
2477 Op->EndLoc = E;
2478 return Op;
2479 }
2480
2481 static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
2482 unsigned Last, SMLoc S,
2483 SMLoc E,
2484 MCContext &Ctx) {
2485 auto Op = std::make_unique<AArch64Operand>(args: k_ImmRange, args&: Ctx);
2486 Op->ImmRange.First = First;
2487 Op->ImmRange.Last = Last;
2488 Op->EndLoc = E;
2489 return Op;
2490 }
2491
  /// Build a k_CondCode operand (EQ, NE, ...).
  static std::unique_ptr<AArch64Operand>
  CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(args: k_CondCode, args&: Ctx);
    Op->CondCode.Code = Code;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2500
  /// Build a k_FPImm operand. The value is stored as the sign-extended raw
  /// bit pattern of Val; IsExact records whether the written literal was
  /// representable without rounding.
  static std::unique_ptr<AArch64Operand>
  CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(args: k_FPImm, args&: Ctx);
    Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
    Op->FPImm.IsExact = IsExact;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2510
  /// Build a k_Barrier operand. Stores the written spelling (by reference,
  /// not copy) so diagnostics/printing can reproduce it; HasnXSModifier
  /// distinguishes the DSB ... nXS variants.
  static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
                                                       StringRef Str,
                                                       SMLoc S,
                                                       MCContext &Ctx,
                                                       bool HasnXSModifier) {
    auto Op = std::make_unique<AArch64Operand>(args: k_Barrier, args&: Ctx);
    Op->Barrier.Val = Val;
    Op->Barrier.Data = Str.data();
    Op->Barrier.Length = Str.size();
    Op->Barrier.HasnXSModifier = HasnXSModifier;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2525
  /// Build a k_SysReg operand carrying all three possible encodings (MRS
  /// read, MSR write, and MSR-immediate PSTATE field); which one is used
  /// depends on the instruction the operand ends up matching.
  static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
                                                      uint32_t MRSReg,
                                                      uint32_t MSRReg,
                                                      uint32_t PStateField,
                                                      MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(args: k_SysReg, args&: Ctx);
    Op->SysReg.Data = Str.data();
    Op->SysReg.Length = Str.size();
    Op->SysReg.MRSReg = MRSReg;
    Op->SysReg.MSRReg = MSRReg;
    Op->SysReg.PStateField = PStateField;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2541
  /// Build a k_PHint operand, keeping the written spelling for printing.
  static std::unique_ptr<AArch64Operand>
  CreatePHintInst(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(args: k_PHint, args&: Ctx);
    Op->PHint.Val = Val;
    Op->PHint.Data = Str.data();
    Op->PHint.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2552
  /// Build a k_SysCR operand (a "Cn" control-register index for SYS forms).
  static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
                                                     SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(args: k_SysCR, args&: Ctx);
    Op->SysCRImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2561
2562 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2563 StringRef Str,
2564 SMLoc S,
2565 MCContext &Ctx) {
2566 auto Op = std::make_unique<AArch64Operand>(args: k_Prefetch, args&: Ctx);
2567 Op->Prefetch.Val = Val;
2568 Op->Barrier.Data = Str.data();
2569 Op->Barrier.Length = Str.size();
2570 Op->StartLoc = S;
2571 Op->EndLoc = S;
2572 return Op;
2573 }
2574
  /// Build a k_PSBHint operand, keeping the written spelling for printing.
  static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
                                                       StringRef Str,
                                                       SMLoc S,
                                                       MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(args: k_PSBHint, args&: Ctx);
    Op->PSBHint.Val = Val;
    Op->PSBHint.Data = Str.data();
    Op->PSBHint.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2587
  /// Build a k_BTIHint operand, keeping the written spelling for printing.
  // NOTE(review): Val | 32 appears to rebase the BTI target onto its HINT
  // immediate encoding (BTI variants live in the HINT #32+ space); confirm
  // against the BTI operand tables.
  static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
                                                       StringRef Str,
                                                       SMLoc S,
                                                       MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(args: k_BTIHint, args&: Ctx);
    Op->BTIHint.Val = Val | 32;
    Op->BTIHint.Data = Str.data();
    Op->BTIHint.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2600
  /// Build a k_CMHPriorityHint operand, keeping the spelling for printing.
  static std::unique_ptr<AArch64Operand>
  CreateCMHPriorityHint(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(args: k_CMHPriorityHint, args&: Ctx);
    Op->CMHPriorityHint.Val = Val;
    Op->CMHPriorityHint.Data = Str.data();
    Op->CMHPriorityHint.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2611
  /// Build a k_TIndexHint operand, keeping the spelling for printing.
  static std::unique_ptr<AArch64Operand>
  CreateTIndexHint(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(args: k_TIndexHint, args&: Ctx);
    Op->TIndexHint.Val = Val;
    Op->TIndexHint.Data = Str.data();
    Op->TIndexHint.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2622
  /// Build a k_MatrixRegister operand (an SME ZA tile such as "za0.s"),
  /// recording the tile kind and the element width from its suffix.
  static std::unique_ptr<AArch64Operand>
  CreateMatrixRegister(MCRegister Reg, unsigned ElementWidth, MatrixKind Kind,
                       SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(args: k_MatrixRegister, args&: Ctx);
    Op->MatrixReg.Reg = Reg;
    Op->MatrixReg.ElementWidth = ElementWidth;
    Op->MatrixReg.Kind = Kind;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2634
  /// Build a k_SVCR operand (SME streaming-control pseudo-register),
  /// keeping the written spelling for printing.
  static std::unique_ptr<AArch64Operand>
  CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(args: k_SVCR, args&: Ctx);
    Op->SVCR.PStateField = PStateField;
    Op->SVCR.Data = Str.data();
    Op->SVCR.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2645
  /// Build a standalone k_ShiftExtend operand (a shift/extend modifier not
  /// yet attached to a register). HasExplicitAmount distinguishes "uxtw #0"
  /// from a bare "uxtw".
  static std::unique_ptr<AArch64Operand>
  CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
                    bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(args: k_ShiftExtend, args&: Ctx);
    Op->ShiftExtend.Type = ShOp;
    Op->ShiftExtend.Amount = Val;
    Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2657};
2658
2659} // end anonymous namespace.
2660
/// Debug-dump this operand to OS in an angle-bracketed, kind-specific
/// format. Intended for -debug / dump() output, not for emitting assembly.
void AArch64Operand::print(raw_ostream &OS, const MCAsmInfo &MAI) const {
  switch (Kind) {
  case k_FPImm:
    OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
    if (!getFPImmIsExact())
      OS << " (inexact)";
    OS << ">";
    break;
  case k_Barrier: {
    StringRef Name = getBarrierName();
    if (!Name.empty())
      OS << "<barrier " << Name << ">";
    else
      OS << "<barrier invalid #" << getBarrier() << ">";
    break;
  }
  case k_Immediate:
    MAI.printExpr(OS, *getImm());
    break;
  case k_ShiftedImm: {
    unsigned Shift = getShiftedImmShift();
    OS << "<shiftedimm ";
    MAI.printExpr(OS, *getShiftedImmVal());
    OS << ", lsl #" << AArch64_AM::getShiftValue(Imm: Shift) << ">";
    break;
  }
  case k_ImmRange: {
    OS << "<immrange ";
    OS << getFirstImmVal();
    OS << ":" << getLastImmVal() << ">";
    break;
  }
  case k_CondCode:
    OS << "<condcode " << getCondCode() << ">";
    break;
  case k_VectorList: {
    OS << "<vectorlist ";
    MCRegister Reg = getVectorListStart();
    // Print raw register numbers; strided lists advance by the stride.
    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
      OS << Reg.id() + i * getVectorListStride() << " ";
    OS << ">";
    break;
  }
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  case k_SysReg:
    OS << "<sysreg: " << getSysReg() << '>';
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_SysCR:
    OS << "c" << getSysCR();
    break;
  case k_Prefetch: {
    StringRef Name = getPrefetchName();
    if (!Name.empty())
      OS << "<prfop " << Name << ">";
    else
      OS << "<prfop invalid #" << getPrefetch() << ">";
    break;
  }
  case k_PSBHint:
    OS << getPSBHintName();
    break;
  case k_PHint:
    OS << getPHintName();
    break;
  case k_BTIHint:
    OS << getBTIHintName();
    break;
  case k_CMHPriorityHint:
    OS << getCMHPriorityHintName();
    break;
  case k_TIndexHint:
    OS << getTIndexHintName();
    break;
  case k_MatrixRegister:
    OS << "<matrix " << getMatrixReg().id() << ">";
    break;
  case k_MatrixTileList: {
    // Render the 8-bit tile mask MSB-first as a string of 0/1 digits.
    OS << "<matrixlist ";
    unsigned RegMask = getMatrixTileListRegMask();
    unsigned MaxBits = 8;
    for (unsigned I = MaxBits; I > 0; --I)
      OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
    OS << '>';
    break;
  }
  case k_SVCR: {
    OS << getSVCR();
    break;
  }
  case k_Register:
    OS << "<register " << getReg().id() << ">";
    // A register with an attached shift/extend falls through to also print
    // the modifier; a plain register stops here.
    if (!getShiftExtendAmount() && !hasShiftExtendAmount())
      break;
    [[fallthrough]];
  case k_ShiftExtend:
    OS << "<" << AArch64_AM::getShiftExtendName(ST: getShiftExtendType()) << " #"
       << getShiftExtendAmount();
    if (!hasShiftExtendAmount())
      OS << "<imp>";
    OS << '>';
    break;
  }
}
2769
2770/// @name Auto-generated Match Functions
2771/// {
2772
2773static MCRegister MatchRegisterName(StringRef Name);
2774
2775/// }
2776
/// Map a NEON vector register name ("v0".."v31", case-insensitive) to its
/// 128-bit Q register; returns 0 (no register) for anything else. Kept as
/// an explicit table so only these exact spellings are accepted.
static unsigned MatchNeonVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case(S: "v0", Value: AArch64::Q0)
      .Case(S: "v1", Value: AArch64::Q1)
      .Case(S: "v2", Value: AArch64::Q2)
      .Case(S: "v3", Value: AArch64::Q3)
      .Case(S: "v4", Value: AArch64::Q4)
      .Case(S: "v5", Value: AArch64::Q5)
      .Case(S: "v6", Value: AArch64::Q6)
      .Case(S: "v7", Value: AArch64::Q7)
      .Case(S: "v8", Value: AArch64::Q8)
      .Case(S: "v9", Value: AArch64::Q9)
      .Case(S: "v10", Value: AArch64::Q10)
      .Case(S: "v11", Value: AArch64::Q11)
      .Case(S: "v12", Value: AArch64::Q12)
      .Case(S: "v13", Value: AArch64::Q13)
      .Case(S: "v14", Value: AArch64::Q14)
      .Case(S: "v15", Value: AArch64::Q15)
      .Case(S: "v16", Value: AArch64::Q16)
      .Case(S: "v17", Value: AArch64::Q17)
      .Case(S: "v18", Value: AArch64::Q18)
      .Case(S: "v19", Value: AArch64::Q19)
      .Case(S: "v20", Value: AArch64::Q20)
      .Case(S: "v21", Value: AArch64::Q21)
      .Case(S: "v22", Value: AArch64::Q22)
      .Case(S: "v23", Value: AArch64::Q23)
      .Case(S: "v24", Value: AArch64::Q24)
      .Case(S: "v25", Value: AArch64::Q25)
      .Case(S: "v26", Value: AArch64::Q26)
      .Case(S: "v27", Value: AArch64::Q27)
      .Case(S: "v28", Value: AArch64::Q28)
      .Case(S: "v29", Value: AArch64::Q29)
      .Case(S: "v30", Value: AArch64::Q30)
      .Case(S: "v31", Value: AArch64::Q31)
      .Default(Value: 0);
}
2813
2814/// Returns an optional pair of (#elements, element-width) if Suffix
2815/// is a valid vector kind. Where the number of elements in a vector
2816/// or the vector width is implicit or explicitly unknown (but still a
2817/// valid suffix kind), 0 is used.
static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
                                                          RegKind VectorKind) {
  // {-1, -1} is the "invalid suffix" sentinel used throughout this function.
  std::pair<int, int> Res = {-1, -1};

  switch (VectorKind) {
  case RegKind::NeonVector:
    // NEON suffixes encode both element count and element width, e.g. ".4s"
    // is four 32-bit lanes. A 0 in either slot means "implicit/unknown".
    Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
              .Case(S: "", Value: {0, 0})
              .Case(S: ".1d", Value: {1, 64})
              .Case(S: ".1q", Value: {1, 128})
              // '.2h' needed for fp16 scalar pairwise reductions
              .Case(S: ".2h", Value: {2, 16})
              .Case(S: ".2b", Value: {2, 8})
              .Case(S: ".2s", Value: {2, 32})
              .Case(S: ".2d", Value: {2, 64})
              // '.4b' is another special case for the ARMv8.2a dot product
              // operand
              .Case(S: ".4b", Value: {4, 8})
              .Case(S: ".4h", Value: {4, 16})
              .Case(S: ".4s", Value: {4, 32})
              .Case(S: ".8b", Value: {8, 8})
              .Case(S: ".8h", Value: {8, 16})
              .Case(S: ".16b", Value: {16, 8})
              // Accept the width neutral ones, too, for verbose syntax. If
              // those aren't used in the right places, the token operand won't
              // match so all will work out.
              .Case(S: ".b", Value: {0, 8})
              .Case(S: ".h", Value: {0, 16})
              .Case(S: ".s", Value: {0, 32})
              .Case(S: ".d", Value: {0, 64})
              .Default(Value: {-1, -1});
    break;
  case RegKind::SVEPredicateAsCounter:
  case RegKind::SVEPredicateVector:
  case RegKind::SVEDataVector:
  case RegKind::Matrix:
    // Scalable/matrix registers only carry an element width; the element
    // count is implicit (depends on the runtime vector length), hence 0.
    Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
              .Case(S: "", Value: {0, 0})
              .Case(S: ".b", Value: {0, 8})
              .Case(S: ".h", Value: {0, 16})
              .Case(S: ".s", Value: {0, 32})
              .Case(S: ".d", Value: {0, 64})
              .Case(S: ".q", Value: {0, 128})
              .Default(Value: {-1, -1});
    break;
  default:
    llvm_unreachable("Unsupported RegKind");
  }

  if (Res == std::make_pair(x: -1, y: -1))
    return std::nullopt;

  return std::optional<std::pair<int, int>>(Res);
}
2872
2873static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2874 return parseVectorKind(Suffix, VectorKind).has_value();
2875}
2876
2877static unsigned matchSVEDataVectorRegName(StringRef Name) {
2878 return StringSwitch<unsigned>(Name.lower())
2879 .Case(S: "z0", Value: AArch64::Z0)
2880 .Case(S: "z1", Value: AArch64::Z1)
2881 .Case(S: "z2", Value: AArch64::Z2)
2882 .Case(S: "z3", Value: AArch64::Z3)
2883 .Case(S: "z4", Value: AArch64::Z4)
2884 .Case(S: "z5", Value: AArch64::Z5)
2885 .Case(S: "z6", Value: AArch64::Z6)
2886 .Case(S: "z7", Value: AArch64::Z7)
2887 .Case(S: "z8", Value: AArch64::Z8)
2888 .Case(S: "z9", Value: AArch64::Z9)
2889 .Case(S: "z10", Value: AArch64::Z10)
2890 .Case(S: "z11", Value: AArch64::Z11)
2891 .Case(S: "z12", Value: AArch64::Z12)
2892 .Case(S: "z13", Value: AArch64::Z13)
2893 .Case(S: "z14", Value: AArch64::Z14)
2894 .Case(S: "z15", Value: AArch64::Z15)
2895 .Case(S: "z16", Value: AArch64::Z16)
2896 .Case(S: "z17", Value: AArch64::Z17)
2897 .Case(S: "z18", Value: AArch64::Z18)
2898 .Case(S: "z19", Value: AArch64::Z19)
2899 .Case(S: "z20", Value: AArch64::Z20)
2900 .Case(S: "z21", Value: AArch64::Z21)
2901 .Case(S: "z22", Value: AArch64::Z22)
2902 .Case(S: "z23", Value: AArch64::Z23)
2903 .Case(S: "z24", Value: AArch64::Z24)
2904 .Case(S: "z25", Value: AArch64::Z25)
2905 .Case(S: "z26", Value: AArch64::Z26)
2906 .Case(S: "z27", Value: AArch64::Z27)
2907 .Case(S: "z28", Value: AArch64::Z28)
2908 .Case(S: "z29", Value: AArch64::Z29)
2909 .Case(S: "z30", Value: AArch64::Z30)
2910 .Case(S: "z31", Value: AArch64::Z31)
2911 .Default(Value: 0);
2912}
2913
2914static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2915 return StringSwitch<unsigned>(Name.lower())
2916 .Case(S: "p0", Value: AArch64::P0)
2917 .Case(S: "p1", Value: AArch64::P1)
2918 .Case(S: "p2", Value: AArch64::P2)
2919 .Case(S: "p3", Value: AArch64::P3)
2920 .Case(S: "p4", Value: AArch64::P4)
2921 .Case(S: "p5", Value: AArch64::P5)
2922 .Case(S: "p6", Value: AArch64::P6)
2923 .Case(S: "p7", Value: AArch64::P7)
2924 .Case(S: "p8", Value: AArch64::P8)
2925 .Case(S: "p9", Value: AArch64::P9)
2926 .Case(S: "p10", Value: AArch64::P10)
2927 .Case(S: "p11", Value: AArch64::P11)
2928 .Case(S: "p12", Value: AArch64::P12)
2929 .Case(S: "p13", Value: AArch64::P13)
2930 .Case(S: "p14", Value: AArch64::P14)
2931 .Case(S: "p15", Value: AArch64::P15)
2932 .Default(Value: 0);
2933}
2934
2935static unsigned matchSVEPredicateAsCounterRegName(StringRef Name) {
2936 return StringSwitch<unsigned>(Name.lower())
2937 .Case(S: "pn0", Value: AArch64::PN0)
2938 .Case(S: "pn1", Value: AArch64::PN1)
2939 .Case(S: "pn2", Value: AArch64::PN2)
2940 .Case(S: "pn3", Value: AArch64::PN3)
2941 .Case(S: "pn4", Value: AArch64::PN4)
2942 .Case(S: "pn5", Value: AArch64::PN5)
2943 .Case(S: "pn6", Value: AArch64::PN6)
2944 .Case(S: "pn7", Value: AArch64::PN7)
2945 .Case(S: "pn8", Value: AArch64::PN8)
2946 .Case(S: "pn9", Value: AArch64::PN9)
2947 .Case(S: "pn10", Value: AArch64::PN10)
2948 .Case(S: "pn11", Value: AArch64::PN11)
2949 .Case(S: "pn12", Value: AArch64::PN12)
2950 .Case(S: "pn13", Value: AArch64::PN13)
2951 .Case(S: "pn14", Value: AArch64::PN14)
2952 .Case(S: "pn15", Value: AArch64::PN15)
2953 .Default(Value: 0);
2954}
2955
// Matches the ZA tile names that may appear in a matrix tile list. The number
// of tiles per element size halves as the width doubles (8 x .d, 4 x .s,
// 2 x .h, 1 x .b), so the valid index range differs per suffix.
static unsigned matchMatrixTileListRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case(S: "za0.d", Value: AArch64::ZAD0)
      .Case(S: "za1.d", Value: AArch64::ZAD1)
      .Case(S: "za2.d", Value: AArch64::ZAD2)
      .Case(S: "za3.d", Value: AArch64::ZAD3)
      .Case(S: "za4.d", Value: AArch64::ZAD4)
      .Case(S: "za5.d", Value: AArch64::ZAD5)
      .Case(S: "za6.d", Value: AArch64::ZAD6)
      .Case(S: "za7.d", Value: AArch64::ZAD7)
      .Case(S: "za0.s", Value: AArch64::ZAS0)
      .Case(S: "za1.s", Value: AArch64::ZAS1)
      .Case(S: "za2.s", Value: AArch64::ZAS2)
      .Case(S: "za3.s", Value: AArch64::ZAS3)
      .Case(S: "za0.h", Value: AArch64::ZAH0)
      .Case(S: "za1.h", Value: AArch64::ZAH1)
      .Case(S: "za0.b", Value: AArch64::ZAB0)
      .Default(Value: 0)
}
2975
// Matches the full set of SME matrix register spellings: the whole ZA array,
// the plain tile names (za<N>.<size>), and the horizontal/vertical slice
// spellings (za<N>h.<size> / za<N>v.<size>), which alias the same tile
// registers — the h/v distinction is carried by the instruction, not the
// register operand.
static unsigned matchMatrixRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case(S: "za", Value: AArch64::ZA)
      // Plain tile names: za<N>.<size>.
      .Case(S: "za0.q", Value: AArch64::ZAQ0)
      .Case(S: "za1.q", Value: AArch64::ZAQ1)
      .Case(S: "za2.q", Value: AArch64::ZAQ2)
      .Case(S: "za3.q", Value: AArch64::ZAQ3)
      .Case(S: "za4.q", Value: AArch64::ZAQ4)
      .Case(S: "za5.q", Value: AArch64::ZAQ5)
      .Case(S: "za6.q", Value: AArch64::ZAQ6)
      .Case(S: "za7.q", Value: AArch64::ZAQ7)
      .Case(S: "za8.q", Value: AArch64::ZAQ8)
      .Case(S: "za9.q", Value: AArch64::ZAQ9)
      .Case(S: "za10.q", Value: AArch64::ZAQ10)
      .Case(S: "za11.q", Value: AArch64::ZAQ11)
      .Case(S: "za12.q", Value: AArch64::ZAQ12)
      .Case(S: "za13.q", Value: AArch64::ZAQ13)
      .Case(S: "za14.q", Value: AArch64::ZAQ14)
      .Case(S: "za15.q", Value: AArch64::ZAQ15)
      .Case(S: "za0.d", Value: AArch64::ZAD0)
      .Case(S: "za1.d", Value: AArch64::ZAD1)
      .Case(S: "za2.d", Value: AArch64::ZAD2)
      .Case(S: "za3.d", Value: AArch64::ZAD3)
      .Case(S: "za4.d", Value: AArch64::ZAD4)
      .Case(S: "za5.d", Value: AArch64::ZAD5)
      .Case(S: "za6.d", Value: AArch64::ZAD6)
      .Case(S: "za7.d", Value: AArch64::ZAD7)
      .Case(S: "za0.s", Value: AArch64::ZAS0)
      .Case(S: "za1.s", Value: AArch64::ZAS1)
      .Case(S: "za2.s", Value: AArch64::ZAS2)
      .Case(S: "za3.s", Value: AArch64::ZAS3)
      .Case(S: "za0.h", Value: AArch64::ZAH0)
      .Case(S: "za1.h", Value: AArch64::ZAH1)
      .Case(S: "za0.b", Value: AArch64::ZAB0)
      // Horizontal-slice spellings (za<N>h.*) map to the same tiles.
      .Case(S: "za0h.q", Value: AArch64::ZAQ0)
      .Case(S: "za1h.q", Value: AArch64::ZAQ1)
      .Case(S: "za2h.q", Value: AArch64::ZAQ2)
      .Case(S: "za3h.q", Value: AArch64::ZAQ3)
      .Case(S: "za4h.q", Value: AArch64::ZAQ4)
      .Case(S: "za5h.q", Value: AArch64::ZAQ5)
      .Case(S: "za6h.q", Value: AArch64::ZAQ6)
      .Case(S: "za7h.q", Value: AArch64::ZAQ7)
      .Case(S: "za8h.q", Value: AArch64::ZAQ8)
      .Case(S: "za9h.q", Value: AArch64::ZAQ9)
      .Case(S: "za10h.q", Value: AArch64::ZAQ10)
      .Case(S: "za11h.q", Value: AArch64::ZAQ11)
      .Case(S: "za12h.q", Value: AArch64::ZAQ12)
      .Case(S: "za13h.q", Value: AArch64::ZAQ13)
      .Case(S: "za14h.q", Value: AArch64::ZAQ14)
      .Case(S: "za15h.q", Value: AArch64::ZAQ15)
      .Case(S: "za0h.d", Value: AArch64::ZAD0)
      .Case(S: "za1h.d", Value: AArch64::ZAD1)
      .Case(S: "za2h.d", Value: AArch64::ZAD2)
      .Case(S: "za3h.d", Value: AArch64::ZAD3)
      .Case(S: "za4h.d", Value: AArch64::ZAD4)
      .Case(S: "za5h.d", Value: AArch64::ZAD5)
      .Case(S: "za6h.d", Value: AArch64::ZAD6)
      .Case(S: "za7h.d", Value: AArch64::ZAD7)
      .Case(S: "za0h.s", Value: AArch64::ZAS0)
      .Case(S: "za1h.s", Value: AArch64::ZAS1)
      .Case(S: "za2h.s", Value: AArch64::ZAS2)
      .Case(S: "za3h.s", Value: AArch64::ZAS3)
      .Case(S: "za0h.h", Value: AArch64::ZAH0)
      .Case(S: "za1h.h", Value: AArch64::ZAH1)
      .Case(S: "za0h.b", Value: AArch64::ZAB0)
      // Vertical-slice spellings (za<N>v.*) likewise alias the tiles.
      .Case(S: "za0v.q", Value: AArch64::ZAQ0)
      .Case(S: "za1v.q", Value: AArch64::ZAQ1)
      .Case(S: "za2v.q", Value: AArch64::ZAQ2)
      .Case(S: "za3v.q", Value: AArch64::ZAQ3)
      .Case(S: "za4v.q", Value: AArch64::ZAQ4)
      .Case(S: "za5v.q", Value: AArch64::ZAQ5)
      .Case(S: "za6v.q", Value: AArch64::ZAQ6)
      .Case(S: "za7v.q", Value: AArch64::ZAQ7)
      .Case(S: "za8v.q", Value: AArch64::ZAQ8)
      .Case(S: "za9v.q", Value: AArch64::ZAQ9)
      .Case(S: "za10v.q", Value: AArch64::ZAQ10)
      .Case(S: "za11v.q", Value: AArch64::ZAQ11)
      .Case(S: "za12v.q", Value: AArch64::ZAQ12)
      .Case(S: "za13v.q", Value: AArch64::ZAQ13)
      .Case(S: "za14v.q", Value: AArch64::ZAQ14)
      .Case(S: "za15v.q", Value: AArch64::ZAQ15)
      .Case(S: "za0v.d", Value: AArch64::ZAD0)
      .Case(S: "za1v.d", Value: AArch64::ZAD1)
      .Case(S: "za2v.d", Value: AArch64::ZAD2)
      .Case(S: "za3v.d", Value: AArch64::ZAD3)
      .Case(S: "za4v.d", Value: AArch64::ZAD4)
      .Case(S: "za5v.d", Value: AArch64::ZAD5)
      .Case(S: "za6v.d", Value: AArch64::ZAD6)
      .Case(S: "za7v.d", Value: AArch64::ZAD7)
      .Case(S: "za0v.s", Value: AArch64::ZAS0)
      .Case(S: "za1v.s", Value: AArch64::ZAS1)
      .Case(S: "za2v.s", Value: AArch64::ZAS2)
      .Case(S: "za3v.s", Value: AArch64::ZAS3)
      .Case(S: "za0v.h", Value: AArch64::ZAH0)
      .Case(S: "za1v.h", Value: AArch64::ZAH1)
      .Case(S: "za0v.b", Value: AArch64::ZAB0)
      .Default(Value: 0)
}
3074
3075bool AArch64AsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
3076 SMLoc &EndLoc) {
3077 return !tryParseRegister(Reg, StartLoc, EndLoc).isSuccess();
3078}
3079
ParseStatus AArch64AsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
                                               SMLoc &EndLoc) {
  // Only scalar (GPR) registers are recognized by this generic hook; other
  // register kinds are handled by the operand-specific parsers.
  StartLoc = getLoc();
  ParseStatus Res = tryParseScalarRegister(Reg);
  // On success the lexer has already consumed the register token, so the end
  // location is one character before the current position.
  EndLoc = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
  return Res;
}
3087
3088// Matches a register name or register alias previously defined by '.req'
MCRegister AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
                                                    RegKind Kind) {
  // Try each register bank's matcher in turn. Each matcher returns a nonzero
  // register only for names in its own bank, so once one matches we either
  // accept it (kind agrees) or reject the name outright (kind mismatch).
  MCRegister Reg = MCRegister();
  if ((Reg = matchSVEDataVectorRegName(Name)))
    return Kind == RegKind::SVEDataVector ? Reg : MCRegister();

  if ((Reg = matchSVEPredicateVectorRegName(Name)))
    return Kind == RegKind::SVEPredicateVector ? Reg : MCRegister();

  if ((Reg = matchSVEPredicateAsCounterRegName(Name)))
    return Kind == RegKind::SVEPredicateAsCounter ? Reg : MCRegister();

  if ((Reg = MatchNeonVectorRegName(Name)))
    return Kind == RegKind::NeonVector ? Reg : MCRegister();

  if ((Reg = matchMatrixRegName(Name)))
    return Kind == RegKind::Matrix ? Reg : MCRegister();

  // ZT0 is the sole lookup-table register, so it is special-cased here
  // rather than having a dedicated matcher.
  if (Name.equals_insensitive(RHS: "zt0"))
    return Kind == RegKind::LookupTable ? unsigned(AArch64::ZT0) : 0;

  // The parsed register must be of RegKind Scalar
  if ((Reg = MatchRegisterName(Name)))
    return (Kind == RegKind::Scalar) ? Reg : MCRegister();

  if (!Reg) {
    // Handle a few common aliases of registers.
    if (MCRegister Reg = StringSwitch<unsigned>(Name.lower())
                             .Case(S: "fp", Value: AArch64::FP)
                             .Case(S: "lr", Value: AArch64::LR)
                             .Case(S: "x31", Value: AArch64::XZR)
                             .Case(S: "w31", Value: AArch64::WZR)
                             .Default(Value: 0))
      return Kind == RegKind::Scalar ? Reg : MCRegister();

    // Check for aliases registered via .req. Canonicalize to lower case.
    // That's more consistent since register names are case insensitive, and
    // it's how the original entry was passed in from MC/MCParser/AsmParser.
    auto Entry = RegisterReqs.find(Key: Name.lower());
    if (Entry == RegisterReqs.end())
      return MCRegister();

    // set Reg if the match is the right kind of register
    if (Kind == Entry->getValue().first)
      Reg = Entry->getValue().second;
  }
  return Reg;
}
3137
3138unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
3139 switch (K) {
3140 case RegKind::Scalar:
3141 case RegKind::NeonVector:
3142 case RegKind::SVEDataVector:
3143 return 32;
3144 case RegKind::Matrix:
3145 case RegKind::SVEPredicateVector:
3146 case RegKind::SVEPredicateAsCounter:
3147 return 16;
3148 case RegKind::LookupTable:
3149 return 1;
3150 }
3151 llvm_unreachable("Unsupported RegKind");
3152}
3153
3154/// tryParseScalarRegister - Try to parse a register name. The token must be an
3155/// Identifier when called, and if it is a register name the token is eaten and
3156/// the register is added to the operand list.
3157ParseStatus AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
3158 const AsmToken &Tok = getTok();
3159 if (Tok.isNot(K: AsmToken::Identifier))
3160 return ParseStatus::NoMatch;
3161
3162 std::string lowerCase = Tok.getString().lower();
3163 MCRegister Reg = matchRegisterNameAlias(Name: lowerCase, Kind: RegKind::Scalar);
3164 if (!Reg)
3165 return ParseStatus::NoMatch;
3166
3167 RegNum = Reg;
3168 Lex(); // Eat identifier token.
3169 return ParseStatus::Success;
3170}
3171
3172/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
ParseStatus AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
  SMLoc S = getLoc();

  if (getTok().isNot(K: AsmToken::Identifier))
    return Error(L: S, Msg: "Expected cN operand where 0 <= N <= 15");

  // The name must start with 'c' (either case) followed by a decimal number.
  StringRef Tok = getTok().getIdentifier();
  if (Tok[0] != 'c' && Tok[0] != 'C')
    return Error(L: S, Msg: "Expected cN operand where 0 <= N <= 15");

  // Everything after the leading 'c' must parse as a value in [0, 15].
  uint32_t CRNum;
  bool BadNum = Tok.drop_front().getAsInteger(Radix: 10, Result&: CRNum);
  if (BadNum || CRNum > 15)
    return Error(L: S, Msg: "Expected cN operand where 0 <= N <= 15");

  Lex(); // Eat identifier token.
  Operands.push_back(
      Elt: AArch64Operand::CreateSysCR(Val: CRNum, S, E: getLoc(), Ctx&: getContext()));
  return ParseStatus::Success;
}
3193
3194// Either an identifier for named values or a 6-bit immediate.
ParseStatus AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
  SMLoc S = getLoc();
  const AsmToken &Tok = getTok();

  // RPRFM takes a 6-bit prefetch operation encoding.
  unsigned MaxVal = 63;

  // Immediate case, with optional leading hash:
  if (parseOptionalToken(T: AsmToken::Hash) ||
      Tok.is(K: AsmToken::Integer)) {
    const MCExpr *ImmVal;
    if (getParser().parseExpression(Res&: ImmVal))
      return ParseStatus::Failure;

    // Only constant expressions are acceptable here.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
    if (!MCE)
      return TokError(Msg: "immediate value expected for prefetch operand");
    unsigned prfop = MCE->getValue();
    if (prfop > MaxVal)
      return TokError(Msg: "prefetch operand out of range, [0," + utostr(X: MaxVal) +
                           "] expected");

    // If the encoding has a known name, record it for pretty-printing.
    auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(Encoding: MCE->getValue());
    Operands.push_back(Elt: AArch64Operand::CreatePrefetch(
        Val: prfop, Str: RPRFM ? RPRFM->Name : "", S, Ctx&: getContext()));
    return ParseStatus::Success;
  }

  // Otherwise it must be one of the named prefetch hints.
  if (Tok.isNot(K: AsmToken::Identifier))
    return TokError(Msg: "prefetch hint expected");

  auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Name: Tok.getString());
  if (!RPRFM)
    return TokError(Msg: "prefetch hint expected");

  Operands.push_back(Elt: AArch64Operand::CreatePrefetch(
      Val: RPRFM->Encoding, Str: Tok.getString(), S, Ctx&: getContext()));
  Lex(); // Eat identifier token.
  return ParseStatus::Success;
}
3234
3235/// tryParsePrefetch - Try to parse a prefetch operand.
template <bool IsSVEPrefetch>
ParseStatus AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
  SMLoc S = getLoc();
  const AsmToken &Tok = getTok();

  // The SVE and base prefetch operands use distinct name/encoding tables, so
  // both lookup directions are abstracted behind these lambdas.
  auto LookupByName = [](StringRef N) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(Name: N))
        return std::optional<unsigned>(Res->Encoding);
    } else if (auto Res = AArch64PRFM::lookupPRFMByName(Name: N))
      return std::optional<unsigned>(Res->Encoding);
    return std::optional<unsigned>();
  };

  auto LookupByEncoding = [](unsigned E) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(Encoding: E))
        return std::optional<StringRef>(Res->Name);
    } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(Encoding: E))
      return std::optional<StringRef>(Res->Name);
    return std::optional<StringRef>();
  };
  // SVE prefetch encodings are 4 bits; base prefetch encodings are 5 bits.
  unsigned MaxVal = IsSVEPrefetch ? 15 : 31;

  // Either an identifier for named values or a 5-bit immediate.
  // Eat optional hash.
  if (parseOptionalToken(T: AsmToken::Hash) ||
      Tok.is(K: AsmToken::Integer)) {
    const MCExpr *ImmVal;
    if (getParser().parseExpression(Res&: ImmVal))
      return ParseStatus::Failure;

    // Only constant expressions are acceptable here.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
    if (!MCE)
      return TokError(Msg: "immediate value expected for prefetch operand");
    unsigned prfop = MCE->getValue();
    if (prfop > MaxVal)
      return TokError(Msg: "prefetch operand out of range, [0," + utostr(X: MaxVal) +
                           "] expected");

    // If the encoding has a known name, record it for pretty-printing.
    auto PRFM = LookupByEncoding(MCE->getValue());
    Operands.push_back(AArch64Operand::CreatePrefetch(Val: prfop, Str: PRFM.value_or(""),
                                                      S, Ctx&: getContext()));
    return ParseStatus::Success;
  }

  // Otherwise it must be one of the named prefetch hints.
  if (Tok.isNot(K: AsmToken::Identifier))
    return TokError(Msg: "prefetch hint expected");

  auto PRFM = LookupByName(Tok.getString());
  if (!PRFM)
    return TokError(Msg: "prefetch hint expected");

  Operands.push_back(AArch64Operand::CreatePrefetch(
      Val: *PRFM, Str: Tok.getString(), S, Ctx&: getContext()));
  Lex(); // Eat identifier token.
  return ParseStatus::Success;
}
3294
3295/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
3296ParseStatus AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
3297 SMLoc S = getLoc();
3298 const AsmToken &Tok = getTok();
3299 if (Tok.isNot(K: AsmToken::Identifier))
3300 return TokError(Msg: "invalid operand for instruction");
3301
3302 auto PSB = AArch64PSBHint::lookupPSBByName(Name: Tok.getString());
3303 if (!PSB)
3304 return TokError(Msg: "invalid operand for instruction");
3305
3306 Operands.push_back(Elt: AArch64Operand::CreatePSBHint(
3307 Val: PSB->Encoding, Str: Tok.getString(), S, Ctx&: getContext()));
3308 Lex(); // Eat identifier token.
3309 return ParseStatus::Success;
3310}
3311
ParseStatus AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  MCRegister RegNum;

  // The case where xzr, xzr is not present is handled by an InstAlias.

  auto RegTok = getTok(); // in case we need to backtrack
  if (!tryParseScalarRegister(RegNum).isSuccess())
    return ParseStatus::NoMatch;

  // Only "xzr, xzr" is accepted here; any other register means this parser
  // does not apply, so un-lex the consumed token and let others try.
  if (RegNum != AArch64::XZR) {
    getLexer().UnLex(Token: RegTok);
    return ParseStatus::NoMatch;
  }

  if (parseComma())
    return ParseStatus::Failure;

  if (!tryParseScalarRegister(RegNum).isSuccess())
    return TokError(Msg: "expected register operand");

  if (RegNum != AArch64::XZR)
    return TokError(Msg: "xzr must be followed by xzr");

  // We need to push something, since we claim this is an operand in .td.
  // See also AArch64AsmParser::parseKeywordOperand.
  Operands.push_back(Elt: AArch64Operand::CreateReg(
      Reg: RegNum, Kind: RegKind::Scalar, S: StartLoc, E: getLoc(), Ctx&: getContext()));

  return ParseStatus::Success;
}
3344
3345/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
3346ParseStatus AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
3347 SMLoc S = getLoc();
3348 const AsmToken &Tok = getTok();
3349 if (Tok.isNot(K: AsmToken::Identifier))
3350 return TokError(Msg: "invalid operand for instruction");
3351
3352 auto BTI = AArch64BTIHint::lookupBTIByName(Name: Tok.getString());
3353 if (!BTI)
3354 return TokError(Msg: "invalid operand for instruction");
3355
3356 Operands.push_back(Elt: AArch64Operand::CreateBTIHint(
3357 Val: BTI->Encoding, Str: Tok.getString(), S, Ctx&: getContext()));
3358 Lex(); // Eat identifier token.
3359 return ParseStatus::Success;
3360}
3361
3362/// tryParseCMHPriorityHint - Try to parse a CMHPriority operand
3363ParseStatus AArch64AsmParser::tryParseCMHPriorityHint(OperandVector &Operands) {
3364 SMLoc S = getLoc();
3365 const AsmToken &Tok = getTok();
3366 if (Tok.isNot(K: AsmToken::Identifier))
3367 return TokError(Msg: "invalid operand for instruction");
3368
3369 auto CMHPriority =
3370 AArch64CMHPriorityHint::lookupCMHPriorityHintByName(Name: Tok.getString());
3371 if (!CMHPriority)
3372 return TokError(Msg: "invalid operand for instruction");
3373
3374 Operands.push_back(Elt: AArch64Operand::CreateCMHPriorityHint(
3375 Val: CMHPriority->Encoding, Str: Tok.getString(), S, Ctx&: getContext()));
3376 Lex(); // Eat identifier token.
3377 return ParseStatus::Success;
3378}
3379
3380/// tryParseTIndexHint - Try to parse a TIndex operand
3381ParseStatus AArch64AsmParser::tryParseTIndexHint(OperandVector &Operands) {
3382 SMLoc S = getLoc();
3383 const AsmToken &Tok = getTok();
3384 if (Tok.isNot(K: AsmToken::Identifier))
3385 return TokError(Msg: "invalid operand for instruction");
3386
3387 auto TIndex = AArch64TIndexHint::lookupTIndexByName(Name: Tok.getString());
3388 if (!TIndex)
3389 return TokError(Msg: "invalid operand for instruction");
3390
3391 Operands.push_back(Elt: AArch64Operand::CreateTIndexHint(
3392 Val: TIndex->Encoding, Str: Tok.getString(), S, Ctx&: getContext()));
3393 Lex(); // Eat identifier token.
3394 return ParseStatus::Success;
3395}
3396
3397/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
3398/// instruction.
ParseStatus AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
  SMLoc S = getLoc();
  const MCExpr *Expr = nullptr;

  if (getTok().is(K: AsmToken::Hash)) {
    Lex(); // Eat hash token.
  }

  if (parseSymbolicImmVal(ImmVal&: Expr))
    return ParseStatus::Failure;

  // Validate that any relocation specifier on the symbol is one that makes
  // sense for an ADRP (page-granular) reference.
  AArch64::Specifier ELFSpec;
  AArch64::Specifier DarwinSpec;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
    if (DarwinSpec == AArch64::S_None && ELFSpec == AArch64::S_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADRP relocation (unfortunately).
      Expr =
          MCSpecifierExpr::create(Expr, S: AArch64::S_ABS_PAGE, Ctx&: getContext(), Loc: S);
    } else if ((DarwinSpec == AArch64::S_MACHO_GOTPAGE ||
                DarwinSpec == AArch64::S_MACHO_TLVPPAGE) &&
               Addend != 0) {
      return Error(L: S, Msg: "gotpage label reference not allowed an addend");
    } else if (DarwinSpec != AArch64::S_MACHO_PAGE &&
               DarwinSpec != AArch64::S_MACHO_GOTPAGE &&
               DarwinSpec != AArch64::S_MACHO_TLVPPAGE &&
               ELFSpec != AArch64::S_ABS_PAGE_NC &&
               ELFSpec != AArch64::S_GOT_PAGE &&
               ELFSpec != AArch64::S_GOT_AUTH_PAGE &&
               ELFSpec != AArch64::S_GOT_PAGE_LO15 &&
               ELFSpec != AArch64::S_GOTTPREL_PAGE &&
               ELFSpec != AArch64::S_TLSDESC_PAGE &&
               ELFSpec != AArch64::S_TLSDESC_AUTH_PAGE) {
      // The operand must be an @page or @gotpage qualified symbolref.
      return Error(L: S, Msg: "page or gotpage label reference expected");
    }
  }

  // We have either a label reference possibly with addend or an immediate. The
  // addend is a raw value here. The linker will adjust it to only reference the
  // page.
  SMLoc E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
  Operands.push_back(Elt: AArch64Operand::CreateImm(Val: Expr, S, E, Ctx&: getContext()));

  return ParseStatus::Success;
}
3446
3447/// tryParseAdrLabel - Parse and validate a source label for the ADR
3448/// instruction.
ParseStatus AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
  SMLoc S = getLoc();
  const MCExpr *Expr = nullptr;

  // Leave anything with a bracket to the default for SVE
  if (getTok().is(K: AsmToken::LBrac))
    return ParseStatus::NoMatch;

  if (getTok().is(K: AsmToken::Hash))
    Lex(); // Eat hash token.

  if (parseSymbolicImmVal(ImmVal&: Expr))
    return ParseStatus::Failure;

  // Validate any relocation specifier on the symbol reference.
  AArch64::Specifier ELFSpec;
  AArch64::Specifier DarwinSpec;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
    if (DarwinSpec == AArch64::S_None && ELFSpec == AArch64::S_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADR relocation (unfortunately).
      Expr = MCSpecifierExpr::create(Expr, S: AArch64::S_ABS, Ctx&: getContext(), Loc: S);
    } else if (ELFSpec != AArch64::S_GOT_AUTH_PAGE) {
      // For tiny code model, we use :got_auth: operator to fill 21-bit imm of
      // adr. It's not actually GOT entry page address but the GOT address
      // itself - we just share the same variant kind with :got_auth: operator
      // applied for adrp.
      // TODO: can we somehow get current TargetMachine object to call
      // getCodeModel() on it to ensure we are using tiny code model?
      return Error(L: S, Msg: "unexpected adr label");
    }
  }

  SMLoc E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
  Operands.push_back(Elt: AArch64Operand::CreateImm(Val: Expr, S, E, Ctx&: getContext()));
  return ParseStatus::Success;
}
3486
3487/// tryParseFPImm - A floating point immediate expression operand.
template <bool AddFPZeroAsLiteral>
ParseStatus AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  SMLoc S = getLoc();

  bool Hash = parseOptionalToken(T: AsmToken::Hash);

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = parseOptionalToken(T: AsmToken::Minus);

  const AsmToken &Tok = getTok();
  if (!Tok.is(K: AsmToken::Real) && !Tok.is(K: AsmToken::Integer)) {
    // Without a leading '#' this may simply be some other operand kind, so
    // report NoMatch rather than a hard error.
    if (!Hash)
      return ParseStatus::NoMatch;
    return TokError(Msg: "invalid floating point immediate");
  }

  // Parse hexadecimal representation.
  if (Tok.is(K: AsmToken::Integer) && Tok.getString().starts_with(Prefix: "0x")) {
    // A hex literal is taken as the raw 8-bit FP immediate encoding, so it
    // must fit in a byte and cannot itself be negated.
    if (Tok.getIntVal() > 255 || isNegative)
      return TokError(Msg: "encoded floating point value out of range");

    APFloat F((double)AArch64_AM::getFPImmFloat(Imm: Tok.getIntVal()));
    Operands.push_back(
        Elt: AArch64Operand::CreateFPImm(Val: F, IsExact: true, S, Ctx&: getContext()));
  } else {
    // Parse FP representation.
    APFloat RealVal(APFloat::IEEEdouble());
    auto StatusOrErr =
        RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
    if (errorToBool(Err: StatusOrErr.takeError()))
      return TokError(Msg: "invalid floating point representation");

    if (isNegative)
      RealVal.changeSign();

    if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
      // "#0.0" is represented as the two tokens "#0" and ".0" so it can
      // match the literal-zero operand forms in the instruction tables.
      Operands.push_back(Elt: AArch64Operand::CreateToken(Str: "#0", S, Ctx&: getContext()));
      Operands.push_back(Elt: AArch64Operand::CreateToken(Str: ".0", S, Ctx&: getContext()));
    } else
      Operands.push_back(Elt: AArch64Operand::CreateFPImm(
          Val: RealVal, IsExact: *StatusOrErr == APFloat::opOK, S, Ctx&: getContext()));
  }

  Lex(); // Eat the token.

  return ParseStatus::Success;
}
3535
3536/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
3537/// a shift suffix, for example '#1, lsl #12'.
ParseStatus
AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
  SMLoc S = getLoc();

  if (getTok().is(K: AsmToken::Hash))
    Lex(); // Eat '#'
  else if (getTok().isNot(K: AsmToken::Integer))
    // Operand should start from # or should be integer, emit error otherwise.
    return ParseStatus::NoMatch;

  // "N:M" after the hash is an SME immediate range, not a shifted immediate.
  if (getTok().is(K: AsmToken::Integer) &&
      getLexer().peekTok().is(K: AsmToken::Colon))
    return tryParseImmRange(Operands);

  const MCExpr *Imm = nullptr;
  if (parseSymbolicImmVal(ImmVal&: Imm))
    return ParseStatus::Failure;
  else if (getTok().isNot(K: AsmToken::Comma)) {
    // Plain immediate with no trailing ", lsl #N" or ", vgx<N>".
    Operands.push_back(
        Elt: AArch64Operand::CreateImm(Val: Imm, S, E: getLoc(), Ctx&: getContext()));
    return ParseStatus::Success;
  }

  // Eat ','
  Lex();
  StringRef VecGroup;
  // An SME "vgx2"/"vgx4" vector-group suffix may follow the immediate.
  if (!parseOptionalVGOperand(Operands, VecGroup)) {
    Operands.push_back(
        Elt: AArch64Operand::CreateImm(Val: Imm, S, E: getLoc(), Ctx&: getContext()));
    Operands.push_back(
        Elt: AArch64Operand::CreateToken(Str: VecGroup, S: getLoc(), Ctx&: getContext()));
    return ParseStatus::Success;
  }

  // The optional operand must be "lsl #N" where N is non-negative.
  if (!getTok().is(K: AsmToken::Identifier) ||
      !getTok().getIdentifier().equals_insensitive(RHS: "lsl"))
    return Error(L: getLoc(), Msg: "only 'lsl #+N' valid after immediate");

  // Eat 'lsl'
  Lex();

  parseOptionalToken(T: AsmToken::Hash);

  if (getTok().isNot(K: AsmToken::Integer))
    return Error(L: getLoc(), Msg: "only 'lsl #+N' valid after immediate");

  int64_t ShiftAmount = getTok().getIntVal();

  if (ShiftAmount < 0)
    return Error(L: getLoc(), Msg: "positive shift amount required");
  Lex(); // Eat the number

  // Just in case the optional lsl #0 is used for immediates other than zero.
  if (ShiftAmount == 0 && Imm != nullptr) {
    Operands.push_back(
        Elt: AArch64Operand::CreateImm(Val: Imm, S, E: getLoc(), Ctx&: getContext()));
    return ParseStatus::Success;
  }

  Operands.push_back(Elt: AArch64Operand::CreateShiftedImm(Val: Imm, ShiftAmount, S,
                                                      E: getLoc(), Ctx&: getContext()));
  return ParseStatus::Success;
}
3602
3603/// parseCondCodeString - Parse a Condition Code string, optionally returning a
3604/// suggestion to help common typos.
3605AArch64CC::CondCode
3606AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
3607 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3608 .Case(S: "eq", Value: AArch64CC::EQ)
3609 .Case(S: "ne", Value: AArch64CC::NE)
3610 .Case(S: "cs", Value: AArch64CC::HS)
3611 .Case(S: "hs", Value: AArch64CC::HS)
3612 .Case(S: "cc", Value: AArch64CC::LO)
3613 .Case(S: "lo", Value: AArch64CC::LO)
3614 .Case(S: "mi", Value: AArch64CC::MI)
3615 .Case(S: "pl", Value: AArch64CC::PL)
3616 .Case(S: "vs", Value: AArch64CC::VS)
3617 .Case(S: "vc", Value: AArch64CC::VC)
3618 .Case(S: "hi", Value: AArch64CC::HI)
3619 .Case(S: "ls", Value: AArch64CC::LS)
3620 .Case(S: "ge", Value: AArch64CC::GE)
3621 .Case(S: "lt", Value: AArch64CC::LT)
3622 .Case(S: "gt", Value: AArch64CC::GT)
3623 .Case(S: "le", Value: AArch64CC::LE)
3624 .Case(S: "al", Value: AArch64CC::AL)
3625 .Case(S: "nv", Value: AArch64CC::NV)
3626 // SVE condition code aliases:
3627 .Case(S: "none", Value: AArch64CC::EQ)
3628 .Case(S: "any", Value: AArch64CC::NE)
3629 .Case(S: "nlast", Value: AArch64CC::HS)
3630 .Case(S: "last", Value: AArch64CC::LO)
3631 .Case(S: "first", Value: AArch64CC::MI)
3632 .Case(S: "nfrst", Value: AArch64CC::PL)
3633 .Case(S: "pmore", Value: AArch64CC::HI)
3634 .Case(S: "plast", Value: AArch64CC::LS)
3635 .Case(S: "tcont", Value: AArch64CC::GE)
3636 .Case(S: "tstop", Value: AArch64CC::LT)
3637 .Default(Value: AArch64CC::Invalid);
3638
3639 if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
3640 Suggestion = "nfrst";
3641
3642 return CC;
3643}
3644
/// parseCondCode - Parse a Condition Code operand.
///
/// Consumes the identifier token and appends a CondCode operand on success.
/// When \p invertCondCode is set, the parsed code is inverted before being
/// recorded; AL and NV have no usable inversion and are rejected in that
/// mode. \returns true on error (diagnostic already emitted), false on
/// success.
bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
                                     bool invertCondCode) {
  SMLoc S = getLoc();
  const AsmToken &Tok = getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  StringRef Cond = Tok.getString();
  std::string Suggestion;
  AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
  if (CC == AArch64CC::Invalid) {
    std::string Msg = "invalid condition code";
    // parseCondCodeString may propose a fix for common typos (e.g. "nfirst").
    if (!Suggestion.empty())
      Msg += ", did you mean " + Suggestion + "?";
    return TokError(Msg);
  }
  Lex(); // Eat identifier token.

  if (invertCondCode) {
    if (CC == AArch64CC::AL || CC == AArch64CC::NV)
      return TokError(Msg: "condition codes AL and NV are invalid for this instruction");
    CC = AArch64CC::getInvertedCondCode(Code: AArch64CC::CondCode(CC));
  }

  Operands.push_back(
      Elt: AArch64Operand::CreateCondCode(Code: CC, S, E: getLoc(), Ctx&: getContext()));
  return false;
}
3673
/// tryParseSVCR - Try to parse an SME SVCR operand (a named streaming/ZA
/// control field looked up via AArch64SVCR). Returns NoMatch when the
/// identifier is not a known SVCR name so other parsers can try.
ParseStatus AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  SMLoc S = getLoc();

  if (Tok.isNot(K: AsmToken::Identifier))
    return TokError(Msg: "invalid operand for instruction");

  // -1 marks "name recognised but missing required features"; the operand is
  // still created so later matching can emit a better diagnostic.
  unsigned PStateImm = -1;
  const auto *SVCR = AArch64SVCR::lookupSVCRByName(Name: Tok.getString());
  if (!SVCR)
    return ParseStatus::NoMatch;
  if (SVCR->haveFeatures(ActiveFeatures: getSTI().getFeatureBits()))
    PStateImm = SVCR->Encoding;

  Operands.push_back(
      Elt: AArch64Operand::CreateSVCR(PStateField: PStateImm, Str: Tok.getString(), S, Ctx&: getContext()));
  Lex(); // Eat identifier token.
  return ParseStatus::Success;
}
3693
/// tryParseMatrixRegister - Try to parse an SME matrix operand: either the
/// whole ZA array ("za" with an optional ".b/.h/.s/.d" element suffix) or a
/// named tile / row / column register such as "za0h.b" or "za1v.s".
ParseStatus AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  SMLoc S = getLoc();

  StringRef Name = Tok.getString();

  if (Name.equals_insensitive(RHS: "za") || Name.starts_with_insensitive(Prefix: "za.")) {
    Lex(); // eat "za[.(b|h|s|d)]"
    unsigned ElementWidth = 0;
    auto DotPosition = Name.find(C: '.');
    if (DotPosition != StringRef::npos) {
      const auto &KindRes =
          parseVectorKind(Suffix: Name.drop_front(N: DotPosition), VectorKind: RegKind::Matrix);
      if (!KindRes)
        return TokError(
            Msg: "Expected the register to be followed by element width suffix");
      ElementWidth = KindRes->second;
    }
    Operands.push_back(Elt: AArch64Operand::CreateMatrixRegister(
        Reg: AArch64::ZA, ElementWidth, Kind: MatrixKind::Array, S, E: getLoc(),
        Ctx&: getContext()));
    if (getLexer().is(K: AsmToken::LBrac)) {
      // There's no comma after matrix operand, so we can parse the next operand
      // immediately.
      if (parseOperand(Operands, isCondCode: false, invertCondCode: false))
        return ParseStatus::NoMatch;
    }
    return ParseStatus::Success;
  }

  // Try to parse matrix register.
  MCRegister Reg = matchRegisterNameAlias(Name, Kind: RegKind::Matrix);
  if (!Reg)
    return ParseStatus::NoMatch;

  size_t DotPosition = Name.find(C: '.');
  assert(DotPosition != StringRef::npos && "Unexpected register");

  StringRef Head = Name.take_front(N: DotPosition);
  StringRef Tail = Name.drop_front(N: DotPosition);
  StringRef RowOrColumn = Head.take_back();

  // The last character before the dot selects the view: 'h' is a row slice,
  // 'v' a column slice, anything else the whole tile.
  MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
                        .Case(S: "h", Value: MatrixKind::Row)
                        .Case(S: "v", Value: MatrixKind::Col)
                        .Default(Value: MatrixKind::Tile);

  // Next up, parsing the suffix
  const auto &KindRes = parseVectorKind(Suffix: Tail, VectorKind: RegKind::Matrix);
  if (!KindRes)
    return TokError(
        Msg: "Expected the register to be followed by element width suffix");
  unsigned ElementWidth = KindRes->second;

  Lex();

  Operands.push_back(Elt: AArch64Operand::CreateMatrixRegister(
      Reg, ElementWidth, Kind, S, E: getLoc(), Ctx&: getContext()));

  if (getLexer().is(K: AsmToken::LBrac)) {
    // There's no comma after matrix operand, so we can parse the next operand
    // immediately.
    if (parseOperand(Operands, isCondCode: false, invertCondCode: false))
      return ParseStatus::NoMatch;
  }
  return ParseStatus::Success;
}
3761
/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
/// them if present.
///
/// Recognises a shift ("lsl", "lsr", "asr", "ror", "msl") or extend
/// ("uxtb".."sxtx") keyword followed by an optional '#'-prefixed amount.
/// Shifts require an explicit amount; extends default to an implicit #0.
ParseStatus
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  std::string LowerID = Tok.getString().lower();
  AArch64_AM::ShiftExtendType ShOp =
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case(S: "lsl", Value: AArch64_AM::LSL)
          .Case(S: "lsr", Value: AArch64_AM::LSR)
          .Case(S: "asr", Value: AArch64_AM::ASR)
          .Case(S: "ror", Value: AArch64_AM::ROR)
          .Case(S: "msl", Value: AArch64_AM::MSL)
          .Case(S: "uxtb", Value: AArch64_AM::UXTB)
          .Case(S: "uxth", Value: AArch64_AM::UXTH)
          .Case(S: "uxtw", Value: AArch64_AM::UXTW)
          .Case(S: "uxtx", Value: AArch64_AM::UXTX)
          .Case(S: "sxtb", Value: AArch64_AM::SXTB)
          .Case(S: "sxth", Value: AArch64_AM::SXTH)
          .Case(S: "sxtw", Value: AArch64_AM::SXTW)
          .Case(S: "sxtx", Value: AArch64_AM::SXTX)
          .Default(Value: AArch64_AM::InvalidShiftExtend);

  if (ShOp == AArch64_AM::InvalidShiftExtend)
    return ParseStatus::NoMatch;

  SMLoc S = Tok.getLoc();
  Lex();

  bool Hash = parseOptionalToken(T: AsmToken::Hash);

  // No '#' and no bare integer follows: only extends may omit the amount.
  if (!Hash && getLexer().isNot(K: AsmToken::Integer)) {
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      return TokError(Msg: "expected #imm after shift specifier");
    }

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
    Operands.push_back(
        Elt: AArch64Operand::CreateShiftExtend(ShOp, Val: 0, HasExplicitAmount: false, S, E, Ctx&: getContext()));
    return ParseStatus::Success;
  }

  // Make sure we do actually have a number, identifier or a parenthesized
  // expression.
  SMLoc E = getLoc();
  if (!getTok().is(K: AsmToken::Integer) && !getTok().is(K: AsmToken::LParen) &&
      !getTok().is(K: AsmToken::Identifier))
    return Error(L: E, Msg: "expected integer shift amount");

  const MCExpr *ImmVal;
  if (getParser().parseExpression(Res&: ImmVal))
    return ParseStatus::Failure;

  // The amount must fold to a constant at parse time.
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
  if (!MCE)
    return Error(L: E, Msg: "expected constant '#imm' after shift specifier");

  E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
  Operands.push_back(Elt: AArch64Operand::CreateShiftExtend(
      ShOp, Val: MCE->getValue(), HasExplicitAmount: true, S, E, Ctx&: getContext()));
  return ParseStatus::Success;
}
3828
/// Table of architecture-extension names and the subtarget features each one
/// enables. Used to resolve extension names and, in reverse, by
/// setRequiredFeatureString to turn a required-feature bitset back into
/// names for diagnostics (entries are scanned in declaration order).
static const struct Extension {
  const char *Name;
  const FeatureBitset Features;
} ExtensionMap[] = {
    {.Name: "crc", .Features: {AArch64::FeatureCRC}},
    {.Name: "sm4", .Features: {AArch64::FeatureSM4}},
    {.Name: "sha3", .Features: {AArch64::FeatureSHA3}},
    {.Name: "sha2", .Features: {AArch64::FeatureSHA2}},
    {.Name: "aes", .Features: {AArch64::FeatureAES}},
    {.Name: "crypto", .Features: {AArch64::FeatureCrypto}},
    {.Name: "fp", .Features: {AArch64::FeatureFPARMv8}},
    {.Name: "simd", .Features: {AArch64::FeatureNEON}},
    {.Name: "ras", .Features: {AArch64::FeatureRAS}},
    {.Name: "rasv2", .Features: {AArch64::FeatureRASv2}},
    {.Name: "lse", .Features: {AArch64::FeatureLSE}},
    {.Name: "predres", .Features: {AArch64::FeaturePredRes}},
    {.Name: "predres2", .Features: {AArch64::FeatureSPECRES2}},
    {.Name: "ccdp", .Features: {AArch64::FeatureCacheDeepPersist}},
    {.Name: "mte", .Features: {AArch64::FeatureMTE}},
    {.Name: "memtag", .Features: {AArch64::FeatureMTE}},
    {.Name: "tlb-rmi", .Features: {AArch64::FeatureTLB_RMI}},
    {.Name: "pan", .Features: {AArch64::FeaturePAN}},
    {.Name: "pan-rwv", .Features: {AArch64::FeaturePAN_RWV}},
    {.Name: "ccpp", .Features: {AArch64::FeatureCCPP}},
    {.Name: "rcpc", .Features: {AArch64::FeatureRCPC}},
    {.Name: "rng", .Features: {AArch64::FeatureRandGen}},
    {.Name: "sve", .Features: {AArch64::FeatureSVE}},
    {.Name: "sve-b16b16", .Features: {AArch64::FeatureSVEB16B16}},
    {.Name: "sve2", .Features: {AArch64::FeatureSVE2}},
    {.Name: "sve-aes", .Features: {AArch64::FeatureSVEAES}},
    {.Name: "sve2-aes", .Features: {AArch64::FeatureAliasSVE2AES, AArch64::FeatureSVEAES}},
    {.Name: "sve-sm4", .Features: {AArch64::FeatureSVESM4}},
    {.Name: "sve2-sm4", .Features: {AArch64::FeatureAliasSVE2SM4, AArch64::FeatureSVESM4}},
    {.Name: "sve-sha3", .Features: {AArch64::FeatureSVESHA3}},
    {.Name: "sve2-sha3", .Features: {AArch64::FeatureAliasSVE2SHA3, AArch64::FeatureSVESHA3}},
    {.Name: "sve-bitperm", .Features: {AArch64::FeatureSVEBitPerm}},
    {.Name: "sve2-bitperm",
     .Features: {AArch64::FeatureAliasSVE2BitPerm, AArch64::FeatureSVEBitPerm,
      AArch64::FeatureSVE2}},
    {.Name: "sve2p1", .Features: {AArch64::FeatureSVE2p1}},
    {.Name: "ls64", .Features: {AArch64::FeatureLS64}},
    {.Name: "xs", .Features: {AArch64::FeatureXS}},
    {.Name: "pauth", .Features: {AArch64::FeaturePAuth}},
    {.Name: "flagm", .Features: {AArch64::FeatureFlagM}},
    {.Name: "rme", .Features: {AArch64::FeatureRME}},
    {.Name: "sme", .Features: {AArch64::FeatureSME}},
    {.Name: "sme-f64f64", .Features: {AArch64::FeatureSMEF64F64}},
    {.Name: "sme-f16f16", .Features: {AArch64::FeatureSMEF16F16}},
    {.Name: "sme-i16i64", .Features: {AArch64::FeatureSMEI16I64}},
    {.Name: "sme2", .Features: {AArch64::FeatureSME2}},
    {.Name: "sme2p1", .Features: {AArch64::FeatureSME2p1}},
    {.Name: "sme-b16b16", .Features: {AArch64::FeatureSMEB16B16}},
    {.Name: "hbc", .Features: {AArch64::FeatureHBC}},
    {.Name: "mops", .Features: {AArch64::FeatureMOPS}},
    {.Name: "mec", .Features: {AArch64::FeatureMEC}},
    {.Name: "the", .Features: {AArch64::FeatureTHE}},
    {.Name: "d128", .Features: {AArch64::FeatureD128}},
    {.Name: "lse128", .Features: {AArch64::FeatureLSE128}},
    {.Name: "ite", .Features: {AArch64::FeatureITE}},
    {.Name: "cssc", .Features: {AArch64::FeatureCSSC}},
    {.Name: "rcpc3", .Features: {AArch64::FeatureRCPC3}},
    {.Name: "gcs", .Features: {AArch64::FeatureGCS}},
    {.Name: "bf16", .Features: {AArch64::FeatureBF16}},
    {.Name: "compnum", .Features: {AArch64::FeatureComplxNum}},
    {.Name: "dotprod", .Features: {AArch64::FeatureDotProd}},
    {.Name: "f32mm", .Features: {AArch64::FeatureMatMulFP32}},
    {.Name: "f64mm", .Features: {AArch64::FeatureMatMulFP64}},
    {.Name: "fp16", .Features: {AArch64::FeatureFullFP16}},
    {.Name: "fp16fml", .Features: {AArch64::FeatureFP16FML}},
    {.Name: "i8mm", .Features: {AArch64::FeatureMatMulInt8}},
    {.Name: "lor", .Features: {AArch64::FeatureLOR}},
    {.Name: "profile", .Features: {AArch64::FeatureSPE}},
    // "rdma" is the name documented by binutils for the feature, but
    // binutils also accepts incomplete prefixes of features, so "rdm"
    // works too. Support both spellings here.
    {.Name: "rdm", .Features: {AArch64::FeatureRDM}},
    {.Name: "rdma", .Features: {AArch64::FeatureRDM}},
    {.Name: "sb", .Features: {AArch64::FeatureSB}},
    {.Name: "ssbs", .Features: {AArch64::FeatureSSBS}},
    {.Name: "fp8", .Features: {AArch64::FeatureFP8}},
    {.Name: "faminmax", .Features: {AArch64::FeatureFAMINMAX}},
    {.Name: "fp8fma", .Features: {AArch64::FeatureFP8FMA}},
    {.Name: "ssve-fp8fma", .Features: {AArch64::FeatureSSVE_FP8FMA}},
    {.Name: "fp8dot2", .Features: {AArch64::FeatureFP8DOT2}},
    {.Name: "ssve-fp8dot2", .Features: {AArch64::FeatureSSVE_FP8DOT2}},
    {.Name: "fp8dot4", .Features: {AArch64::FeatureFP8DOT4}},
    {.Name: "ssve-fp8dot4", .Features: {AArch64::FeatureSSVE_FP8DOT4}},
    {.Name: "lut", .Features: {AArch64::FeatureLUT}},
    {.Name: "sme-lutv2", .Features: {AArch64::FeatureSME_LUTv2}},
    {.Name: "sme-f8f16", .Features: {AArch64::FeatureSMEF8F16}},
    {.Name: "sme-f8f32", .Features: {AArch64::FeatureSMEF8F32}},
    {.Name: "sme-fa64", .Features: {AArch64::FeatureSMEFA64}},
    {.Name: "cpa", .Features: {AArch64::FeatureCPA}},
    {.Name: "tlbiw", .Features: {AArch64::FeatureTLBIW}},
    {.Name: "pops", .Features: {AArch64::FeaturePoPS}},
    {.Name: "cmpbr", .Features: {AArch64::FeatureCMPBR}},
    {.Name: "f8f32mm", .Features: {AArch64::FeatureF8F32MM}},
    {.Name: "f8f16mm", .Features: {AArch64::FeatureF8F16MM}},
    {.Name: "fprcvt", .Features: {AArch64::FeatureFPRCVT}},
    {.Name: "lsfe", .Features: {AArch64::FeatureLSFE}},
    {.Name: "sme2p2", .Features: {AArch64::FeatureSME2p2}},
    {.Name: "ssve-aes", .Features: {AArch64::FeatureSSVE_AES}},
    {.Name: "sve2p2", .Features: {AArch64::FeatureSVE2p2}},
    {.Name: "sve-aes2", .Features: {AArch64::FeatureSVEAES2}},
    {.Name: "sve-bfscale", .Features: {AArch64::FeatureSVEBFSCALE}},
    {.Name: "sve-f16f32mm", .Features: {AArch64::FeatureSVE_F16F32MM}},
    {.Name: "lsui", .Features: {AArch64::FeatureLSUI}},
    {.Name: "occmo", .Features: {AArch64::FeatureOCCMO}},
    {.Name: "ssve-bitperm", .Features: {AArch64::FeatureSSVE_BitPerm}},
    {.Name: "sme-mop4", .Features: {AArch64::FeatureSME_MOP4}},
    {.Name: "sme-tmop", .Features: {AArch64::FeatureSME_TMOP}},
    {.Name: "lscp", .Features: {AArch64::FeatureLSCP}},
    {.Name: "tlbid", .Features: {AArch64::FeatureTLBID}},
    {.Name: "mpamv2", .Features: {AArch64::FeatureMPAMv2}},
    {.Name: "mtetc", .Features: {AArch64::FeatureMTETC}},
    {.Name: "gcie", .Features: {AArch64::FeatureGCIE}},
    {.Name: "sme2p3", .Features: {AArch64::FeatureSME2p3}},
    {.Name: "sve2p3", .Features: {AArch64::FeatureSVE2p3}},
    {.Name: "sve-b16mm", .Features: {AArch64::FeatureSVE_B16MM}},
    {.Name: "f16mm", .Features: {AArch64::FeatureF16MM}},
    {.Name: "f16f32dot", .Features: {AArch64::FeatureF16F32DOT}},
    {.Name: "f16f32mm", .Features: {AArch64::FeatureF16F32MM}},
    {.Name: "mops-go", .Features: {AArch64::FeatureMOPS_GO}},
    {.Name: "poe2", .Features: {AArch64::FeatureS1POE2}},
    {.Name: "tev", .Features: {AArch64::FeatureTEV}},
    {.Name: "btie", .Features: {AArch64::FeatureBTIE}},
    {.Name: "dit", .Features: {AArch64::FeatureDIT}},
    {.Name: "brbe", .Features: {AArch64::FeatureBRBE}},
    {.Name: "bti", .Features: {AArch64::FeatureBranchTargetId}},
    {.Name: "fcma", .Features: {AArch64::FeatureComplxNum}},
    {.Name: "jscvt", .Features: {AArch64::FeatureJS}},
    {.Name: "pauth-lr", .Features: {AArch64::FeaturePAuthLR}},
    {.Name: "ssve-fexpa", .Features: {AArch64::FeatureSSVE_FEXPA}},
    {.Name: "wfxt", .Features: {AArch64::FeatureWFxT}},
};
3964
/// Append a human-readable description of the features in \p FBS to \p Str.
/// Used to build "<insn> requires: ..." diagnostics: an architecture-version
/// bit is reported as a single "ARMv..." name; if none of the versioned bits
/// from v8.1a onward match, all matching ExtensionMap names are listed
/// (or "(unknown)" when nothing matches).
static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
  // NOTE(review): the v8.0a check is a standalone `if`, not part of the
  // else-if chain below. With only HasV8_0aOps set, control still reaches the
  // final `else` and may append extension names (or "(unknown)") directly
  // after "ARMv8a" with no separator — confirm this is intended.
  if (FBS[AArch64::HasV8_0aOps])
    Str += "ARMv8a";
  if (FBS[AArch64::HasV8_1aOps])
    Str += "ARMv8.1a";
  else if (FBS[AArch64::HasV8_2aOps])
    Str += "ARMv8.2a";
  else if (FBS[AArch64::HasV8_3aOps])
    Str += "ARMv8.3a";
  else if (FBS[AArch64::HasV8_4aOps])
    Str += "ARMv8.4a";
  else if (FBS[AArch64::HasV8_5aOps])
    Str += "ARMv8.5a";
  else if (FBS[AArch64::HasV8_6aOps])
    Str += "ARMv8.6a";
  else if (FBS[AArch64::HasV8_7aOps])
    Str += "ARMv8.7a";
  else if (FBS[AArch64::HasV8_8aOps])
    Str += "ARMv8.8a";
  else if (FBS[AArch64::HasV8_9aOps])
    Str += "ARMv8.9a";
  else if (FBS[AArch64::HasV9_0aOps])
    Str += "ARMv9-a";
  else if (FBS[AArch64::HasV9_1aOps])
    Str += "ARMv9.1a";
  else if (FBS[AArch64::HasV9_2aOps])
    Str += "ARMv9.2a";
  else if (FBS[AArch64::HasV9_3aOps])
    Str += "ARMv9.3a";
  else if (FBS[AArch64::HasV9_4aOps])
    Str += "ARMv9.4a";
  else if (FBS[AArch64::HasV9_5aOps])
    Str += "ARMv9.5a";
  else if (FBS[AArch64::HasV9_6aOps])
    Str += "ARMv9.6a";
  else if (FBS[AArch64::HasV9_7aOps])
    Str += "ARMv9.7a";
  else if (FBS[AArch64::HasV8_0rOps])
    Str += "ARMv8r";
  else {
    SmallVector<std::string, 2> ExtMatches;
    for (const auto& Ext : ExtensionMap) {
      // Use & in case multiple features are enabled
      if ((FBS & Ext.Features) != FeatureBitset())
        ExtMatches.push_back(Elt: Ext.Name);
    }
    Str += !ExtMatches.empty() ? llvm::join(R&: ExtMatches, Separator: ", ") : "(unknown)";
  }
}
4014
4015void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
4016 SMLoc S) {
4017 const uint16_t Op2 = Encoding & 7;
4018 const uint16_t Cm = (Encoding & 0x78) >> 3;
4019 const uint16_t Cn = (Encoding & 0x780) >> 7;
4020 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
4021
4022 const MCExpr *Expr = MCConstantExpr::create(Value: Op1, Ctx&: getContext());
4023
4024 Operands.push_back(
4025 Elt: AArch64Operand::CreateImm(Val: Expr, S, E: getLoc(), Ctx&: getContext()));
4026 Operands.push_back(
4027 Elt: AArch64Operand::CreateSysCR(Val: Cn, S, E: getLoc(), Ctx&: getContext()));
4028 Operands.push_back(
4029 Elt: AArch64Operand::CreateSysCR(Val: Cm, S, E: getLoc(), Ctx&: getContext()));
4030 Expr = MCConstantExpr::create(Value: Op2, Ctx&: getContext());
4031 Operands.push_back(
4032 Elt: AArch64Operand::CreateImm(Val: Expr, S, E: getLoc(), Ctx&: getContext()));
4033}
4034
4035/// parseSysAlias - The IC, DC, AT, TLBI, MLBI and GIC{R} and GSB instructions
4036/// are simple aliases for the SYS instruction. Parse them specially so that
4037/// we create a SYS MCInst.
4038bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
4039 OperandVector &Operands) {
4040 if (Name.contains(C: '.'))
4041 return TokError(Msg: "invalid operand");
4042
4043 Mnemonic = Name;
4044 Operands.push_back(Elt: AArch64Operand::CreateToken(Str: "sys", S: NameLoc, Ctx&: getContext()));
4045
4046 const AsmToken &Tok = getTok();
4047 StringRef Op = Tok.getString();
4048 SMLoc S = Tok.getLoc();
4049 bool ExpectRegister = true;
4050 bool OptionalRegister = false;
4051 bool hasAll = getSTI().hasFeature(Feature: AArch64::FeatureAll);
4052 bool hasTLBID = getSTI().hasFeature(Feature: AArch64::FeatureTLBID);
4053
4054 if (Mnemonic == "ic") {
4055 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Name: Op);
4056 if (!IC)
4057 return TokError(Msg: "invalid operand for IC instruction");
4058 else if (!IC->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4059 std::string Str("IC " + std::string(IC->Name) + " requires: ");
4060 setRequiredFeatureString(FBS: IC->getRequiredFeatures(), Str);
4061 return TokError(Msg: Str);
4062 }
4063 ExpectRegister = IC->NeedsReg;
4064 createSysAlias(Encoding: IC->Encoding, Operands, S);
4065 } else if (Mnemonic == "dc") {
4066 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Name: Op);
4067 if (!DC)
4068 return TokError(Msg: "invalid operand for DC instruction");
4069 else if (!DC->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4070 std::string Str("DC " + std::string(DC->Name) + " requires: ");
4071 setRequiredFeatureString(FBS: DC->getRequiredFeatures(), Str);
4072 return TokError(Msg: Str);
4073 }
4074 createSysAlias(Encoding: DC->Encoding, Operands, S);
4075 } else if (Mnemonic == "at") {
4076 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Name: Op);
4077 if (!AT)
4078 return TokError(Msg: "invalid operand for AT instruction");
4079 else if (!AT->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4080 std::string Str("AT " + std::string(AT->Name) + " requires: ");
4081 setRequiredFeatureString(FBS: AT->getRequiredFeatures(), Str);
4082 return TokError(Msg: Str);
4083 }
4084 createSysAlias(Encoding: AT->Encoding, Operands, S);
4085 } else if (Mnemonic == "tlbi") {
4086 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Name: Op);
4087 if (!TLBI)
4088 return TokError(Msg: "invalid operand for TLBI instruction");
4089 else if (!TLBI->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4090 std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
4091 setRequiredFeatureString(FBS: TLBI->getRequiredFeatures(), Str);
4092 return TokError(Msg: Str);
4093 }
4094 ExpectRegister = TLBI->NeedsReg;
4095 bool hasTLBID = getSTI().hasFeature(Feature: AArch64::FeatureTLBID);
4096 if (hasAll || hasTLBID) {
4097 OptionalRegister = TLBI->OptionalReg;
4098 }
4099 createSysAlias(Encoding: TLBI->Encoding, Operands, S);
4100 } else if (Mnemonic == "mlbi") {
4101 const AArch64MLBI::MLBI *MLBI = AArch64MLBI::lookupMLBIByName(Name: Op);
4102 if (!MLBI)
4103 return TokError(Msg: "invalid operand for MLBI instruction");
4104 else if (!MLBI->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4105 std::string Str("MLBI " + std::string(MLBI->Name) + " requires: ");
4106 setRequiredFeatureString(FBS: MLBI->getRequiredFeatures(), Str);
4107 return TokError(Msg: Str);
4108 }
4109 ExpectRegister = MLBI->NeedsReg;
4110 createSysAlias(Encoding: MLBI->Encoding, Operands, S);
4111 } else if (Mnemonic == "gic") {
4112 const AArch64GIC::GIC *GIC = AArch64GIC::lookupGICByName(Name: Op);
4113 if (!GIC)
4114 return TokError(Msg: "invalid operand for GIC instruction");
4115 else if (!GIC->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4116 std::string Str("GIC " + std::string(GIC->Name) + " requires: ");
4117 setRequiredFeatureString(FBS: GIC->getRequiredFeatures(), Str);
4118 return TokError(Msg: Str);
4119 }
4120 ExpectRegister = GIC->NeedsReg;
4121 createSysAlias(Encoding: GIC->Encoding, Operands, S);
4122 } else if (Mnemonic == "gsb") {
4123 const AArch64GSB::GSB *GSB = AArch64GSB::lookupGSBByName(Name: Op);
4124 if (!GSB)
4125 return TokError(Msg: "invalid operand for GSB instruction");
4126 else if (!GSB->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4127 std::string Str("GSB " + std::string(GSB->Name) + " requires: ");
4128 setRequiredFeatureString(FBS: GSB->getRequiredFeatures(), Str);
4129 return TokError(Msg: Str);
4130 }
4131 ExpectRegister = false;
4132 createSysAlias(Encoding: GSB->Encoding, Operands, S);
4133 } else if (Mnemonic == "plbi") {
4134 const AArch64PLBI::PLBI *PLBI = AArch64PLBI::lookupPLBIByName(Name: Op);
4135 if (!PLBI)
4136 return TokError(Msg: "invalid operand for PLBI instruction");
4137 else if (!PLBI->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
4138 std::string Str("PLBI " + std::string(PLBI->Name) + " requires: ");
4139 setRequiredFeatureString(FBS: PLBI->getRequiredFeatures(), Str);
4140 return TokError(Msg: Str);
4141 }
4142 ExpectRegister = PLBI->NeedsReg;
4143 if (hasAll || hasTLBID) {
4144 OptionalRegister = PLBI->OptionalReg;
4145 }
4146 createSysAlias(Encoding: PLBI->Encoding, Operands, S);
4147 } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" ||
4148 Mnemonic == "cosp") {
4149
4150 if (Op.lower() != "rctx")
4151 return TokError(Msg: "invalid operand for prediction restriction instruction");
4152
4153 bool hasPredres = hasAll || getSTI().hasFeature(Feature: AArch64::FeaturePredRes);
4154 bool hasSpecres2 = hasAll || getSTI().hasFeature(Feature: AArch64::FeatureSPECRES2);
4155
4156 if (Mnemonic == "cosp" && !hasSpecres2)
4157 return TokError(Msg: "COSP requires: predres2");
4158 if (!hasPredres)
4159 return TokError(Msg: Mnemonic.upper() + "RCTX requires: predres");
4160
4161 uint16_t PRCTX_Op2 = Mnemonic == "cfp" ? 0b100
4162 : Mnemonic == "dvp" ? 0b101
4163 : Mnemonic == "cosp" ? 0b110
4164 : Mnemonic == "cpp" ? 0b111
4165 : 0;
4166 assert(PRCTX_Op2 &&
4167 "Invalid mnemonic for prediction restriction instruction");
4168 const auto SYS_3_7_3 = 0b01101110011; // op=3, CRn=7, CRm=3
4169 const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;
4170
4171 createSysAlias(Encoding, Operands, S);
4172 }
4173
4174 Lex(); // Eat operand.
4175
4176 bool HasRegister = false;
4177
4178 // Check for the optional register operand.
4179 if (parseOptionalToken(T: AsmToken::Comma)) {
4180 if (Tok.isNot(K: AsmToken::Identifier) || parseRegister(Operands))
4181 return TokError(Msg: "expected register operand");
4182 HasRegister = true;
4183 }
4184
4185 if (!OptionalRegister) {
4186 if (ExpectRegister && !HasRegister)
4187 return TokError(Msg: "specified " + Mnemonic + " op requires a register");
4188 else if (!ExpectRegister && HasRegister)
4189 return TokError(Msg: "specified " + Mnemonic + " op does not use a register");
4190 }
4191
4192 if (parseToken(T: AsmToken::EndOfStatement, Msg: "unexpected token in argument list"))
4193 return true;
4194
4195 return false;
4196}
4197
/// parseSyslAlias - The GICR instructions are simple aliases for
/// the SYSL instruction. Parse them specially so that we create a
/// SYSL MCInst.
///
/// Expects "<Xt>, <op>" after the mnemonic: a destination register followed
/// by a named GICR operation. \returns true on error, false on success.
bool AArch64AsmParser::parseSyslAlias(StringRef Name, SMLoc NameLoc,
                                      OperandVector &Operands) {

  Mnemonic = Name;
  Operands.push_back(
      Elt: AArch64Operand::CreateToken(Str: "sysl", S: NameLoc, Ctx&: getContext()));

  // Now expect two operands (identifier + register)
  SMLoc startLoc = getLoc();
  const AsmToken &regTok = getTok();
  StringRef reg = regTok.getString();
  MCRegister Reg = matchRegisterNameAlias(Name: reg.lower(), Kind: RegKind::Scalar);
  if (!Reg)
    return TokError(Msg: "expected register operand");

  Operands.push_back(Elt: AArch64Operand::CreateReg(
      Reg, Kind: RegKind::Scalar, S: startLoc, E: getLoc(), Ctx&: getContext(), EqTy: EqualsReg));

  Lex(); // Eat token
  if (parseToken(T: AsmToken::Comma))
    return true;

  // Check for identifier
  const AsmToken &operandTok = getTok();
  StringRef Op = operandTok.getString();
  SMLoc S2 = operandTok.getLoc();
  Lex(); // Eat token

  if (Mnemonic == "gicr") {
    const AArch64GICR::GICR *GICR = AArch64GICR::lookupGICRByName(Name: Op);
    if (!GICR)
      return Error(L: S2, Msg: "invalid operand for GICR instruction");
    else if (!GICR->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
      std::string Str("GICR " + std::string(GICR->Name) + " requires: ");
      setRequiredFeatureString(FBS: GICR->getRequiredFeatures(), Str);
      return Error(L: S2, Msg: Str);
    }
    createSysAlias(Encoding: GICR->Encoding, Operands, S: S2);
  }

  if (parseToken(T: AsmToken::EndOfStatement, Msg: "unexpected token in argument list"))
    return true;

  return false;
}
4246
/// parseSyspAlias - The TLBIP instructions are simple aliases for
/// the SYSP instruction. Parse them specially so that we create a SYSP MCInst.
///
/// Expects "<op>, <Xt pair>" after the mnemonic; the register pair may be
/// the XZR pseudo-pair or a sequential GPR pair. \returns true on error,
/// false on success.
bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
                                      OperandVector &Operands) {
  if (Name.contains(C: '.'))
    return TokError(Msg: "invalid operand");

  Mnemonic = Name;
  Operands.push_back(
      Elt: AArch64Operand::CreateToken(Str: "sysp", S: NameLoc, Ctx&: getContext()));

  const AsmToken &Tok = getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  if (Mnemonic == "tlbip") {
    const AArch64TLBIP::TLBIP *TLBIP = AArch64TLBIP::lookupTLBIPByName(Name: Op);
    if (!TLBIP)
      return TokError(Msg: "invalid operand for TLBIP instruction");
    if (!getSTI().hasFeature(Feature: AArch64::FeatureD128) &&
        !getSTI().hasFeature(Feature: AArch64::FeatureAll))
      return TokError(Msg: "instruction requires: d128")
;
    if (!TLBIP->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
      std::string Str("instruction requires: ");
      setRequiredFeatureString(FBS: TLBIP->getRequiredFeatures(), Str);
      return TokError(Msg: Str);
    }
    createSysAlias(Encoding: TLBIP->Encoding, Operands, S);
  }

  Lex(); // Eat operand.

  if (parseComma())
    return true;

  if (Tok.isNot(K: AsmToken::Identifier))
    return TokError(Msg: "expected register identifier");
  // Try the XZR pseudo-pair first, then a general sequential register pair.
  auto Result = tryParseSyspXzrPair(Operands);
  if (Result.isNoMatch())
    Result = tryParseGPRSeqPair(Operands);
  if (!Result.isSuccess())
    return TokError(Msg: "specified " + Mnemonic +
                    " op requires a pair of registers");

  if (parseToken(T: AsmToken::EndOfStatement, Msg: "unexpected token in argument list"))
    return true;

  return false;
}
4296
/// tryParseBarrierOperand - Parse the operand of a barrier instruction
/// (DSB/DMB/ISB/TSB): either a '#'-prefixed immediate in [0, 15] or a named
/// barrier option. For "dsb" with an out-of-range immediate or an unknown
/// name, returns NoMatch so the nXS-variant parser can try instead.
ParseStatus AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = getTok();

  if (Mnemonic == "tsb" && Tok.isNot(K: AsmToken::Identifier))
    return TokError(Msg: "'csync' operand expected");
  if (parseOptionalToken(T: AsmToken::Hash) || Tok.is(K: AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    // Saved so it can be pushed back to the lexer if we need to retry as nXS.
    AsmToken IntTok = Tok;
    if (getParser().parseExpression(Res&: ImmVal))
      return ParseStatus::Failure;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
    if (!MCE)
      return Error(L: ExprLoc, Msg: "immediate value expected for barrier operand");
    int64_t Value = MCE->getValue();
    if (Mnemonic == "dsb" && Value > 15) {
      // This case is a no match here, but it might be matched by the nXS
      // variant. Deliberately not unlex the optional '#' as it is not necessary
      // to characterize an integer immediate.
      Parser.getLexer().UnLex(Token: IntTok);
      return ParseStatus::NoMatch;
    }
    if (Value < 0 || Value > 15)
      return Error(L: ExprLoc, Msg: "barrier operand out of range");
    auto DB = AArch64DB::lookupDBByEncoding(Encoding: Value);
    Operands.push_back(Elt: AArch64Operand::CreateBarrier(Val: Value, Str: DB ? DB->Name : "",
                                                      S: ExprLoc, Ctx&: getContext(),
                                                      HasnXSModifier: false /*hasnXSModifier*/));
    return ParseStatus::Success;
  }

  if (Tok.isNot(K: AsmToken::Identifier))
    return TokError(Msg: "invalid operand for instruction");

  StringRef Operand = Tok.getString();
  auto TSB = AArch64TSB::lookupTSBByName(Name: Operand);
  auto DB = AArch64DB::lookupDBByName(Name: Operand);
  // The only valid named option for ISB is 'sy'
  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy))
    return TokError(Msg: "'sy' or #imm operand expected");
  // The only valid named option for TSB is 'csync'
  if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync))
    return TokError(Msg: "'csync' operand expected");
  if (!DB && !TSB) {
    if (Mnemonic == "dsb") {
      // This case is a no match here, but it might be matched by the nXS
      // variant.
      return ParseStatus::NoMatch;
    }
    return TokError(Msg: "invalid barrier option name");
  }

  Operands.push_back(Elt: AArch64Operand::CreateBarrier(
      Val: DB ? DB->Encoding : TSB->Encoding, Str: Tok.getString(), S: getLoc(),
      Ctx&: getContext(), HasnXSModifier: false /*hasnXSModifier*/));
  Lex(); // Consume the option

  return ParseStatus::Success;
}
4358
/// tryParseBarriernXSOperand - Parse the operand of the v8.7-A "dsb" nXS
/// variant. Accepts either one of the immediates 16/20/24/28 (optionally
/// '#'-prefixed) or a named option from the DBnXS table.
ParseStatus
AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
  const AsmToken &Tok = getTok();

  // Only "dsb" has an nXS form; keep the check in release builds too, where
  // the assert compiles away.
  assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
  if (Mnemonic != "dsb")
    return ParseStatus::Failure;

  if (parseOptionalToken(T: AsmToken::Hash) || Tok.is(K: AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    if (getParser().parseExpression(Res&: ImmVal))
      return ParseStatus::Failure;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
    if (!MCE)
      return Error(L: ExprLoc, Msg: "immediate value expected for barrier operand");
    int64_t Value = MCE->getValue();
    // v8.7-A DSB in the nXS variant accepts only the following immediate
    // values: 16, 20, 24, 28.
    if (Value != 16 && Value != 20 && Value != 24 && Value != 28)
      return Error(L: ExprLoc, Msg: "barrier operand out of range");
    // The range check above guarantees the table lookup succeeds.
    auto DB = AArch64DBnXS::lookupDBnXSByImmValue(ImmValue: Value);
    Operands.push_back(Elt: AArch64Operand::CreateBarrier(Val: DB->Encoding, Str: DB->Name,
                                                      S: ExprLoc, Ctx&: getContext(),
                                                      HasnXSModifier: true /*hasnXSModifier*/));
    return ParseStatus::Success;
  }

  if (Tok.isNot(K: AsmToken::Identifier))
    return TokError(Msg: "invalid operand for instruction");

  // Named barrier option from the nXS table.
  StringRef Operand = Tok.getString();
  auto DB = AArch64DBnXS::lookupDBnXSByName(Name: Operand);

  if (!DB)
    return TokError(Msg: "invalid barrier option name");

  Operands.push_back(
      Elt: AArch64Operand::CreateBarrier(Val: DB->Encoding, Str: Tok.getString(), S: getLoc(),
                                     Ctx&: getContext(), HasnXSModifier: true /*hasnXSModifier*/));
  Lex(); // Consume the option

  return ParseStatus::Success;
}
4404
/// tryParseSysReg - Parse a system register operand (e.g. for MRS/MSR).
/// The identifier is resolved against the feature-gated SysReg table, falling
/// back to the generic "Sop0_op1_Cn_Cm_op2" spelling; any matching PSTATE
/// field encoding is recorded as well so MSR-immediate forms can match.
ParseStatus AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
  const AsmToken &Tok = getTok();

  if (Tok.isNot(K: AsmToken::Identifier))
    return ParseStatus::NoMatch;

  // Decline SVCR names ("za"/"sm"-style) so they can be matched by their
  // dedicated operand parser instead.
  if (AArch64SVCR::lookupSVCRByName(Name: Tok.getString()))
    return ParseStatus::NoMatch;

  // -1 marks the register as unusable in that direction (not readable for
  // MRS, or not writeable for MSR).
  int MRSReg, MSRReg;
  auto SysReg = AArch64SysReg::lookupSysRegByName(Name: Tok.getString());
  if (SysReg && SysReg->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
    MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
    MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
  } else
    MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Name: Tok.getString());

  // Try the imm0_15 PSTATE table first, then the imm0_1 table; each lookup is
  // gated on the active subtarget features.
  unsigned PStateImm = -1;
  auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Name: Tok.getString());
  if (PState15 && PState15->haveFeatures(ActiveFeatures: getSTI().getFeatureBits()))
    PStateImm = PState15->Encoding;
  if (!PState15) {
    auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Name: Tok.getString());
    if (PState1 && PState1->haveFeatures(ActiveFeatures: getSTI().getFeatureBits()))
      PStateImm = PState1->Encoding;
  }

  Operands.push_back(
      Elt: AArch64Operand::CreateSysReg(Str: Tok.getString(), S: getLoc(), MRSReg, MSRReg,
                                    PStateField: PStateImm, Ctx&: getContext()));
  Lex(); // Eat identifier

  return ParseStatus::Success;
}
4439
4440ParseStatus
4441AArch64AsmParser::tryParsePHintInstOperand(OperandVector &Operands) {
4442 SMLoc S = getLoc();
4443 const AsmToken &Tok = getTok();
4444 if (Tok.isNot(K: AsmToken::Identifier))
4445 return TokError(Msg: "invalid operand for instruction");
4446
4447 auto PH = AArch64PHint::lookupPHintByName(Tok.getString());
4448 if (!PH)
4449 return TokError(Msg: "invalid operand for instruction");
4450
4451 Operands.push_back(Elt: AArch64Operand::CreatePHintInst(
4452 Val: PH->Encoding, Str: Tok.getString(), S, Ctx&: getContext()));
4453 Lex(); // Eat identifier token.
4454 return ParseStatus::Success;
4455}
4456
/// tryParseNeonVectorRegister - Parse a vector register operand.
/// Returns true on failure (nothing consumed that callers rely on), false on
/// success. On success, pushes the register operand, an optional token
/// operand for an explicit ".kind" qualifier, and any trailing "[index]".
bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
  if (getTok().isNot(K: AsmToken::Identifier))
    return true;

  SMLoc S = getLoc();
  // Check for a vector register specifier first.
  StringRef Kind;
  MCRegister Reg;
  ParseStatus Res = tryParseVectorRegister(Reg, Kind, MatchKind: RegKind::NeonVector);
  if (!Res.isSuccess())
    return true;

  // Validate (and decode) the ".8b"/".4s"/... suffix; an empty Kind is also
  // accepted by parseVectorKind.
  const auto &KindRes = parseVectorKind(Suffix: Kind, VectorKind: RegKind::NeonVector);
  if (!KindRes)
    return true;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(
      Elt: AArch64Operand::CreateVectorReg(Reg, Kind: RegKind::NeonVector, ElementWidth,
                                       S, E: getLoc(), Ctx&: getContext()));

  // If there was an explicit qualifier, that goes on as a literal text
  // operand.
  if (!Kind.empty())
    Operands.push_back(Elt: AArch64Operand::CreateToken(Str: Kind, S, Ctx&: getContext()));

  return tryParseVectorIndex(Operands).isFailure();
}
4486
4487ParseStatus AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
4488 SMLoc SIdx = getLoc();
4489 if (parseOptionalToken(T: AsmToken::LBrac)) {
4490 const MCExpr *ImmVal;
4491 if (getParser().parseExpression(Res&: ImmVal))
4492 return ParseStatus::NoMatch;
4493 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
4494 if (!MCE)
4495 return TokError(Msg: "immediate value expected for vector index");
4496
4497 SMLoc E = getLoc();
4498
4499 if (parseToken(T: AsmToken::RBrac, Msg: "']' expected"))
4500 return ParseStatus::Failure;
4501
4502 Operands.push_back(Elt: AArch64Operand::CreateVectorIndex(Idx: MCE->getValue(), S: SIdx,
4503 E, Ctx&: getContext()));
4504 return ParseStatus::Success;
4505 }
4506
4507 return ParseStatus::NoMatch;
4508}
4509
// tryParseVectorRegister - Try to parse a vector register name with
// optional kind specifier. If it is a register specifier, eat the token
// and return it.
// On success, Reg receives the matched register and Kind the suffix
// *including* its leading '.' (empty if no suffix was written).
ParseStatus AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg,
                                                     StringRef &Kind,
                                                     RegKind MatchKind) {
  const AsmToken &Tok = getTok();

  if (Tok.isNot(K: AsmToken::Identifier))
    return ParseStatus::NoMatch;

  StringRef Name = Tok.getString();
  // If there is a kind specifier, it's separated from the register name by
  // a '.'.
  size_t Start = 0, Next = Name.find(C: '.');
  StringRef Head = Name.slice(Start, End: Next);
  MCRegister RegNum = matchRegisterNameAlias(Name: Head, Kind: MatchKind);

  if (RegNum) {
    if (Next != StringRef::npos) {
      // Keep the '.' in the suffix; parseVectorKind expects it.
      Kind = Name.substr(Start: Next);
      if (!isValidVectorKind(Suffix: Kind, VectorKind: MatchKind))
        return TokError(Msg: "invalid vector kind qualifier");
    }
    Lex(); // Eat the register token.

    Reg = RegNum;
    return ParseStatus::Success;
  }

  return ParseStatus::NoMatch;
}
4542
4543ParseStatus AArch64AsmParser::tryParseSVEPredicateOrPredicateAsCounterVector(
4544 OperandVector &Operands) {
4545 ParseStatus Status =
4546 tryParseSVEPredicateVector<RegKind::SVEPredicateAsCounter>(Operands);
4547 if (!Status.isSuccess())
4548 Status = tryParseSVEPredicateVector<RegKind::SVEPredicateVector>(Operands);
4549 return Status;
4550}
4551
/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
/// Handles the register itself, an optional "[index]" (or, for regular
/// predicate vectors, an immediately-following operand after '['), and an
/// optional "/z" or "/m" predication suffix.
template <RegKind RK>
ParseStatus
AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
  // Check for a SVE predicate register specifier first.
  const SMLoc S = getLoc();
  StringRef Kind;
  MCRegister RegNum;
  auto Res = tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RK);
  if (!Res.isSuccess())
    return Res;

  const auto &KindRes = parseVectorKind(Suffix: Kind, VectorKind: RK);
  if (!KindRes)
    return ParseStatus::NoMatch;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(Elt: AArch64Operand::CreateVectorReg(
      Reg: RegNum, Kind: RK, ElementWidth, S,
      E: getLoc(), Ctx&: getContext()));

  if (getLexer().is(K: AsmToken::LBrac)) {
    if (RK == RegKind::SVEPredicateAsCounter) {
      // Predicate-as-counter registers may carry a real vector index.
      ParseStatus ResIndex = tryParseVectorIndex(Operands);
      if (ResIndex.isSuccess())
        return ParseStatus::Success;
    } else {
      // Indexed predicate, there's no comma so try parse the next operand
      // immediately.
      if (parseOperand(Operands, isCondCode: false, invertCondCode: false))
        return ParseStatus::NoMatch;
    }
  }

  // Not all predicates are followed by a '/m' or '/z'.
  if (getTok().isNot(K: AsmToken::Slash))
    return ParseStatus::Success;

  // But when they do they shouldn't have an element type suffix.
  if (!Kind.empty())
    return Error(L: S, Msg: "not expecting size suffix");

  // Add a literal slash as operand
  Operands.push_back(Elt: AArch64Operand::CreateToken(Str: "/", S: getLoc(), Ctx&: getContext()));

  Lex(); // Eat the slash.

  // Zeroing or merging?
  auto Pred = getTok().getString().lower();
  if (RK == RegKind::SVEPredicateAsCounter && Pred != "z")
    return Error(L: getLoc(), Msg: "expecting 'z' predication");

  if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m")
    return Error(L: getLoc(), Msg: "expecting 'm' or 'z' predication");

  // Add zero/merge token.
  const char *ZM = Pred == "z" ? "z" : "m";
  Operands.push_back(Elt: AArch64Operand::CreateToken(Str: ZM, S: getLoc(), Ctx&: getContext()));

  Lex(); // Eat zero/merge token.
  return ParseStatus::Success;
}
4614
4615/// parseRegister - Parse a register operand.
4616bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
4617 // Try for a Neon vector register.
4618 if (!tryParseNeonVectorRegister(Operands))
4619 return false;
4620
4621 if (tryParseZTOperand(Operands).isSuccess())
4622 return false;
4623
4624 // Otherwise try for a scalar register.
4625 if (tryParseGPROperand<false>(Operands).isSuccess())
4626 return false;
4627
4628 return true;
4629}
4630
/// parseSymbolicImmVal - Parse an immediate expression with an optional
/// leading relocation specifier of the form ":spec:" (e.g. ":lo12:sym").
/// On success ImmVal holds the (possibly specifier-wrapped) expression and
/// false is returned; true means an error was already reported.
bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  bool HasELFModifier = false;
  AArch64::Specifier RefKind;
  SMLoc Loc = getLexer().getLoc();
  if (parseOptionalToken(T: AsmToken::Colon)) {
    HasELFModifier = true;

    if (getTok().isNot(K: AsmToken::Identifier))
      return TokError(Msg: "expect relocation specifier in operand after ':'");

    // Specifier names are matched case-insensitively.
    std::string LowerCase = getTok().getIdentifier().lower();
    RefKind = StringSwitch<AArch64::Specifier>(LowerCase)
                  .Case(S: "lo12", Value: AArch64::S_LO12)
                  .Case(S: "abs_g3", Value: AArch64::S_ABS_G3)
                  .Case(S: "abs_g2", Value: AArch64::S_ABS_G2)
                  .Case(S: "abs_g2_s", Value: AArch64::S_ABS_G2_S)
                  .Case(S: "abs_g2_nc", Value: AArch64::S_ABS_G2_NC)
                  .Case(S: "abs_g1", Value: AArch64::S_ABS_G1)
                  .Case(S: "abs_g1_s", Value: AArch64::S_ABS_G1_S)
                  .Case(S: "abs_g1_nc", Value: AArch64::S_ABS_G1_NC)
                  .Case(S: "abs_g0", Value: AArch64::S_ABS_G0)
                  .Case(S: "abs_g0_s", Value: AArch64::S_ABS_G0_S)
                  .Case(S: "abs_g0_nc", Value: AArch64::S_ABS_G0_NC)
                  .Case(S: "prel_g3", Value: AArch64::S_PREL_G3)
                  .Case(S: "prel_g2", Value: AArch64::S_PREL_G2)
                  .Case(S: "prel_g2_nc", Value: AArch64::S_PREL_G2_NC)
                  .Case(S: "prel_g1", Value: AArch64::S_PREL_G1)
                  .Case(S: "prel_g1_nc", Value: AArch64::S_PREL_G1_NC)
                  .Case(S: "prel_g0", Value: AArch64::S_PREL_G0)
                  .Case(S: "prel_g0_nc", Value: AArch64::S_PREL_G0_NC)
                  .Case(S: "dtprel_g2", Value: AArch64::S_DTPREL_G2)
                  .Case(S: "dtprel_g1", Value: AArch64::S_DTPREL_G1)
                  .Case(S: "dtprel_g1_nc", Value: AArch64::S_DTPREL_G1_NC)
                  .Case(S: "dtprel_g0", Value: AArch64::S_DTPREL_G0)
                  .Case(S: "dtprel_g0_nc", Value: AArch64::S_DTPREL_G0_NC)
                  .Case(S: "dtprel_hi12", Value: AArch64::S_DTPREL_HI12)
                  .Case(S: "dtprel_lo12", Value: AArch64::S_DTPREL_LO12)
                  .Case(S: "dtprel_lo12_nc", Value: AArch64::S_DTPREL_LO12_NC)
                  .Case(S: "pg_hi21_nc", Value: AArch64::S_ABS_PAGE_NC)
                  .Case(S: "tprel_g2", Value: AArch64::S_TPREL_G2)
                  .Case(S: "tprel_g1", Value: AArch64::S_TPREL_G1)
                  .Case(S: "tprel_g1_nc", Value: AArch64::S_TPREL_G1_NC)
                  .Case(S: "tprel_g0", Value: AArch64::S_TPREL_G0)
                  .Case(S: "tprel_g0_nc", Value: AArch64::S_TPREL_G0_NC)
                  .Case(S: "tprel_hi12", Value: AArch64::S_TPREL_HI12)
                  .Case(S: "tprel_lo12", Value: AArch64::S_TPREL_LO12)
                  .Case(S: "tprel_lo12_nc", Value: AArch64::S_TPREL_LO12_NC)
                  .Case(S: "tlsdesc_lo12", Value: AArch64::S_TLSDESC_LO12)
                  .Case(S: "tlsdesc_auth_lo12", Value: AArch64::S_TLSDESC_AUTH_LO12)
                  .Case(S: "got", Value: AArch64::S_GOT_PAGE)
                  .Case(S: "gotpage_lo15", Value: AArch64::S_GOT_PAGE_LO15)
                  .Case(S: "got_lo12", Value: AArch64::S_GOT_LO12)
                  .Case(S: "got_auth", Value: AArch64::S_GOT_AUTH_PAGE)
                  .Case(S: "got_auth_lo12", Value: AArch64::S_GOT_AUTH_LO12)
                  .Case(S: "gottprel", Value: AArch64::S_GOTTPREL_PAGE)
                  .Case(S: "gottprel_lo12", Value: AArch64::S_GOTTPREL_LO12_NC)
                  .Case(S: "gottprel_g1", Value: AArch64::S_GOTTPREL_G1)
                  .Case(S: "gottprel_g0_nc", Value: AArch64::S_GOTTPREL_G0_NC)
                  .Case(S: "tlsdesc", Value: AArch64::S_TLSDESC_PAGE)
                  .Case(S: "tlsdesc_auth", Value: AArch64::S_TLSDESC_AUTH_PAGE)
                  .Case(S: "secrel_lo12", Value: AArch64::S_SECREL_LO12)
                  .Case(S: "secrel_hi12", Value: AArch64::S_SECREL_HI12)
                  .Default(Value: AArch64::S_INVALID);

    if (RefKind == AArch64::S_INVALID)
      return TokError(Msg: "expect relocation specifier in operand after ':'");

    Lex(); // Eat identifier

    if (parseToken(T: AsmToken::Colon, Msg: "expect ':' after relocation specifier"))
      return true;
  }

  if (getParser().parseExpression(Res&: ImmVal))
    return true;

  // Wrap the expression so the specifier travels with it to relocation
  // selection.
  if (HasELFModifier)
    ImmVal = MCSpecifierExpr::create(Expr: ImmVal, S: RefKind, Ctx&: getContext(), Loc);

  // On targets with subsections-via-symbols (Mach-O style), also accept an
  // '@'-specifier and an optional trailing "+/- constant" term.
  SMLoc EndLoc;
  if (getContext().getAsmInfo()->hasSubsectionsViaSymbols()) {
    if (getParser().parseAtSpecifier(Res&: ImmVal, EndLoc))
      return true;
    const MCExpr *Term;
    MCBinaryExpr::Opcode Opcode;
    if (parseOptionalToken(T: AsmToken::Plus))
      Opcode = MCBinaryExpr::Add;
    else if (parseOptionalToken(T: AsmToken::Minus))
      Opcode = MCBinaryExpr::Sub;
    else
      return false;
    if (getParser().parsePrimaryExpr(Res&: Term, EndLoc))
      return true;
    ImmVal = MCBinaryExpr::create(Op: Opcode, LHS: ImmVal, RHS: Term, Ctx&: getContext());
  }

  return false;
}
4729
/// tryParseMatrixTileList - Parse an SME matrix tile list operand of the
/// form "{ za0.d, za1.d, ... }", including the empty list "{}" and the
/// "{za}" whole-array alias. The resulting operand carries a bitmask over
/// the ZAD0..ZAD7 double-word tiles that the listed tiles alias.
ParseStatus AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
  if (getTok().isNot(K: AsmToken::LCurly))
    return ParseStatus::NoMatch;

  // Parse one "zaN.<size>" tile, returning its register and element width.
  auto ParseMatrixTile = [this](unsigned &Reg,
                                unsigned &ElementWidth) -> ParseStatus {
    StringRef Name = getTok().getString();
    size_t DotPosition = Name.find(C: '.');
    if (DotPosition == StringRef::npos)
      return ParseStatus::NoMatch;

    unsigned RegNum = matchMatrixTileListRegName(Name);
    if (!RegNum)
      return ParseStatus::NoMatch;

    // Tail keeps the leading '.' as parseVectorKind expects.
    StringRef Tail = Name.drop_front(N: DotPosition);
    const std::optional<std::pair<int, int>> &KindRes =
        parseVectorKind(Suffix: Tail, VectorKind: RegKind::Matrix);
    if (!KindRes)
      return TokError(
          Msg: "Expected the register to be followed by element width suffix");
    ElementWidth = KindRes->second;
    Reg = RegNum;
    Lex(); // Eat the register.
    return ParseStatus::Success;
  };

  SMLoc S = getLoc();
  // Keep the '{' so it can be un-lexed if the first tile fails to parse.
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  // Empty matrix list
  if (parseOptionalToken(T: AsmToken::RCurly)) {
    Operands.push_back(Elt: AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0, S, E: getLoc(), Ctx&: getContext()));
    return ParseStatus::Success;
  }

  // Try parse {za} alias early
  if (getTok().getString().equals_insensitive(RHS: "za")) {
    Lex(); // Eat 'za'

    if (parseToken(T: AsmToken::RCurly, Msg: "'}' expected"))
      return ParseStatus::Failure;

    // 0xFF selects all eight ZAD tiles, i.e. the whole ZA array.
    Operands.push_back(Elt: AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0xFF, S, E: getLoc(), Ctx&: getContext()));
    return ParseStatus::Success;
  }

  SMLoc TileLoc = getLoc();

  unsigned FirstReg, ElementWidth;
  auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
  if (!ParseRes.isSuccess()) {
    // Roll back the '{' so other list-operand parsers can try.
    getLexer().UnLex(Token: LCurly);
    return ParseRes;
  }

  const MCRegisterInfo *RI = getContext().getRegisterInfo();

  unsigned PrevReg = FirstReg;

  // DRegs accumulates the ZAD tiles aliased by every tile seen so far.
  SmallSet<unsigned, 8> DRegs;
  AArch64Operand::ComputeRegsForAlias(Reg: FirstReg, OutRegs&: DRegs, ElementWidth);

  SmallSet<unsigned, 8> SeenRegs;
  SeenRegs.insert(V: FirstReg);

  while (parseOptionalToken(T: AsmToken::Comma)) {
    TileLoc = getLoc();
    unsigned Reg, NextElementWidth;
    ParseRes = ParseMatrixTile(Reg, NextElementWidth);
    if (!ParseRes.isSuccess())
      return ParseRes;

    // Element size must match on all regs in the list.
    if (ElementWidth != NextElementWidth)
      return Error(L: TileLoc, Msg: "mismatched register size suffix");

    // Out-of-order and duplicate tiles are diagnosed as warnings, not errors.
    if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(Reg: PrevReg)))
      Warning(L: TileLoc, Msg: "tile list not in ascending order");

    if (SeenRegs.contains(V: Reg))
      Warning(L: TileLoc, Msg: "duplicate tile in list");
    else {
      SeenRegs.insert(V: Reg);
      AArch64Operand::ComputeRegsForAlias(Reg, OutRegs&: DRegs, ElementWidth);
    }

    PrevReg = Reg;
  }

  if (parseToken(T: AsmToken::RCurly, Msg: "'}' expected"))
    return ParseStatus::Failure;

  // Fold the aliased ZAD tiles into a bitmask relative to ZAD0.
  unsigned RegMask = 0;
  for (auto Reg : DRegs)
    RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
                       RI->getEncodingValue(Reg: AArch64::ZAD0));
  Operands.push_back(
      Elt: AArch64Operand::CreateMatrixTileList(RegMask, S, E: getLoc(), Ctx&: getContext()));

  return ParseStatus::Success;
}
4835
/// tryParseVectorList - Parse a '{...}' list of vector registers of kind
/// VectorKind. Supports both comma-separated lists (with a constant stride,
/// wrapping at the end of the register file) and the "v0-v3" range syntax.
/// If ExpectMatch is true, a malformed first register is diagnosed instead of
/// being treated as NoMatch.
template <RegKind VectorKind>
ParseStatus AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
                                                 bool ExpectMatch) {
  MCAsmParser &Parser = getParser();
  if (!getTok().is(K: AsmToken::LCurly))
    return ParseStatus::NoMatch;

  // Wrapper around parse function
  auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
                            bool NoMatchIsError) -> ParseStatus {
    auto RegTok = getTok();
    auto ParseRes = tryParseVectorRegister(Reg, Kind, MatchKind: VectorKind);
    if (ParseRes.isSuccess()) {
      if (parseVectorKind(Suffix: Kind, VectorKind))
        return ParseRes;
      llvm_unreachable("Expected a valid vector kind");
    }

    // "zt0" lists are handled by a different operand parser; decline quietly.
    if (RegTok.is(K: AsmToken::Identifier) && ParseRes.isNoMatch() &&
        RegTok.getString().equals_insensitive(RHS: "zt0"))
      return ParseStatus::NoMatch;

    // "za"-prefixed names are also left for other parsers; everything else
    // is an error when the caller demanded a match.
    if (RegTok.isNot(K: AsmToken::Identifier) || ParseRes.isFailure() ||
        (ParseRes.isNoMatch() && NoMatchIsError &&
         !RegTok.getString().starts_with_insensitive(Prefix: "za")))
      return Error(L: Loc, Msg: "vector register expected");

    return ParseStatus::NoMatch;
  };

  unsigned NumRegs = getNumRegsForRegKind(K: VectorKind);
  SMLoc S = getLoc();
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  StringRef Kind;
  MCRegister FirstReg;
  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);

  // Put back the original left bracket if there was no match, so that
  // different types of list-operands can be matched (e.g. SVE, Neon).
  if (ParseRes.isNoMatch())
    Parser.getLexer().UnLex(Token: LCurly);

  if (!ParseRes.isSuccess())
    return ParseRes;

  MCRegister PrevReg = FirstReg;
  unsigned Count = 1;

  unsigned Stride = 1;
  if (parseOptionalToken(T: AsmToken::Minus)) {
    // Range syntax, e.g. "{v0.4s-v3.4s}".
    SMLoc Loc = getLoc();
    StringRef NextKind;

    MCRegister Reg;
    ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
    if (!ParseRes.isSuccess())
      return ParseRes;

    // Any Kind suffices must match on all regs in the list.
    if (Kind != NextKind)
      return Error(L: Loc, Msg: "mismatched register size suffix");

    // Distance from first to last register, wrapping at the register-file
    // boundary.
    unsigned Space =
        (PrevReg < Reg) ? (Reg - PrevReg) : (NumRegs - (PrevReg - Reg));

    if (Space == 0 || Space > 3)
      return Error(L: Loc, Msg: "invalid number of vectors");

    Count += Space;
  }
  else {
    // Comma-separated syntax; the stride is fixed by the first pair of
    // registers and every later register must continue it.
    bool HasCalculatedStride = false;
    while (parseOptionalToken(T: AsmToken::Comma)) {
      SMLoc Loc = getLoc();
      StringRef NextKind;
      MCRegister Reg;
      ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
      if (!ParseRes.isSuccess())
        return ParseRes;

      // Any Kind suffices must match on all regs in the list.
      if (Kind != NextKind)
        return Error(L: Loc, Msg: "mismatched register size suffix");

      unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
      unsigned PrevRegVal =
          getContext().getRegisterInfo()->getEncodingValue(Reg: PrevReg);
      if (!HasCalculatedStride) {
        Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
                                       : (NumRegs - (PrevRegVal - RegVal));
        HasCalculatedStride = true;
      }

      // Register must be incremental (with a wraparound at last register).
      if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs))
        return Error(L: Loc, Msg: "registers must have the same sequential stride");

      PrevReg = Reg;
      ++Count;
    }
  }

  if (parseToken(T: AsmToken::RCurly, Msg: "'}' expected"))
    return ParseStatus::Failure;

  if (Count > 4)
    return Error(L: S, Msg: "invalid number of vectors");

  unsigned NumElements = 0;
  unsigned ElementWidth = 0;
  if (!Kind.empty()) {
    if (const auto &VK = parseVectorKind(Suffix: Kind, VectorKind))
      std::tie(args&: NumElements, args&: ElementWidth) = *VK;
  }

  Operands.push_back(Elt: AArch64Operand::CreateVectorList(
      Reg: FirstReg, Count, Stride, NumElements, ElementWidth, RegisterKind: VectorKind, S,
      E: getLoc(), Ctx&: getContext()));

  // An optional trailing "[index]" applies to the whole list.
  if (getTok().is(K: AsmToken::LBrac)) {
    ParseStatus Res = tryParseVectorIndex(Operands);
    if (Res.isFailure())
      return ParseStatus::Failure;
    return ParseStatus::Success;
  }

  return ParseStatus::Success;
}
4966
4967/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4968bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4969 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, ExpectMatch: true);
4970 if (!ParseRes.isSuccess())
4971 return true;
4972
4973 return tryParseVectorIndex(Operands).isFailure();
4974}
4975
/// tryParseGPR64sp0Operand - Parse a scalar register optionally followed by
/// ", #0" (or ", 0"); any other index value is rejected with an error.
ParseStatus AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  MCRegister RegNum;
  ParseStatus Res = tryParseScalarRegister(RegNum);
  if (!Res.isSuccess())
    return Res;

  // Bare register with no trailing index.
  if (!parseOptionalToken(T: AsmToken::Comma)) {
    Operands.push_back(Elt: AArch64Operand::CreateReg(
        Reg: RegNum, Kind: RegKind::Scalar, S: StartLoc, E: getLoc(), Ctx&: getContext()));
    return ParseStatus::Success;
  }

  // The '#' before the index is optional.
  parseOptionalToken(T: AsmToken::Hash);

  if (getTok().isNot(K: AsmToken::Integer))
    return Error(L: getLoc(), Msg: "index must be absent or #0");

  // The index expression must fold to the constant zero.
  const MCExpr *ImmVal;
  if (getParser().parseExpression(Res&: ImmVal) || !isa<MCConstantExpr>(Val: ImmVal) ||
      cast<MCConstantExpr>(Val: ImmVal)->getValue() != 0)
    return Error(L: getLoc(), Msg: "index must be absent or #0");

  Operands.push_back(Elt: AArch64Operand::CreateReg(
      Reg: RegNum, Kind: RegKind::Scalar, S: StartLoc, E: getLoc(), Ctx&: getContext()));
  return ParseStatus::Success;
}
5004
/// tryParseZTOperand - Parse a ZT0 lookup-table register operand, optionally
/// followed by "[<imm>]" with an optional ", mul ..." multiplier inside the
/// brackets; the brackets and index are pushed as separate operands.
ParseStatus AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();
  const AsmToken &Tok = getTok();
  // Register-name matching is done on the lower-cased spelling.
  std::string Name = Tok.getString().lower();

  MCRegister Reg = matchRegisterNameAlias(Name, Kind: RegKind::LookupTable);

  if (!Reg)
    return ParseStatus::NoMatch;

  Operands.push_back(Elt: AArch64Operand::CreateReg(
      Reg, Kind: RegKind::LookupTable, S: StartLoc, E: getLoc(), Ctx&: getContext()));
  Lex(); // Eat register.

  // Check if register is followed by an index
  if (parseOptionalToken(T: AsmToken::LBrac)) {
    Operands.push_back(
        Elt: AArch64Operand::CreateToken(Str: "[", S: getLoc(), Ctx&: getContext()));
    const MCExpr *ImmVal;
    if (getParser().parseExpression(Res&: ImmVal))
      return ParseStatus::NoMatch;
    // The index must fold to a constant.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
    if (!MCE)
      return TokError(Msg: "immediate value expected for vector index");
    Operands.push_back(Elt: AArch64Operand::CreateImm(
        Val: MCConstantExpr::create(Value: MCE->getValue(), Ctx&: getContext()), S: StartLoc,
        E: getLoc(), Ctx&: getContext()));
    // Optional multiplier, e.g. ", mul vl" or ", mul #<imm>".
    if (parseOptionalToken(T: AsmToken::Comma))
      if (parseOptionalMulOperand(Operands))
        return ParseStatus::Failure;
    if (parseToken(T: AsmToken::RBrac, Msg: "']' expected"))
      return ParseStatus::Failure;
    Operands.push_back(
        Elt: AArch64Operand::CreateToken(Str: "]", S: getLoc(), Ctx&: getContext()));
  }
  return ParseStatus::Success;
}
5042
/// tryParseGPROperand - Parse a scalar GPR operand. When ParseShiftExtend is
/// set, an optional ", <shift|extend> ..." modifier after the register is
/// folded into the register operand. EqTy is forwarded to the created
/// operand to control how it compares against register constraints.
template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
ParseStatus AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  MCRegister RegNum;
  ParseStatus Res = tryParseScalarRegister(RegNum);
  if (!Res.isSuccess())
    return Res;

  // No shift/extend is the default.
  if (!ParseShiftExtend || getTok().isNot(K: AsmToken::Comma)) {
    Operands.push_back(Elt: AArch64Operand::CreateReg(
        Reg: RegNum, Kind: RegKind::Scalar, S: StartLoc, E: getLoc(), Ctx&: getContext(), EqTy));
    return ParseStatus::Success;
  }

  // Eat the comma
  Lex();

  // Match the shift
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
  Res = tryParseOptionalShiftExtend(Operands&: ExtOpnd);
  if (!Res.isSuccess())
    return Res;

  // Merge the parsed shift/extend into the register operand itself.
  auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
  Operands.push_back(Elt: AArch64Operand::CreateReg(
      Reg: RegNum, Kind: RegKind::Scalar, S: StartLoc, E: Ext->getEndLoc(), Ctx&: getContext(), EqTy,
      ExtTy: Ext->getShiftExtendType(), ShiftAmount: Ext->getShiftExtendAmount(),
      HasExplicitAmount: Ext->hasShiftExtendAmount()));

  return ParseStatus::Success;
}
5076
/// parseOptionalMulOperand - Parse the SVE "mul vl" / "mul #<imm>" decoration
/// that may follow an immediate. Returns false if a valid decoration was
/// parsed and pushed as operands; true otherwise (nothing consumed unless an
/// error was reported after "mul").
bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();

  // Some SVE instructions have a decoration after the immediate, i.e.
  // "mul vl". We parse them here and add tokens, which must be present in the
  // asm string in the tablegen instruction.
  // Peek past "mul" before committing: it must be followed by "vl" or '#'.
  bool NextIsVL =
      Parser.getLexer().peekTok().getString().equals_insensitive(RHS: "vl");
  bool NextIsHash = Parser.getLexer().peekTok().is(K: AsmToken::Hash);
  if (!getTok().getString().equals_insensitive(RHS: "mul") ||
      !(NextIsVL || NextIsHash))
    return true;

  Operands.push_back(
      Elt: AArch64Operand::CreateToken(Str: "mul", S: getLoc(), Ctx&: getContext()));
  Lex(); // Eat the "mul"

  if (NextIsVL) {
    Operands.push_back(
        Elt: AArch64Operand::CreateToken(Str: "vl", S: getLoc(), Ctx&: getContext()));
    Lex(); // Eat the "vl"
    return false;
  }

  if (NextIsHash) {
    Lex(); // Eat the #
    SMLoc S = getLoc();

    // Parse immediate operand.
    const MCExpr *ImmVal;
    if (!Parser.parseExpression(Res&: ImmVal))
      if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal)) {
        Operands.push_back(Elt: AArch64Operand::CreateImm(
            Val: MCConstantExpr::create(Value: MCE->getValue(), Ctx&: getContext()), S, E: getLoc(),
            Ctx&: getContext()));
        return false;
      }
  }

  // "mul" was consumed but no valid "vl"/constant followed.
  return Error(L: getLoc(), Msg: "expected 'vl' or '#<imm>'");
}
5118
5119bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
5120 StringRef &VecGroup) {
5121 MCAsmParser &Parser = getParser();
5122 auto Tok = Parser.getTok();
5123 if (Tok.isNot(K: AsmToken::Identifier))
5124 return true;
5125
5126 StringRef VG = StringSwitch<StringRef>(Tok.getString().lower())
5127 .Case(S: "vgx2", Value: "vgx2")
5128 .Case(S: "vgx4", Value: "vgx4")
5129 .Default(Value: "");
5130
5131 if (VG.empty())
5132 return true;
5133
5134 VecGroup = VG;
5135 Parser.Lex(); // Eat vgx[2|4]
5136 return false;
5137}
5138
5139bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
5140 auto Tok = getTok();
5141 if (Tok.isNot(K: AsmToken::Identifier))
5142 return true;
5143
5144 auto Keyword = Tok.getString();
5145 Keyword = StringSwitch<StringRef>(Keyword.lower())
5146 .Case(S: "sm", Value: "sm")
5147 .Case(S: "za", Value: "za")
5148 .Default(Value: Keyword);
5149 Operands.push_back(
5150 Elt: AArch64Operand::CreateToken(Str: Keyword, S: Tok.getLoc(), Ctx&: getContext()));
5151
5152 Lex();
5153 return false;
5154}
5155
/// parseOperand - Parse an AArch64 instruction operand. For now this parses
/// the operand regardless of the mnemonic.
///
/// \param Operands       vector the parsed operand(s) are appended to.
/// \param isCondCode     true when a condition-code operand is expected at
///                       this position.
/// \param invertCondCode true when the parsed condition code must be inverted
///                       (used by aliases such as cset/cinc).
/// \returns true on error (diagnostic already emitted), false on success.
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                    bool invertCondCode) {
  MCAsmParser &Parser = getParser();

  // First give the tablegen'erated custom operand parsers a chance to match.
  ParseStatus ResTy =
      MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/true);

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  if (ResTy.isSuccess())
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy.isFailure())
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  // Helper: after an operand an optional ", <shift|extend>" may follow. If a
  // comma is consumed but no shift/extend matches, the saved token is pushed
  // back so the comma still separates the next operand.
  auto parseOptionalShiftExtend = [&](AsmToken SavedTok) {
    if (parseOptionalToken(T: AsmToken::Comma)) {
      ParseStatus Res = tryParseOptionalShiftExtend(Operands);
      if (!Res.isNoMatch())
        return Res.isFailure();
      getLexer().UnLex(Token: SavedTok);
    }
    return false;
  };
  switch (getLexer().getKind()) {
  default: {
    // Anything else is treated as a (possibly relocated) symbolic immediate.
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(ImmVal&: Expr))
      return Error(L: S, Msg: "invalid operand");

    SMLoc E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
    Operands.push_back(Elt: AArch64Operand::CreateImm(Val: Expr, S, E, Ctx&: getContext()));
    return parseOptionalShiftExtend(getTok());
  }
  case AsmToken::LBrac: {
    Operands.push_back(
        Elt: AArch64Operand::CreateToken(Str: "[", S: getLoc(), Ctx&: getContext()));
    Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, isCondCode: false, invertCondCode: false);
  }
  case AsmToken::LCurly: {
    // Try a NEON vector register list first; on no-match fall through to
    // treating '{' as a plain token (SME ZA tile groups etc.).
    if (!parseNeonVectorList(Operands))
      return false;

    Operands.push_back(
        Elt: AArch64Operand::CreateToken(Str: "{", S: getLoc(), Ctx&: getContext()));
    Lex(); // Eat '{'

    // There's no comma after a '{', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, isCondCode: false, invertCondCode: false);
  }
  case AsmToken::Identifier: {
    // See if this is a "VG" decoration used by SME instructions.
    StringRef VecGroup;
    if (!parseOptionalVGOperand(Operands, VecGroup)) {
      Operands.push_back(
          Elt: AArch64Operand::CreateToken(Str: VecGroup, S: getLoc(), Ctx&: getContext()));
      return false;
    }
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands)) {
      // Parse an optional shift/extend modifier.
      AsmToken SavedTok = getTok();
      if (parseOptionalToken(T: AsmToken::Comma)) {
        // The operand after the register may be a label (e.g. ADR/ADRP). Check
        // such cases and don't report an error when <label> happens to match a
        // shift/extend modifier.
        ParseStatus Res = MatchOperandParserImpl(Operands, Mnemonic,
                                                 /*ParseForAllFeatures=*/true);
        if (!Res.isNoMatch())
          return Res.isFailure();
        Res = tryParseOptionalShiftExtend(Operands);
        if (!Res.isNoMatch())
          return Res.isFailure();
        // Neither matched: push the comma back so it separates the next
        // operand.
        getLexer().UnLex(Token: SavedTok);
      }
      return false;
    }

    // See if this is a "mul vl" decoration or "mul #<int>" operand used
    // by SVE instructions.
    if (!parseOptionalMulOperand(Operands))
      return false;

    // If this is a two-word mnemonic, parse its special keyword
    // operand as an identifier.
    if (Mnemonic == "brb" || Mnemonic == "smstart" || Mnemonic == "smstop" ||
        Mnemonic == "gcsb")
      return parseKeywordOperand(Operands);

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal, *Term;
    S = getLoc();
    if (getParser().parseExpression(Res&: IdVal))
      return true;
    if (getParser().parseAtSpecifier(Res&: IdVal, EndLoc&: E))
      return true;
    // Accept an optional trailing "+/-" followed by a primary expression
    // (e.g. sym@spec + 4).
    std::optional<MCBinaryExpr::Opcode> Opcode;
    if (parseOptionalToken(T: AsmToken::Plus))
      Opcode = MCBinaryExpr::Add;
    else if (parseOptionalToken(T: AsmToken::Minus))
      Opcode = MCBinaryExpr::Sub;
    if (Opcode) {
      if (getParser().parsePrimaryExpr(Res&: Term, EndLoc&: E))
        return true;
      IdVal = MCBinaryExpr::create(Op: *Opcode, LHS: IdVal, RHS: Term, Ctx&: getContext());
    }
    Operands.push_back(Elt: AArch64Operand::CreateImm(Val: IdVal, S, E, Ctx&: getContext()));

    // Parse an optional shift/extend modifier.
    return parseOptionalShiftExtend(getTok());
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();

    parseOptionalToken(T: AsmToken::Hash);

    // Parse a negative sign
    bool isNegative = false;
    if (getTok().is(K: AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(K: AsmToken::Real))
        Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = getTok();
    if (Tok.is(K: AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt" && Mnemonic != "fcmne")
        return TokError(Msg: "unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError(Msg: "expected floating-point constant #0.0");
      Lex(); // Eat the token.

      // Emit "#0" and ".0" as two raw tokens, matching what the matcher
      // expects for the fcmp-class aliases.
      Operands.push_back(Elt: AArch64Operand::CreateToken(Str: "#0", S, Ctx&: getContext()));
      Operands.push_back(Elt: AArch64Operand::CreateToken(Str: ".0", S, Ctx&: getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
    Operands.push_back(Elt: AArch64Operand::CreateImm(Val: ImmVal, S, E, Ctx&: getContext()));

    // Parse an optional shift/extend modifier.
    return parseOptionalShiftExtend(Tok);
  }
  case AsmToken::Equal: {
    SMLoc Loc = getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return TokError(Msg: "unexpected token in operand");
    Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(Res&: SubExprVal))
      return true;

    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
      return Error(L: Loc, Msg: "Only valid when first operand is register");

    bool IsXReg =
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Reg: Operands[1]->getReg());

    MCContext& Ctx = getContext();
    E = SMLoc::getFromPointer(Ptr: Loc.getPointer() - 1);
    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
    if (isa<MCConstantExpr>(Val: SubExprVal)) {
      uint64_t Imm = (cast<MCConstantExpr>(Val: SubExprVal))->getValue();
      // Normalise to a 16-bit chunk plus an LSL shift amount, if possible.
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      while (Imm > 0xFFFF && llvm::countr_zero(Val: Imm) >= 16) {
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
        // Rewrite the pseudo in place as "movz <reg>, #imm [, lsl #shift]".
        Operands[0] = AArch64Operand::CreateToken(Str: "movz", S: Loc, Ctx);
        Operands.push_back(Elt: AArch64Operand::CreateImm(
            Val: MCConstantExpr::create(Value: Imm, Ctx), S, E, Ctx));
        if (ShiftAmt)
          Operands.push_back(Elt: AArch64Operand::CreateShiftExtend(ShOp: AArch64_AM::LSL,
                              Val: ShiftAmt, HasExplicitAmount: true, S, E, Ctx));
        return false;
      }
      APInt Simm = APInt(64, Imm << ShiftAmt);
      // check if the immediate is an unsigned or signed 32-bit int for W regs
      if (!IsXReg && !(Simm.isIntN(N: 32) || Simm.isSignedIntN(N: 32)))
        return Error(L: Loc, Msg: "Immediate too large for register");
    }
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc =
        getTargetStreamer().addConstantPoolEntry(SubExprVal, Size: IsXReg ? 8 : 4, Loc);
    Operands.push_back(Elt: AArch64Operand::CreateImm(Val: CPLoc, S, E, Ctx));
    return false;
  }
  }
}
5381
5382bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
5383 const MCExpr *Expr = nullptr;
5384 SMLoc L = getLoc();
5385 if (check(P: getParser().parseExpression(Res&: Expr), Loc: L, Msg: "expected expression"))
5386 return true;
5387 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Val: Expr);
5388 if (check(P: !Value, Loc: L, Msg: "expected constant expression"))
5389 return true;
5390 Out = Value->getValue();
5391 return false;
5392}
5393
5394bool AArch64AsmParser::parseComma() {
5395 if (check(P: getTok().isNot(K: AsmToken::Comma), Loc: getLoc(), Msg: "expected comma"))
5396 return true;
5397 // Eat the comma
5398 Lex();
5399 return false;
5400}
5401
/// Parse a register and convert it into a 0-based index relative to \p Base,
/// diagnosing registers outside [\p First, \p Last]. When Base is x0, FP and
/// LR are additionally accepted as x29/x30 even though their enum values are
/// not contiguous with x0..x28.
/// \returns true on error (diagnostic already emitted), false on success with
/// the index stored in \p Out.
bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
                                            unsigned First, unsigned Last) {
  MCRegister Reg;
  SMLoc Start, End;
  if (check(P: parseRegister(Reg, StartLoc&: Start, EndLoc&: End), Loc: getLoc(), Msg: "expected register"))
    return true;

  // Special handling for FP and LR; they aren't linearly after x28 in
  // the registers enum.
  unsigned RangeEnd = Last;
  if (Base == AArch64::X0) {
    if (Last == AArch64::FP) {
      // Range x0..fp: fp maps to index 29; the linear check below only
      // covers up to x28.
      RangeEnd = AArch64::X28;
      if (Reg == AArch64::FP) {
        Out = 29;
        return false;
      }
    }
    if (Last == AArch64::LR) {
      // Range x0..lr: fp and lr map to indices 29 and 30 respectively.
      RangeEnd = AArch64::X28;
      if (Reg == AArch64::FP) {
        Out = 29;
        return false;
      } else if (Reg == AArch64::LR) {
        Out = 30;
        return false;
      }
    }
  }

  // Note: the error message names the user-visible range [First, Last] even
  // though the linear check uses RangeEnd (FP/LR were handled above).
  if (check(P: Reg < First || Reg > RangeEnd, Loc: Start,
            Msg: Twine("expected register in range ") +
                AArch64InstPrinter::getRegisterName(Reg: First) + " to " +
                AArch64InstPrinter::getRegisterName(Reg: Last)))
    return true;
  // Registers in the linear part of the range map to their distance from
  // Base.
  Out = Reg - Base;
  return false;
}
5440
5441bool AArch64AsmParser::areEqualRegs(const MCParsedAsmOperand &Op1,
5442 const MCParsedAsmOperand &Op2) const {
5443 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
5444 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
5445
5446 if (AOp1.isVectorList() && AOp2.isVectorList())
5447 return AOp1.getVectorListCount() == AOp2.getVectorListCount() &&
5448 AOp1.getVectorListStart() == AOp2.getVectorListStart() &&
5449 AOp1.getVectorListStride() == AOp2.getVectorListStride();
5450
5451 if (!AOp1.isReg() || !AOp2.isReg())
5452 return false;
5453
5454 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
5455 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
5456 return MCTargetAsmParser::areEqualRegs(Op1, Op2);
5457
5458 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
5459 "Testing equality of non-scalar registers not supported");
5460
5461 // Check if a registers match their sub/super register classes.
5462 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
5463 return getXRegFromWReg(Reg: Op1.getReg()) == Op2.getReg();
5464 if (AOp1.getRegEqualityTy() == EqualsSubReg)
5465 return getWRegFromXReg(Reg: Op1.getReg()) == Op2.getReg();
5466 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
5467 return getXRegFromWReg(Reg: Op2.getReg()) == Op1.getReg();
5468 if (AOp2.getRegEqualityTy() == EqualsSubReg)
5469 return getWRegFromXReg(Reg: Op2.getReg()) == Op1.getReg();
5470
5471 return false;
5472}
5473
/// Parse an AArch64 instruction mnemonic followed by its operands.
///
/// Handles: canonicalising legacy "b<cc>" spellings to "b.<cc>", the
/// AArch64-specific ".req" directive, the SYS/SYSL/SYSP alias families,
/// splitting the mnemonic at '.' into suffix tokens, and routing
/// condition-code operand positions for the conditional-select aliases.
/// \returns true on error (diagnostic already emitted), false on success.
bool AArch64AsmParser::parseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  // Canonicalise pre-UAL style "b<cc>" branches to the "b.<cc>" spelling
  // the matcher expects.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case(S: "beq", Value: "b.eq")
             .Case(S: "bne", Value: "b.ne")
             .Case(S: "bhs", Value: "b.hs")
             .Case(S: "bcs", Value: "b.cs")
             .Case(S: "blo", Value: "b.lo")
             .Case(S: "bcc", Value: "b.cc")
             .Case(S: "bmi", Value: "b.mi")
             .Case(S: "bpl", Value: "b.pl")
             .Case(S: "bvs", Value: "b.vs")
             .Case(S: "bvc", Value: "b.vc")
             .Case(S: "bhi", Value: "b.hi")
             .Case(S: "bls", Value: "b.ls")
             .Case(S: "bge", Value: "b.ge")
             .Case(S: "blt", Value: "b.lt")
             .Case(S: "bgt", Value: "b.gt")
             .Case(S: "ble", Value: "b.le")
             .Case(S: "bal", Value: "b.al")
             .Case(S: "bnv", Value: "b.nv")
             .Default(Value: Name);

  // First check for the AArch64-specific .req directive.
  if (getTok().is(K: AsmToken::Identifier) &&
      getTok().getIdentifier().lower() == ".req") {
    parseDirectiveReq(Name, L: NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction."
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find(C: '.');
  StringRef Head = Name.slice(Start, End: Next);

  // IC, DC, AT, TLBI, MLBI, PLBI, GIC{R}, GSB and Prediction invalidation
  // instructions are aliases for the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
      Head == "cfp" || Head == "dvp" || Head == "cpp" || Head == "cosp" ||
      Head == "mlbi" || Head == "plbi" || Head == "gic" || Head == "gsb")
    return parseSysAlias(Name: Head, NameLoc, Operands);

  // GICR instructions are aliases for the SYSL instruction.
  if (Head == "gicr")
    return parseSyslAlias(Name: Head, NameLoc, Operands);

  // TLBIP instructions are aliases for the SYSP instruction.
  if (Head == "tlbip")
    return parseSyspAlias(Name: Head, NameLoc, Operands);

  Operands.push_back(Elt: AArch64Operand::CreateToken(Str: Head, S: NameLoc, Ctx&: getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
  if ((Head == "b" || Head == "bc") && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find(C: '.', From: Start + 1);
    // Skip the leading '.' of the suffix.
    Head = Name.slice(Start: Start + 1, End: Next);

    SMLoc SuffixLoc = SMLoc::getFromPointer(Ptr: NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    std::string Suggestion;
    AArch64CC::CondCode CC = parseCondCodeString(Cond: Head, Suggestion);
    if (CC == AArch64CC::Invalid) {
      std::string Msg = "invalid condition code";
      if (!Suggestion.empty())
        Msg += ", did you mean " + Suggestion + "?";
      return Error(L: SuffixLoc, Msg);
    }
    Operands.push_back(Elt: AArch64Operand::CreateToken(Str: ".", S: SuffixLoc, Ctx&: getContext(),
                                                    /*IsSuffix=*/true));
    Operands.push_back(
        Elt: AArch64Operand::CreateCondCode(Code: CC, S: NameLoc, E: NameLoc, Ctx&: getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find(C: '.', From: Start + 1);
    // Here the slice keeps the leading '.' as part of the suffix token.
    Head = Name.slice(Start, End: Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(Ptr: NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(Elt: AArch64Operand::CreateToken(
        Str: Head, S: SuffixLoc, Ctx&: getContext(), /*IsSuffix=*/true));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  // generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(K: AsmToken::EndOfStatement)) {

    unsigned N = 1;
    do {
      // Parse and remember the operand.
      if (parseOperand(Operands, isCondCode: (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       invertCondCode: condCodeSecondOperand || condCodeThirdOperand)) {
        return true;
      }

      // After successfully parsing some operands there are three special cases
      // to consider (i.e. notional operands not separated by commas). Two are
      // due to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // And a further case is '}', which ends a group of tokens specifying the
      // SME accumulator array 'ZA' or tile vector, i.e.
      //
      //   '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!

      if (parseOptionalToken(T: AsmToken::RBrac))
        Operands.push_back(
            Elt: AArch64Operand::CreateToken(Str: "]", S: getLoc(), Ctx&: getContext()));
      if (parseOptionalToken(T: AsmToken::Exclaim))
        Operands.push_back(
            Elt: AArch64Operand::CreateToken(Str: "!", S: getLoc(), Ctx&: getContext()));
      if (parseOptionalToken(T: AsmToken::RCurly))
        Operands.push_back(
            Elt: AArch64Operand::CreateToken(Str: "}", S: getLoc(), Ctx&: getContext()));

      ++N;
    } while (parseOptionalToken(T: AsmToken::Comma));
  }

  if (parseToken(T: AsmToken::EndOfStatement, Msg: "unexpected token in argument list"))
    return true;

  return false;
}
5626
5627static inline bool isMatchingOrAlias(MCRegister ZReg, MCRegister Reg) {
5628 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
5629 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
5630 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
5631 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
5632 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
5633 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
5634 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
5635}
5636
5637// FIXME: This entire function is a giant hack to provide us with decent
5638// operand range validation/diagnostics until TableGen/MC can be extended
5639// to support autogeneration of this kind of validation.
5640bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
5641 SmallVectorImpl<SMLoc> &Loc) {
5642 const MCRegisterInfo *RI = getContext().getRegisterInfo();
5643 const MCInstrDesc &MCID = MII.get(Opcode: Inst.getOpcode());
5644
5645 // A prefix only applies to the instruction following it. Here we extract
5646 // prefix information for the next instruction before validating the current
5647 // one so that in the case of failure we don't erroneously continue using the
5648 // current prefix.
5649 PrefixInfo Prefix = NextPrefix;
5650 NextPrefix = PrefixInfo::CreateFromInst(Inst, TSFlags: MCID.TSFlags);
5651
5652 // Before validating the instruction in isolation we run through the rules
5653 // applicable when it follows a prefix instruction.
5654 // NOTE: brk & hlt can be prefixed but require no additional validation.
5655 if (Prefix.isActive() &&
5656 (Inst.getOpcode() != AArch64::BRK) &&
5657 (Inst.getOpcode() != AArch64::HLT)) {
5658
5659 // Prefixed instructions must have a destructive operand.
5660 if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
5661 AArch64::NotDestructive)
5662 return Error(L: IDLoc, Msg: "instruction is unpredictable when following a"
5663 " movprfx, suggest replacing movprfx with mov");
5664
5665 // Destination operands must match.
5666 if (Inst.getOperand(i: 0).getReg() != Prefix.getDstReg())
5667 return Error(L: Loc[0], Msg: "instruction is unpredictable when following a"
5668 " movprfx writing to a different destination");
5669
5670 // Destination operand must not be used in any other location.
5671 for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
5672 if (Inst.getOperand(i).isReg() &&
5673 (MCID.getOperandConstraint(OpNum: i, Constraint: MCOI::TIED_TO) == -1) &&
5674 isMatchingOrAlias(ZReg: Prefix.getDstReg(), Reg: Inst.getOperand(i).getReg()))
5675 return Error(L: Loc[0], Msg: "instruction is unpredictable when following a"
5676 " movprfx and destination also used as non-destructive"
5677 " source");
5678 }
5679
5680 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
5681 if (Prefix.isPredicated()) {
5682 int PgIdx = -1;
5683
5684 // Find the instructions general predicate.
5685 for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
5686 if (Inst.getOperand(i).isReg() &&
5687 PPRRegClass.contains(Reg: Inst.getOperand(i).getReg())) {
5688 PgIdx = i;
5689 break;
5690 }
5691
5692 // Instruction must be predicated if the movprfx is predicated.
5693 if (PgIdx == -1 ||
5694 (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
5695 return Error(L: IDLoc, Msg: "instruction is unpredictable when following a"
5696 " predicated movprfx, suggest using unpredicated movprfx");
5697
5698 // Instruction must use same general predicate as the movprfx.
5699 if (Inst.getOperand(i: PgIdx).getReg() != Prefix.getPgReg())
5700 return Error(L: IDLoc, Msg: "instruction is unpredictable when following a"
5701 " predicated movprfx using a different general predicate");
5702
5703 // Instruction element type must match the movprfx.
5704 if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
5705 return Error(L: IDLoc, Msg: "instruction is unpredictable when following a"
5706 " predicated movprfx with a different element size");
5707 }
5708 }
5709
5710 // On ARM64EC, only valid registers may be used. Warn against using
5711 // explicitly disallowed registers.
5712 if (IsWindowsArm64EC) {
5713 for (unsigned i = 0; i < Inst.getNumOperands(); ++i) {
5714 if (Inst.getOperand(i).isReg()) {
5715 MCRegister Reg = Inst.getOperand(i).getReg();
5716 // At this point, vector registers are matched to their
5717 // appropriately sized alias.
5718 if ((Reg == AArch64::W13 || Reg == AArch64::X13) ||
5719 (Reg == AArch64::W14 || Reg == AArch64::X14) ||
5720 (Reg == AArch64::W23 || Reg == AArch64::X23) ||
5721 (Reg == AArch64::W24 || Reg == AArch64::X24) ||
5722 (Reg == AArch64::W28 || Reg == AArch64::X28) ||
5723 (Reg >= AArch64::Q16 && Reg <= AArch64::Q31) ||
5724 (Reg >= AArch64::D16 && Reg <= AArch64::D31) ||
5725 (Reg >= AArch64::S16 && Reg <= AArch64::S31) ||
5726 (Reg >= AArch64::H16 && Reg <= AArch64::H31) ||
5727 (Reg >= AArch64::B16 && Reg <= AArch64::B31)) {
5728 Warning(L: IDLoc, Msg: "register " + Twine(RI->getName(RegNo: Reg)) +
5729 " is disallowed on ARM64EC.");
5730 }
5731 }
5732 }
5733 }
5734
5735 // Check for indexed addressing modes w/ the base register being the
5736 // same as a destination/source register or pair load where
5737 // the Rt == Rt2. All of those are undefined behaviour.
5738 switch (Inst.getOpcode()) {
5739 case AArch64::LDPSWpre:
5740 case AArch64::LDPWpost:
5741 case AArch64::LDPWpre:
5742 case AArch64::LDPXpost:
5743 case AArch64::LDPXpre: {
5744 MCRegister Rt = Inst.getOperand(i: 1).getReg();
5745 MCRegister Rt2 = Inst.getOperand(i: 2).getReg();
5746 MCRegister Rn = Inst.getOperand(i: 3).getReg();
5747 if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt))
5748 return Error(L: Loc[0], Msg: "unpredictable LDP instruction, writeback base "
5749 "is also a destination");
5750 if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt2))
5751 return Error(L: Loc[1], Msg: "unpredictable LDP instruction, writeback base "
5752 "is also a destination");
5753 [[fallthrough]];
5754 }
5755 case AArch64::LDR_ZA:
5756 case AArch64::STR_ZA: {
5757 if (Inst.getOperand(i: 2).isImm() && Inst.getOperand(i: 4).isImm() &&
5758 Inst.getOperand(i: 2).getImm() != Inst.getOperand(i: 4).getImm())
5759 return Error(L: Loc[1],
5760 Msg: "unpredictable instruction, immediate and offset mismatch.");
5761 break;
5762 }
5763 case AArch64::LDPDi:
5764 case AArch64::LDPQi:
5765 case AArch64::LDPSi:
5766 case AArch64::LDPSWi:
5767 case AArch64::LDPWi:
5768 case AArch64::LDPXi: {
5769 MCRegister Rt = Inst.getOperand(i: 0).getReg();
5770 MCRegister Rt2 = Inst.getOperand(i: 1).getReg();
5771 if (Rt == Rt2)
5772 return Error(L: Loc[1], Msg: "unpredictable LDP instruction, Rt2==Rt");
5773 break;
5774 }
5775 case AArch64::LDPDpost:
5776 case AArch64::LDPDpre:
5777 case AArch64::LDPQpost:
5778 case AArch64::LDPQpre:
5779 case AArch64::LDPSpost:
5780 case AArch64::LDPSpre:
5781 case AArch64::LDPSWpost: {
5782 MCRegister Rt = Inst.getOperand(i: 1).getReg();
5783 MCRegister Rt2 = Inst.getOperand(i: 2).getReg();
5784 if (Rt == Rt2)
5785 return Error(L: Loc[1], Msg: "unpredictable LDP instruction, Rt2==Rt");
5786 break;
5787 }
5788 case AArch64::STPDpost:
5789 case AArch64::STPDpre:
5790 case AArch64::STPQpost:
5791 case AArch64::STPQpre:
5792 case AArch64::STPSpost:
5793 case AArch64::STPSpre:
5794 case AArch64::STPWpost:
5795 case AArch64::STPWpre:
5796 case AArch64::STPXpost:
5797 case AArch64::STPXpre: {
5798 MCRegister Rt = Inst.getOperand(i: 1).getReg();
5799 MCRegister Rt2 = Inst.getOperand(i: 2).getReg();
5800 MCRegister Rn = Inst.getOperand(i: 3).getReg();
5801 if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt))
5802 return Error(L: Loc[0], Msg: "unpredictable STP instruction, writeback base "
5803 "is also a source");
5804 if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt2))
5805 return Error(L: Loc[1], Msg: "unpredictable STP instruction, writeback base "
5806 "is also a source");
5807 break;
5808 }
5809 case AArch64::LDRBBpre:
5810 case AArch64::LDRBpre:
5811 case AArch64::LDRHHpre:
5812 case AArch64::LDRHpre:
5813 case AArch64::LDRSBWpre:
5814 case AArch64::LDRSBXpre:
5815 case AArch64::LDRSHWpre:
5816 case AArch64::LDRSHXpre:
5817 case AArch64::LDRSWpre:
5818 case AArch64::LDRWpre:
5819 case AArch64::LDRXpre:
5820 case AArch64::LDRBBpost:
5821 case AArch64::LDRBpost:
5822 case AArch64::LDRHHpost:
5823 case AArch64::LDRHpost:
5824 case AArch64::LDRSBWpost:
5825 case AArch64::LDRSBXpost:
5826 case AArch64::LDRSHWpost:
5827 case AArch64::LDRSHXpost:
5828 case AArch64::LDRSWpost:
5829 case AArch64::LDRWpost:
5830 case AArch64::LDRXpost: {
5831 MCRegister Rt = Inst.getOperand(i: 1).getReg();
5832 MCRegister Rn = Inst.getOperand(i: 2).getReg();
5833 if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt))
5834 return Error(L: Loc[0], Msg: "unpredictable LDR instruction, writeback base "
5835 "is also a source");
5836 break;
5837 }
5838 case AArch64::STRBBpost:
5839 case AArch64::STRBpost:
5840 case AArch64::STRHHpost:
5841 case AArch64::STRHpost:
5842 case AArch64::STRWpost:
5843 case AArch64::STRXpost:
5844 case AArch64::STRBBpre:
5845 case AArch64::STRBpre:
5846 case AArch64::STRHHpre:
5847 case AArch64::STRHpre:
5848 case AArch64::STRWpre:
5849 case AArch64::STRXpre: {
5850 MCRegister Rt = Inst.getOperand(i: 1).getReg();
5851 MCRegister Rn = Inst.getOperand(i: 2).getReg();
5852 if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt))
5853 return Error(L: Loc[0], Msg: "unpredictable STR instruction, writeback base "
5854 "is also a source");
5855 break;
5856 }
5857 case AArch64::STXRB:
5858 case AArch64::STXRH:
5859 case AArch64::STXRW:
5860 case AArch64::STXRX:
5861 case AArch64::STLXRB:
5862 case AArch64::STLXRH:
5863 case AArch64::STLXRW:
5864 case AArch64::STLXRX: {
5865 MCRegister Rs = Inst.getOperand(i: 0).getReg();
5866 MCRegister Rt = Inst.getOperand(i: 1).getReg();
5867 MCRegister Rn = Inst.getOperand(i: 2).getReg();
5868 if (RI->isSubRegisterEq(RegA: Rt, RegB: Rs) ||
5869 (RI->isSubRegisterEq(RegA: Rn, RegB: Rs) && Rn != AArch64::SP))
5870 return Error(L: Loc[0],
5871 Msg: "unpredictable STXR instruction, status is also a source");
5872 break;
5873 }
5874 case AArch64::STXPW:
5875 case AArch64::STXPX:
5876 case AArch64::STLXPW:
5877 case AArch64::STLXPX: {
5878 MCRegister Rs = Inst.getOperand(i: 0).getReg();
5879 MCRegister Rt1 = Inst.getOperand(i: 1).getReg();
5880 MCRegister Rt2 = Inst.getOperand(i: 2).getReg();
5881 MCRegister Rn = Inst.getOperand(i: 3).getReg();
5882 if (RI->isSubRegisterEq(RegA: Rt1, RegB: Rs) || RI->isSubRegisterEq(RegA: Rt2, RegB: Rs) ||
5883 (RI->isSubRegisterEq(RegA: Rn, RegB: Rs) && Rn != AArch64::SP))
5884 return Error(L: Loc[0],
5885 Msg: "unpredictable STXP instruction, status is also a source");
5886 break;
5887 }
5888 case AArch64::LDRABwriteback:
5889 case AArch64::LDRAAwriteback: {
5890 MCRegister Xt = Inst.getOperand(i: 0).getReg();
5891 MCRegister Xn = Inst.getOperand(i: 1).getReg();
5892 if (Xt == Xn)
5893 return Error(L: Loc[0],
5894 Msg: "unpredictable LDRA instruction, writeback base"
5895 " is also a destination");
5896 break;
5897 }
5898 }
5899
5900 // Check v8.8-A memops instructions.
5901 switch (Inst.getOpcode()) {
5902 case AArch64::CPYFP:
5903 case AArch64::CPYFPWN:
5904 case AArch64::CPYFPRN:
5905 case AArch64::CPYFPN:
5906 case AArch64::CPYFPWT:
5907 case AArch64::CPYFPWTWN:
5908 case AArch64::CPYFPWTRN:
5909 case AArch64::CPYFPWTN:
5910 case AArch64::CPYFPRT:
5911 case AArch64::CPYFPRTWN:
5912 case AArch64::CPYFPRTRN:
5913 case AArch64::CPYFPRTN:
5914 case AArch64::CPYFPT:
5915 case AArch64::CPYFPTWN:
5916 case AArch64::CPYFPTRN:
5917 case AArch64::CPYFPTN:
5918 case AArch64::CPYFM:
5919 case AArch64::CPYFMWN:
5920 case AArch64::CPYFMRN:
5921 case AArch64::CPYFMN:
5922 case AArch64::CPYFMWT:
5923 case AArch64::CPYFMWTWN:
5924 case AArch64::CPYFMWTRN:
5925 case AArch64::CPYFMWTN:
5926 case AArch64::CPYFMRT:
5927 case AArch64::CPYFMRTWN:
5928 case AArch64::CPYFMRTRN:
5929 case AArch64::CPYFMRTN:
5930 case AArch64::CPYFMT:
5931 case AArch64::CPYFMTWN:
5932 case AArch64::CPYFMTRN:
5933 case AArch64::CPYFMTN:
5934 case AArch64::CPYFE:
5935 case AArch64::CPYFEWN:
5936 case AArch64::CPYFERN:
5937 case AArch64::CPYFEN:
5938 case AArch64::CPYFEWT:
5939 case AArch64::CPYFEWTWN:
5940 case AArch64::CPYFEWTRN:
5941 case AArch64::CPYFEWTN:
5942 case AArch64::CPYFERT:
5943 case AArch64::CPYFERTWN:
5944 case AArch64::CPYFERTRN:
5945 case AArch64::CPYFERTN:
5946 case AArch64::CPYFET:
5947 case AArch64::CPYFETWN:
5948 case AArch64::CPYFETRN:
5949 case AArch64::CPYFETN:
5950 case AArch64::CPYP:
5951 case AArch64::CPYPWN:
5952 case AArch64::CPYPRN:
5953 case AArch64::CPYPN:
5954 case AArch64::CPYPWT:
5955 case AArch64::CPYPWTWN:
5956 case AArch64::CPYPWTRN:
5957 case AArch64::CPYPWTN:
5958 case AArch64::CPYPRT:
5959 case AArch64::CPYPRTWN:
5960 case AArch64::CPYPRTRN:
5961 case AArch64::CPYPRTN:
5962 case AArch64::CPYPT:
5963 case AArch64::CPYPTWN:
5964 case AArch64::CPYPTRN:
5965 case AArch64::CPYPTN:
5966 case AArch64::CPYM:
5967 case AArch64::CPYMWN:
5968 case AArch64::CPYMRN:
5969 case AArch64::CPYMN:
5970 case AArch64::CPYMWT:
5971 case AArch64::CPYMWTWN:
5972 case AArch64::CPYMWTRN:
5973 case AArch64::CPYMWTN:
5974 case AArch64::CPYMRT:
5975 case AArch64::CPYMRTWN:
5976 case AArch64::CPYMRTRN:
5977 case AArch64::CPYMRTN:
5978 case AArch64::CPYMT:
5979 case AArch64::CPYMTWN:
5980 case AArch64::CPYMTRN:
5981 case AArch64::CPYMTN:
5982 case AArch64::CPYE:
5983 case AArch64::CPYEWN:
5984 case AArch64::CPYERN:
5985 case AArch64::CPYEN:
5986 case AArch64::CPYEWT:
5987 case AArch64::CPYEWTWN:
5988 case AArch64::CPYEWTRN:
5989 case AArch64::CPYEWTN:
5990 case AArch64::CPYERT:
5991 case AArch64::CPYERTWN:
5992 case AArch64::CPYERTRN:
5993 case AArch64::CPYERTN:
5994 case AArch64::CPYET:
5995 case AArch64::CPYETWN:
5996 case AArch64::CPYETRN:
5997 case AArch64::CPYETN: {
5998 // Xd_wb == op0, Xs_wb == op1, Xn_wb == op2
5999 MCRegister Xd = Inst.getOperand(i: 3).getReg();
6000 MCRegister Xs = Inst.getOperand(i: 4).getReg();
6001 MCRegister Xn = Inst.getOperand(i: 5).getReg();
6002
6003 assert(Xd == Inst.getOperand(0).getReg() && "Xd_wb and Xd do not match");
6004 assert(Xs == Inst.getOperand(1).getReg() && "Xs_wb and Xs do not match");
6005 assert(Xn == Inst.getOperand(2).getReg() && "Xn_wb and Xn do not match");
6006
6007 if (Xd == Xs)
6008 return Error(L: Loc[0], Msg: "invalid CPY instruction, destination and source"
6009 " registers are the same");
6010 if (Xd == Xn)
6011 return Error(L: Loc[0], Msg: "invalid CPY instruction, destination and size"
6012 " registers are the same");
6013 if (Xs == Xn)
6014 return Error(L: Loc[0], Msg: "invalid CPY instruction, source and size"
6015 " registers are the same");
6016 break;
6017 }
6018 case AArch64::SETP:
6019 case AArch64::SETPT:
6020 case AArch64::SETPN:
6021 case AArch64::SETPTN:
6022 case AArch64::SETM:
6023 case AArch64::SETMT:
6024 case AArch64::SETMN:
6025 case AArch64::SETMTN:
6026 case AArch64::SETE:
6027 case AArch64::SETET:
6028 case AArch64::SETEN:
6029 case AArch64::SETETN:
6030 case AArch64::SETGP:
6031 case AArch64::SETGPT:
6032 case AArch64::SETGPN:
6033 case AArch64::SETGPTN:
6034 case AArch64::SETGM:
6035 case AArch64::SETGMT:
6036 case AArch64::SETGMN:
6037 case AArch64::SETGMTN:
6038 case AArch64::MOPSSETGE:
6039 case AArch64::MOPSSETGET:
6040 case AArch64::MOPSSETGEN:
6041 case AArch64::MOPSSETGETN: {
6042 // Xd_wb == op0, Xn_wb == op1
6043 MCRegister Xd = Inst.getOperand(i: 2).getReg();
6044 MCRegister Xn = Inst.getOperand(i: 3).getReg();
6045 MCRegister Xm = Inst.getOperand(i: 4).getReg();
6046
6047 assert(Xd == Inst.getOperand(0).getReg() && "Xd_wb and Xd do not match");
6048 assert(Xn == Inst.getOperand(1).getReg() && "Xn_wb and Xn do not match");
6049
6050 if (Xd == Xn)
6051 return Error(L: Loc[0], Msg: "invalid SET instruction, destination and size"
6052 " registers are the same");
6053 if (Xd == Xm)
6054 return Error(L: Loc[0], Msg: "invalid SET instruction, destination and source"
6055 " registers are the same");
6056 if (Xn == Xm)
6057 return Error(L: Loc[0], Msg: "invalid SET instruction, source and size"
6058 " registers are the same");
6059 break;
6060 }
6061 case AArch64::SETGOP:
6062 case AArch64::SETGOPT:
6063 case AArch64::SETGOPN:
6064 case AArch64::SETGOPTN:
6065 case AArch64::SETGOM:
6066 case AArch64::SETGOMT:
6067 case AArch64::SETGOMN:
6068 case AArch64::SETGOMTN:
6069 case AArch64::SETGOE:
6070 case AArch64::SETGOET:
6071 case AArch64::SETGOEN:
6072 case AArch64::SETGOETN: {
6073 // Xd_wb == op0, Xn_wb == op1
6074 MCRegister Xd = Inst.getOperand(i: 2).getReg();
6075 MCRegister Xn = Inst.getOperand(i: 3).getReg();
6076
6077 assert(Xd == Inst.getOperand(0).getReg() && "Xd_wb and Xd do not match");
6078 assert(Xn == Inst.getOperand(1).getReg() && "Xn_wb and Xn do not match");
6079
6080 if (Xd == Xn)
6081 return Error(L: Loc[0], Msg: "invalid SET instruction, destination and size"
6082 " registers are the same");
6083 break;
6084 }
6085 }
6086
6087 // Now check immediate ranges. Separate from the above as there is overlap
6088 // in the instructions being checked and this keeps the nested conditionals
6089 // to a minimum.
6090 switch (Inst.getOpcode()) {
6091 case AArch64::ADDSWri:
6092 case AArch64::ADDSXri:
6093 case AArch64::ADDWri:
6094 case AArch64::ADDXri:
6095 case AArch64::SUBSWri:
6096 case AArch64::SUBSXri:
6097 case AArch64::SUBWri:
6098 case AArch64::SUBXri: {
6099 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
6100 // some slight duplication here.
6101 if (Inst.getOperand(i: 2).isExpr()) {
6102 const MCExpr *Expr = Inst.getOperand(i: 2).getExpr();
6103 AArch64::Specifier ELFSpec;
6104 AArch64::Specifier DarwinSpec;
6105 int64_t Addend;
6106 if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
6107
6108 // Only allow these with ADDXri.
6109 if ((DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
6110 DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF) &&
6111 Inst.getOpcode() == AArch64::ADDXri)
6112 return false;
6113
6114 // Only allow these with ADDXri/ADDWri
6115 if (llvm::is_contained(
6116 Set: {AArch64::S_LO12, AArch64::S_GOT_AUTH_LO12,
6117 AArch64::S_DTPREL_HI12, AArch64::S_DTPREL_LO12,
6118 AArch64::S_DTPREL_LO12_NC, AArch64::S_TPREL_HI12,
6119 AArch64::S_TPREL_LO12, AArch64::S_TPREL_LO12_NC,
6120 AArch64::S_TLSDESC_LO12, AArch64::S_TLSDESC_AUTH_LO12,
6121 AArch64::S_SECREL_LO12, AArch64::S_SECREL_HI12},
6122 Element: ELFSpec) &&
6123 (Inst.getOpcode() == AArch64::ADDXri ||
6124 Inst.getOpcode() == AArch64::ADDWri))
6125 return false;
6126
6127 // Don't allow symbol refs in the immediate field otherwise
6128 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
6129 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
6130 // 'cmp w0, 'borked')
6131 return Error(L: Loc.back(), Msg: "invalid immediate expression");
6132 }
6133 // We don't validate more complex expressions here
6134 }
6135 return false;
6136 }
6137 default:
6138 return false;
6139 }
6140}
6141
6142static std::string AArch64MnemonicSpellCheck(StringRef S,
6143 const FeatureBitset &FBS,
6144 unsigned VariantID = 0);
6145
6146bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
6147 uint64_t ErrorInfo,
6148 OperandVector &Operands) {
6149 switch (ErrCode) {
6150 case Match_InvalidTiedOperand: {
6151 auto &Op = static_cast<const AArch64Operand &>(*Operands[ErrorInfo]);
6152 if (Op.isVectorList())
6153 return Error(L: Loc, Msg: "operand must match destination register list");
6154
6155 assert(Op.isReg() && "Unexpected operand type");
6156 switch (Op.getRegEqualityTy()) {
6157 case RegConstraintEqualityTy::EqualsSubReg:
6158 return Error(L: Loc, Msg: "operand must be 64-bit form of destination register");
6159 case RegConstraintEqualityTy::EqualsSuperReg:
6160 return Error(L: Loc, Msg: "operand must be 32-bit form of destination register");
6161 case RegConstraintEqualityTy::EqualsReg:
6162 return Error(L: Loc, Msg: "operand must match destination register");
6163 }
6164 llvm_unreachable("Unknown RegConstraintEqualityTy");
6165 }
6166 case Match_MissingFeature:
6167 return Error(L: Loc,
6168 Msg: "instruction requires a CPU feature not currently enabled");
6169 case Match_InvalidOperand:
6170 return Error(L: Loc, Msg: "invalid operand for instruction");
6171 case Match_InvalidSuffix:
6172 return Error(L: Loc, Msg: "invalid type suffix for instruction");
6173 case Match_InvalidCondCode:
6174 return Error(L: Loc, Msg: "expected AArch64 condition code");
6175 case Match_AddSubRegExtendSmall:
6176 return Error(L: Loc,
6177 Msg: "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
6178 case Match_AddSubRegExtendLarge:
6179 return Error(L: Loc,
6180 Msg: "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
6181 case Match_AddSubSecondSource:
6182 return Error(L: Loc,
6183 Msg: "expected compatible register, symbol or integer in range [0, 4095]");
6184 case Match_LogicalSecondSource:
6185 return Error(L: Loc, Msg: "expected compatible register or logical immediate");
6186 case Match_InvalidMovImm32Shift:
6187 return Error(L: Loc, Msg: "expected 'lsl' with optional integer 0 or 16");
6188 case Match_InvalidMovImm64Shift:
6189 return Error(L: Loc, Msg: "expected 'lsl' with optional integer 0, 16, 32 or 48");
6190 case Match_AddSubRegShift32:
6191 return Error(L: Loc,
6192 Msg: "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
6193 case Match_AddSubRegShift64:
6194 return Error(L: Loc,
6195 Msg: "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
6196 case Match_InvalidFPImm:
6197 return Error(L: Loc,
6198 Msg: "expected compatible register or floating-point constant");
6199 case Match_InvalidMemoryIndexedSImm6:
6200 return Error(L: Loc, Msg: "index must be an integer in range [-32, 31].");
6201 case Match_InvalidMemoryIndexedSImm5:
6202 return Error(L: Loc, Msg: "index must be an integer in range [-16, 15].");
6203 case Match_InvalidMemoryIndexed1SImm4:
6204 return Error(L: Loc, Msg: "index must be an integer in range [-8, 7].");
6205 case Match_InvalidMemoryIndexed2SImm4:
6206 return Error(L: Loc, Msg: "index must be a multiple of 2 in range [-16, 14].");
6207 case Match_InvalidMemoryIndexed3SImm4:
6208 return Error(L: Loc, Msg: "index must be a multiple of 3 in range [-24, 21].");
6209 case Match_InvalidMemoryIndexed4SImm4:
6210 return Error(L: Loc, Msg: "index must be a multiple of 4 in range [-32, 28].");
6211 case Match_InvalidMemoryIndexed16SImm4:
6212 return Error(L: Loc, Msg: "index must be a multiple of 16 in range [-128, 112].");
6213 case Match_InvalidMemoryIndexed32SImm4:
6214 return Error(L: Loc, Msg: "index must be a multiple of 32 in range [-256, 224].");
6215 case Match_InvalidMemoryIndexed1SImm6:
6216 return Error(L: Loc, Msg: "index must be an integer in range [-32, 31].");
6217 case Match_InvalidMemoryIndexedSImm8:
6218 return Error(L: Loc, Msg: "index must be an integer in range [-128, 127].");
6219 case Match_InvalidMemoryIndexedSImm9:
6220 return Error(L: Loc, Msg: "index must be an integer in range [-256, 255].");
6221 case Match_InvalidMemoryIndexed16SImm9:
6222 return Error(L: Loc, Msg: "index must be a multiple of 16 in range [-4096, 4080].");
6223 case Match_InvalidMemoryIndexed8SImm10:
6224 return Error(L: Loc, Msg: "index must be a multiple of 8 in range [-4096, 4088].");
6225 case Match_InvalidMemoryIndexed4SImm7:
6226 return Error(L: Loc, Msg: "index must be a multiple of 4 in range [-256, 252].");
6227 case Match_InvalidMemoryIndexed8SImm7:
6228 return Error(L: Loc, Msg: "index must be a multiple of 8 in range [-512, 504].");
6229 case Match_InvalidMemoryIndexed16SImm7:
6230 return Error(L: Loc, Msg: "index must be a multiple of 16 in range [-1024, 1008].");
6231 case Match_InvalidMemoryIndexed8UImm5:
6232 return Error(L: Loc, Msg: "index must be a multiple of 8 in range [0, 248].");
6233 case Match_InvalidMemoryIndexed8UImm3:
6234 return Error(L: Loc, Msg: "index must be a multiple of 8 in range [0, 56].");
6235 case Match_InvalidMemoryIndexed4UImm5:
6236 return Error(L: Loc, Msg: "index must be a multiple of 4 in range [0, 124].");
6237 case Match_InvalidMemoryIndexed2UImm5:
6238 return Error(L: Loc, Msg: "index must be a multiple of 2 in range [0, 62].");
6239 case Match_InvalidMemoryIndexed8UImm6:
6240 return Error(L: Loc, Msg: "index must be a multiple of 8 in range [0, 504].");
6241 case Match_InvalidMemoryIndexed16UImm6:
6242 return Error(L: Loc, Msg: "index must be a multiple of 16 in range [0, 1008].");
6243 case Match_InvalidMemoryIndexed4UImm6:
6244 return Error(L: Loc, Msg: "index must be a multiple of 4 in range [0, 252].");
6245 case Match_InvalidMemoryIndexed2UImm6:
6246 return Error(L: Loc, Msg: "index must be a multiple of 2 in range [0, 126].");
6247 case Match_InvalidMemoryIndexed1UImm6:
6248 return Error(L: Loc, Msg: "index must be in range [0, 63].");
6249 case Match_InvalidMemoryWExtend8:
6250 return Error(L: Loc,
6251 Msg: "expected 'uxtw' or 'sxtw' with optional shift of #0");
6252 case Match_InvalidMemoryWExtend16:
6253 return Error(L: Loc,
6254 Msg: "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
6255 case Match_InvalidMemoryWExtend32:
6256 return Error(L: Loc,
6257 Msg: "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
6258 case Match_InvalidMemoryWExtend64:
6259 return Error(L: Loc,
6260 Msg: "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
6261 case Match_InvalidMemoryWExtend128:
6262 return Error(L: Loc,
6263 Msg: "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
6264 case Match_InvalidMemoryXExtend8:
6265 return Error(L: Loc,
6266 Msg: "expected 'lsl' or 'sxtx' with optional shift of #0");
6267 case Match_InvalidMemoryXExtend16:
6268 return Error(L: Loc,
6269 Msg: "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
6270 case Match_InvalidMemoryXExtend32:
6271 return Error(L: Loc,
6272 Msg: "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
6273 case Match_InvalidMemoryXExtend64:
6274 return Error(L: Loc,
6275 Msg: "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
6276 case Match_InvalidMemoryXExtend128:
6277 return Error(L: Loc,
6278 Msg: "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
6279 case Match_InvalidMemoryIndexed1:
6280 return Error(L: Loc, Msg: "index must be an integer in range [0, 4095].");
6281 case Match_InvalidMemoryIndexed2:
6282 return Error(L: Loc, Msg: "index must be a multiple of 2 in range [0, 8190].");
6283 case Match_InvalidMemoryIndexed4:
6284 return Error(L: Loc, Msg: "index must be a multiple of 4 in range [0, 16380].");
6285 case Match_InvalidMemoryIndexed8:
6286 return Error(L: Loc, Msg: "index must be a multiple of 8 in range [0, 32760].");
6287 case Match_InvalidMemoryIndexed16:
6288 return Error(L: Loc, Msg: "index must be a multiple of 16 in range [0, 65520].");
6289 case Match_InvalidImm0_0:
6290 return Error(L: Loc, Msg: "immediate must be 0.");
6291 case Match_InvalidImm0_1:
6292 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 1].");
6293 case Match_InvalidImm0_3:
6294 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 3].");
6295 case Match_InvalidImm0_7:
6296 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 7].");
6297 case Match_InvalidImm0_15:
6298 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 15].");
6299 case Match_InvalidImm0_31:
6300 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 31].");
6301 case Match_InvalidImm0_63:
6302 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 63].");
6303 case Match_InvalidImm0_127:
6304 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 127].");
6305 case Match_InvalidImm0_255:
6306 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 255].");
6307 case Match_InvalidImm0_65535:
6308 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 65535].");
6309 case Match_InvalidImm1_8:
6310 return Error(L: Loc, Msg: "immediate must be an integer in range [1, 8].");
6311 case Match_InvalidImm1_16:
6312 return Error(L: Loc, Msg: "immediate must be an integer in range [1, 16].");
6313 case Match_InvalidImm1_32:
6314 return Error(L: Loc, Msg: "immediate must be an integer in range [1, 32].");
6315 case Match_InvalidImm1_64:
6316 return Error(L: Loc, Msg: "immediate must be an integer in range [1, 64].");
6317 case Match_InvalidImmM1_62:
6318 return Error(L: Loc, Msg: "immediate must be an integer in range [-1, 62].");
6319 case Match_InvalidMemoryIndexedRange2UImm0:
6320 return Error(L: Loc, Msg: "vector select offset must be the immediate range 0:1.");
6321 case Match_InvalidMemoryIndexedRange2UImm1:
6322 return Error(L: Loc, Msg: "vector select offset must be an immediate range of the "
6323 "form <immf>:<imml>, where the first "
6324 "immediate is a multiple of 2 in the range [0, 2], and "
6325 "the second immediate is immf + 1.");
6326 case Match_InvalidMemoryIndexedRange2UImm2:
6327 case Match_InvalidMemoryIndexedRange2UImm3:
6328 return Error(
6329 L: Loc,
6330 Msg: "vector select offset must be an immediate range of the form "
6331 "<immf>:<imml>, "
6332 "where the first immediate is a multiple of 2 in the range [0, 6] or "
6333 "[0, 14] "
6334 "depending on the instruction, and the second immediate is immf + 1.");
6335 case Match_InvalidMemoryIndexedRange4UImm0:
6336 return Error(L: Loc, Msg: "vector select offset must be the immediate range 0:3.");
6337 case Match_InvalidMemoryIndexedRange4UImm1:
6338 case Match_InvalidMemoryIndexedRange4UImm2:
6339 return Error(
6340 L: Loc,
6341 Msg: "vector select offset must be an immediate range of the form "
6342 "<immf>:<imml>, "
6343 "where the first immediate is a multiple of 4 in the range [0, 4] or "
6344 "[0, 12] "
6345 "depending on the instruction, and the second immediate is immf + 3.");
6346 case Match_InvalidSVEAddSubImm8:
6347 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 255]"
6348 " with a shift amount of 0");
6349 case Match_InvalidSVEAddSubImm16:
6350 case Match_InvalidSVEAddSubImm32:
6351 case Match_InvalidSVEAddSubImm64:
6352 return Error(L: Loc, Msg: "immediate must be an integer in range [0, 255] or a "
6353 "multiple of 256 in range [256, 65280]");
6354 case Match_InvalidSVECpyImm8:
6355 return Error(L: Loc, Msg: "immediate must be an integer in range [-128, 255]"
6356 " with a shift amount of 0");
6357 case Match_InvalidSVECpyImm16:
6358 return Error(L: Loc, Msg: "immediate must be an integer in range [-128, 127] or a "
6359 "multiple of 256 in range [-32768, 65280]");
6360 case Match_InvalidSVECpyImm32:
6361 case Match_InvalidSVECpyImm64:
6362 return Error(L: Loc, Msg: "immediate must be an integer in range [-128, 127] or a "
6363 "multiple of 256 in range [-32768, 32512]");
6364 case Match_InvalidIndexRange0_0:
6365 return Error(L: Loc, Msg: "expected lane specifier '[0]'");
6366 case Match_InvalidIndexRange1_1:
6367 return Error(L: Loc, Msg: "expected lane specifier '[1]'");
6368 case Match_InvalidIndexRange0_15:
6369 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 15].");
6370 case Match_InvalidIndexRange0_7:
6371 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 7].");
6372 case Match_InvalidIndexRange0_3:
6373 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 3].");
6374 case Match_InvalidIndexRange0_1:
6375 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 1].");
6376 case Match_InvalidSVEIndexRange0_63:
6377 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 63].");
6378 case Match_InvalidSVEIndexRange0_31:
6379 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 31].");
6380 case Match_InvalidSVEIndexRange0_15:
6381 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 15].");
6382 case Match_InvalidSVEIndexRange0_7:
6383 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 7].");
6384 case Match_InvalidSVEIndexRange0_3:
6385 return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 3].");
6386 case Match_InvalidLabel:
6387 return Error(L: Loc, Msg: "expected label or encodable integer pc offset");
6388 case Match_MRS:
6389 return Error(L: Loc, Msg: "expected readable system register");
6390 case Match_MSR:
6391 case Match_InvalidSVCR:
6392 return Error(L: Loc, Msg: "expected writable system register or pstate");
6393 case Match_InvalidComplexRotationEven:
6394 return Error(L: Loc, Msg: "complex rotation must be 0, 90, 180 or 270.");
6395 case Match_InvalidComplexRotationOdd:
6396 return Error(L: Loc, Msg: "complex rotation must be 90 or 270.");
6397 case Match_MnemonicFail: {
6398 std::string Suggestion = AArch64MnemonicSpellCheck(
6399 S: ((AArch64Operand &)*Operands[0]).getToken(),
6400 FBS: ComputeAvailableFeatures(FB: STI->getFeatureBits()));
6401 return Error(L: Loc, Msg: "unrecognized instruction mnemonic" + Suggestion);
6402 }
6403 case Match_InvalidGPR64shifted8:
6404 return Error(L: Loc, Msg: "register must be x0..x30 or xzr, without shift");
6405 case Match_InvalidGPR64shifted16:
6406 return Error(L: Loc, Msg: "register must be x0..x30 or xzr, with required shift 'lsl #1'");
6407 case Match_InvalidGPR64shifted32:
6408 return Error(L: Loc, Msg: "register must be x0..x30 or xzr, with required shift 'lsl #2'");
6409 case Match_InvalidGPR64shifted64:
6410 return Error(L: Loc, Msg: "register must be x0..x30 or xzr, with required shift 'lsl #3'");
6411 case Match_InvalidGPR64shifted128:
6412 return Error(
6413 L: Loc, Msg: "register must be x0..x30 or xzr, with required shift 'lsl #4'");
6414 case Match_InvalidGPR64NoXZRshifted8:
6415 return Error(L: Loc, Msg: "register must be x0..x30 without shift");
6416 case Match_InvalidGPR64NoXZRshifted16:
6417 return Error(L: Loc, Msg: "register must be x0..x30 with required shift 'lsl #1'");
6418 case Match_InvalidGPR64NoXZRshifted32:
6419 return Error(L: Loc, Msg: "register must be x0..x30 with required shift 'lsl #2'");
6420 case Match_InvalidGPR64NoXZRshifted64:
6421 return Error(L: Loc, Msg: "register must be x0..x30 with required shift 'lsl #3'");
6422 case Match_InvalidGPR64NoXZRshifted128:
6423 return Error(L: Loc, Msg: "register must be x0..x30 with required shift 'lsl #4'");
6424 case Match_InvalidZPR32UXTW8:
6425 case Match_InvalidZPR32SXTW8:
6426 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
6427 case Match_InvalidZPR32UXTW16:
6428 case Match_InvalidZPR32SXTW16:
6429 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
6430 case Match_InvalidZPR32UXTW32:
6431 case Match_InvalidZPR32SXTW32:
6432 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
6433 case Match_InvalidZPR32UXTW64:
6434 case Match_InvalidZPR32SXTW64:
6435 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
6436 case Match_InvalidZPR64UXTW8:
6437 case Match_InvalidZPR64SXTW8:
6438 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
6439 case Match_InvalidZPR64UXTW16:
6440 case Match_InvalidZPR64SXTW16:
6441 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
6442 case Match_InvalidZPR64UXTW32:
6443 case Match_InvalidZPR64SXTW32:
6444 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
6445 case Match_InvalidZPR64UXTW64:
6446 case Match_InvalidZPR64SXTW64:
6447 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
6448 case Match_InvalidZPR32LSL8:
6449 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s'");
6450 case Match_InvalidZPR32LSL16:
6451 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
6452 case Match_InvalidZPR32LSL32:
6453 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
6454 case Match_InvalidZPR32LSL64:
6455 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
6456 case Match_InvalidZPR64LSL8:
6457 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d'");
6458 case Match_InvalidZPR64LSL16:
6459 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
6460 case Match_InvalidZPR64LSL32:
6461 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
6462 case Match_InvalidZPR64LSL64:
6463 return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
6464 case Match_InvalidZPR0:
6465 return Error(L: Loc, Msg: "expected register without element width suffix");
6466 case Match_InvalidZPR8:
6467 case Match_InvalidZPR16:
6468 case Match_InvalidZPR32:
6469 case Match_InvalidZPR64:
6470 case Match_InvalidZPR128:
6471 return Error(L: Loc, Msg: "invalid element width");
6472 case Match_InvalidZPR_3b8:
6473 return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.b..z7.b");
6474 case Match_InvalidZPR_3b16:
6475 return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.h..z7.h");
6476 case Match_InvalidZPR_3b32:
6477 return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.s..z7.s");
6478 case Match_InvalidZPR_4b8:
6479 return Error(L: Loc,
6480 Msg: "Invalid restricted vector register, expected z0.b..z15.b");
6481 case Match_InvalidZPR_4b16:
6482 return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.h..z15.h");
6483 case Match_InvalidZPR_4b32:
6484 return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.s..z15.s");
6485 case Match_InvalidZPR_4b64:
6486 return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.d..z15.d");
6487 case Match_InvalidZPRMul2_Lo8:
6488 return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
6489 "register in z0.b..z14.b");
6490 case Match_InvalidZPRMul2_Hi8:
6491 return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
6492 "register in z16.b..z30.b");
6493 case Match_InvalidZPRMul2_Lo16:
6494 return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
6495 "register in z0.h..z14.h");
6496 case Match_InvalidZPRMul2_Hi16:
6497 return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
6498 "register in z16.h..z30.h");
6499 case Match_InvalidZPRMul2_Lo32:
6500 return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
6501 "register in z0.s..z14.s");
6502 case Match_InvalidZPRMul2_Hi32:
6503 return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
6504 "register in z16.s..z30.s");
6505 case Match_InvalidZPRMul2_Lo64:
6506 return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
6507 "register in z0.d..z14.d");
6508 case Match_InvalidZPRMul2_Hi64:
6509 return Error(L: Loc, Msg: "Invalid restricted vector register, expected even "
6510 "register in z16.d..z30.d");
6511 case Match_InvalidZPR_K0:
6512 return Error(L: Loc, Msg: "invalid restricted vector register, expected register "
6513 "in z20..z23 or z28..z31");
6514 case Match_InvalidSVEPattern:
6515 return Error(L: Loc, Msg: "invalid predicate pattern");
6516 case Match_InvalidSVEPPRorPNRAnyReg:
6517 case Match_InvalidSVEPPRorPNRBReg:
6518 case Match_InvalidSVEPredicateAnyReg:
6519 case Match_InvalidSVEPredicateBReg:
6520 case Match_InvalidSVEPredicateHReg:
6521 case Match_InvalidSVEPredicateSReg:
6522 case Match_InvalidSVEPredicateDReg:
6523 return Error(L: Loc, Msg: "invalid predicate register.");
6524 case Match_InvalidSVEPredicate3bAnyReg:
6525 return Error(L: Loc, Msg: "invalid restricted predicate register, expected p0..p7 (without element suffix)");
6526 case Match_InvalidSVEPNPredicateB_p8to15Reg:
6527 case Match_InvalidSVEPNPredicateH_p8to15Reg:
6528 case Match_InvalidSVEPNPredicateS_p8to15Reg:
6529 case Match_InvalidSVEPNPredicateD_p8to15Reg:
6530 return Error(L: Loc, Msg: "Invalid predicate register, expected PN in range "
6531 "pn8..pn15 with element suffix.");
6532 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6533 return Error(L: Loc, Msg: "invalid restricted predicate-as-counter register "
6534 "expected pn8..pn15");
6535 case Match_InvalidSVEPNPredicateBReg:
6536 case Match_InvalidSVEPNPredicateHReg:
6537 case Match_InvalidSVEPNPredicateSReg:
6538 case Match_InvalidSVEPNPredicateDReg:
6539 return Error(L: Loc, Msg: "Invalid predicate register, expected PN in range "
6540 "pn0..pn15 with element suffix.");
6541 case Match_InvalidSVEVecLenSpecifier:
6542 return Error(L: Loc, Msg: "Invalid vector length specifier, expected VLx2 or VLx4");
6543 case Match_InvalidSVEPredicateListMul2x8:
6544 case Match_InvalidSVEPredicateListMul2x16:
6545 case Match_InvalidSVEPredicateListMul2x32:
6546 case Match_InvalidSVEPredicateListMul2x64:
6547 return Error(L: Loc, Msg: "Invalid vector list, expected list with 2 consecutive "
6548 "predicate registers, where the first vector is a multiple of 2 "
6549 "and with correct element type");
6550 case Match_InvalidSVEExactFPImmOperandHalfOne:
6551 return Error(L: Loc, Msg: "Invalid floating point constant, expected 0.5 or 1.0.");
6552 case Match_InvalidSVEExactFPImmOperandHalfTwo:
6553 return Error(L: Loc, Msg: "Invalid floating point constant, expected 0.5 or 2.0.");
6554 case Match_InvalidSVEExactFPImmOperandZeroOne:
6555 return Error(L: Loc, Msg: "Invalid floating point constant, expected 0.0 or 1.0.");
6556 case Match_InvalidMatrixTileVectorH8:
6557 case Match_InvalidMatrixTileVectorV8:
6558 return Error(L: Loc, Msg: "invalid matrix operand, expected za0h.b or za0v.b");
6559 case Match_InvalidMatrixTileVectorH16:
6560 case Match_InvalidMatrixTileVectorV16:
6561 return Error(L: Loc,
6562 Msg: "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
6563 case Match_InvalidMatrixTileVectorH32:
6564 case Match_InvalidMatrixTileVectorV32:
6565 return Error(L: Loc,
6566 Msg: "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
6567 case Match_InvalidMatrixTileVectorH64:
6568 case Match_InvalidMatrixTileVectorV64:
6569 return Error(L: Loc,
6570 Msg: "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
6571 case Match_InvalidMatrixTileVectorH128:
6572 case Match_InvalidMatrixTileVectorV128:
6573 return Error(L: Loc,
6574 Msg: "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
6575 case Match_InvalidMatrixTile16:
6576 return Error(L: Loc, Msg: "invalid matrix operand, expected za[0-1].h");
6577 case Match_InvalidMatrixTile32:
6578 return Error(L: Loc, Msg: "invalid matrix operand, expected za[0-3].s");
6579 case Match_InvalidMatrixTile64:
6580 return Error(L: Loc, Msg: "invalid matrix operand, expected za[0-7].d");
6581 case Match_InvalidMatrix:
6582 return Error(L: Loc, Msg: "invalid matrix operand, expected za");
6583 case Match_InvalidMatrix8:
6584 return Error(L: Loc, Msg: "invalid matrix operand, expected suffix .b");
6585 case Match_InvalidMatrix16:
6586 return Error(L: Loc, Msg: "invalid matrix operand, expected suffix .h");
6587 case Match_InvalidMatrix32:
6588 return Error(L: Loc, Msg: "invalid matrix operand, expected suffix .s");
6589 case Match_InvalidMatrix64:
6590 return Error(L: Loc, Msg: "invalid matrix operand, expected suffix .d");
6591 case Match_InvalidMatrixIndexGPR32_12_15:
6592 return Error(L: Loc, Msg: "operand must be a register in range [w12, w15]");
6593 case Match_InvalidMatrixIndexGPR32_8_11:
6594 return Error(L: Loc, Msg: "operand must be a register in range [w8, w11]");
6595 case Match_InvalidSVEVectorList2x8Mul2:
6596 case Match_InvalidSVEVectorList2x16Mul2:
6597 case Match_InvalidSVEVectorList2x32Mul2:
6598 case Match_InvalidSVEVectorList2x64Mul2:
6599 case Match_InvalidSVEVectorList2x128Mul2:
6600 return Error(L: Loc, Msg: "Invalid vector list, expected list with 2 consecutive "
6601 "SVE vectors, where the first vector is a multiple of 2 "
6602 "and with matching element types");
6603 case Match_InvalidSVEVectorList2x8Mul2_Lo:
6604 case Match_InvalidSVEVectorList2x16Mul2_Lo:
6605 case Match_InvalidSVEVectorList2x32Mul2_Lo:
6606 case Match_InvalidSVEVectorList2x64Mul2_Lo:
6607 return Error(L: Loc, Msg: "Invalid vector list, expected list with 2 consecutive "
6608 "SVE vectors in the range z0-z14, where the first vector "
6609 "is a multiple of 2 "
6610 "and with matching element types");
6611 case Match_InvalidSVEVectorList2x8Mul2_Hi:
6612 case Match_InvalidSVEVectorList2x16Mul2_Hi:
6613 case Match_InvalidSVEVectorList2x32Mul2_Hi:
6614 case Match_InvalidSVEVectorList2x64Mul2_Hi:
6615 return Error(L: Loc,
6616 Msg: "Invalid vector list, expected list with 2 consecutive "
6617 "SVE vectors in the range z16-z30, where the first vector "
6618 "is a multiple of 2 "
6619 "and with matching element types");
6620 case Match_InvalidSVEVectorList4x8Mul4:
6621 case Match_InvalidSVEVectorList4x16Mul4:
6622 case Match_InvalidSVEVectorList4x32Mul4:
6623 case Match_InvalidSVEVectorList4x64Mul4:
6624 case Match_InvalidSVEVectorList4x128Mul4:
6625 return Error(L: Loc, Msg: "Invalid vector list, expected list with 4 consecutive "
6626 "SVE vectors, where the first vector is a multiple of 4 "
6627 "and with matching element types");
6628 case Match_InvalidLookupTable:
6629 return Error(L: Loc, Msg: "Invalid lookup table, expected zt0");
6630 case Match_InvalidSVEVectorListStrided2x8:
6631 case Match_InvalidSVEVectorListStrided2x16:
6632 case Match_InvalidSVEVectorListStrided2x32:
6633 case Match_InvalidSVEVectorListStrided2x64:
6634 return Error(
6635 L: Loc,
6636 Msg: "Invalid vector list, expected list with each SVE vector in the list "
6637 "8 registers apart, and the first register in the range [z0, z7] or "
6638 "[z16, z23] and with correct element type");
6639 case Match_InvalidSVEVectorListStrided4x8:
6640 case Match_InvalidSVEVectorListStrided4x16:
6641 case Match_InvalidSVEVectorListStrided4x32:
6642 case Match_InvalidSVEVectorListStrided4x64:
6643 return Error(
6644 L: Loc,
6645 Msg: "Invalid vector list, expected list with each SVE vector in the list "
6646 "4 registers apart, and the first register in the range [z0, z3] or "
6647 "[z16, z19] and with correct element type");
6648 case Match_AddSubLSLImm3ShiftLarge:
6649 return Error(L: Loc,
6650 Msg: "expected 'lsl' with optional integer in range [0, 7]");
6651 default:
6652 llvm_unreachable("unexpected error code!");
6653 }
6654}
6655
6656static const char *getSubtargetFeatureName(uint64_t Val);
6657
// Match the parsed operand list against the generated matcher tables and, on
// success, emit the instruction to \p Out. Returns true if an error was
// diagnosed, false if an instruction was emitted.
//
// Before matching, several aliases that the InstAlias machinery cannot
// express are rewritten in place by mutating \p Operands (lsl->ubfm,
// bfc->bfm, bfi/sbfiz/ubfiz, bfxil/sbfx/ubfx, movi.2d workaround, and
// W<->X register twiddling for the sxt*/uxt* aliases).
bool AArch64AsmParser::matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                               OperandVector &Operands,
                                               MCStreamer &Out,
                                               uint64_t &ErrorInfo,
                                               bool MatchingInlineAsm) {
  assert(!Operands.empty() && "Unexpected empty operand list!");
  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
  assert(Op.isToken() && "Leading operand should always be a mnemonic!");

  StringRef Tok = Op.getToken();
  unsigned NumOperands = Operands.size();

  // Rewrite "lsl Rd, Rn, #imm" into the underlying "ubfm" form:
  // immr = (width - imm) mod width, imms = width - 1 - imm.
  if (NumOperands == 4 && Tok == "lsl") {
    AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
    AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
    if (Op2.isScalarReg() && Op3.isImm()) {
      const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Val: Op3.getImm());
      if (Op3CE) {
        uint64_t Op3Val = Op3CE->getValue();
        uint64_t NewOp3Val = 0;
        uint64_t NewOp4Val = 0;
        if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
                Reg: Op2.getReg())) {
          NewOp3Val = (32 - Op3Val) & 0x1f;
          NewOp4Val = 31 - Op3Val;
        } else {
          NewOp3Val = (64 - Op3Val) & 0x3f;
          NewOp4Val = 63 - Op3Val;
        }

        const MCExpr *NewOp3 = MCConstantExpr::create(Value: NewOp3Val, Ctx&: getContext());
        const MCExpr *NewOp4 = MCConstantExpr::create(Value: NewOp4Val, Ctx&: getContext());

        // push_back makes NewOp4 operand index 4; Operands[3] is then
        // replaced with the rewritten immr value.
        Operands[0] =
            AArch64Operand::CreateToken(Str: "ubfm", S: Op.getStartLoc(), Ctx&: getContext());
        Operands.push_back(Elt: AArch64Operand::CreateImm(
            Val: NewOp4, S: Op3.getStartLoc(), E: Op3.getEndLoc(), Ctx&: getContext()));
        Operands[3] = AArch64Operand::CreateImm(Val: NewOp3, S: Op3.getStartLoc(),
                                                E: Op3.getEndLoc(), Ctx&: getContext());
      }
    }
  } else if (NumOperands == 4 && Tok == "bfc") {
    // FIXME: Horrible hack to handle BFC->BFM alias.
    // LSBOp/WidthOp are deliberately copied by value: Operands[2]/[3] are
    // overwritten below while their source locations are still needed.
    AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
    AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
    AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);

    if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
      const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(Val: LSBOp.getImm());
      const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(Val: WidthOp.getImm());

      if (LSBCE && WidthCE) {
        uint64_t LSB = LSBCE->getValue();
        uint64_t Width = WidthCE->getValue();

        uint64_t RegWidth = 0;
        if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
                Reg: Op1.getReg()))
          RegWidth = 64;
        else
          RegWidth = 32;

        // NOTE(review): these messages hard-code the 32-bit ranges even when
        // RegWidth == 64 ([0, 63] / [1, 64]) — confirm whether intentional.
        if (LSB >= RegWidth)
          return Error(L: LSBOp.getStartLoc(),
                       Msg: "expected integer in range [0, 31]");
        if (Width < 1 || Width > RegWidth)
          return Error(L: WidthOp.getStartLoc(),
                       Msg: "expected integer in range [1, 32]");

        uint64_t ImmR = 0;
        if (RegWidth == 32)
          ImmR = (32 - LSB) & 0x1f;
        else
          ImmR = (64 - LSB) & 0x3f;

        uint64_t ImmS = Width - 1;

        if (ImmR != 0 && ImmS >= ImmR)
          return Error(L: WidthOp.getStartLoc(),
                       Msg: "requested insert overflows register");

        // BFC Rd, #lsb, #width  ->  BFM Rd, WZR/XZR, #immr, #imms.
        const MCExpr *ImmRExpr = MCConstantExpr::create(Value: ImmR, Ctx&: getContext());
        const MCExpr *ImmSExpr = MCConstantExpr::create(Value: ImmS, Ctx&: getContext());
        Operands[0] =
            AArch64Operand::CreateToken(Str: "bfm", S: Op.getStartLoc(), Ctx&: getContext());
        Operands[2] = AArch64Operand::CreateReg(
            Reg: RegWidth == 32 ? AArch64::WZR : AArch64::XZR, Kind: RegKind::Scalar,
            S: SMLoc(), E: SMLoc(), Ctx&: getContext());
        Operands[3] = AArch64Operand::CreateImm(
            Val: ImmRExpr, S: LSBOp.getStartLoc(), E: LSBOp.getEndLoc(), Ctx&: getContext());
        Operands.emplace_back(
            Args: AArch64Operand::CreateImm(Val: ImmSExpr, S: WidthOp.getStartLoc(),
                                      E: WidthOp.getEndLoc(), Ctx&: getContext()));
      }
    }
  } else if (NumOperands == 5) {
    // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
    // UBFIZ -> UBFM aliases.
    if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
      AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
      AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
      AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);

      if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
        const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Val: Op3.getImm());
        const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Val: Op4.getImm());

        if (Op3CE && Op4CE) {
          uint64_t Op3Val = Op3CE->getValue();
          uint64_t Op4Val = Op4CE->getValue();

          uint64_t RegWidth = 0;
          if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
                  Reg: Op1.getReg()))
            RegWidth = 64;
          else
            RegWidth = 32;

          if (Op3Val >= RegWidth)
            return Error(L: Op3.getStartLoc(),
                         Msg: "expected integer in range [0, 31]");
          if (Op4Val < 1 || Op4Val > RegWidth)
            return Error(L: Op4.getStartLoc(),
                         Msg: "expected integer in range [1, 32]");

          // immr = (width - lsb) mod width, imms = width - 1.
          uint64_t NewOp3Val = 0;
          if (RegWidth == 32)
            NewOp3Val = (32 - Op3Val) & 0x1f;
          else
            NewOp3Val = (64 - Op3Val) & 0x3f;

          uint64_t NewOp4Val = Op4Val - 1;

          if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
            return Error(L: Op4.getStartLoc(),
                         Msg: "requested insert overflows register");

          const MCExpr *NewOp3 =
              MCConstantExpr::create(Value: NewOp3Val, Ctx&: getContext());
          const MCExpr *NewOp4 =
              MCConstantExpr::create(Value: NewOp4Val, Ctx&: getContext());
          Operands[3] = AArch64Operand::CreateImm(
              Val: NewOp3, S: Op3.getStartLoc(), E: Op3.getEndLoc(), Ctx&: getContext());
          Operands[4] = AArch64Operand::CreateImm(
              Val: NewOp4, S: Op4.getStartLoc(), E: Op4.getEndLoc(), Ctx&: getContext());
          if (Tok == "bfi")
            Operands[0] = AArch64Operand::CreateToken(Str: "bfm", S: Op.getStartLoc(),
                                                      Ctx&: getContext());
          else if (Tok == "sbfiz")
            Operands[0] = AArch64Operand::CreateToken(Str: "sbfm", S: Op.getStartLoc(),
                                                      Ctx&: getContext());
          else if (Tok == "ubfiz")
            Operands[0] = AArch64Operand::CreateToken(Str: "ubfm", S: Op.getStartLoc(),
                                                      Ctx&: getContext());
          else
            llvm_unreachable("No valid mnemonic for alias?");
        }
      }

      // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
      // UBFX -> UBFM aliases.
      // (The NumOperands == 5 re-check below is redundant inside this branch.)
    } else if (NumOperands == 5 &&
               (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
      AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
      AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
      AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);

      if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
        const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Val: Op3.getImm());
        const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Val: Op4.getImm());

        if (Op3CE && Op4CE) {
          uint64_t Op3Val = Op3CE->getValue();
          uint64_t Op4Val = Op4CE->getValue();

          uint64_t RegWidth = 0;
          if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
                  Reg: Op1.getReg()))
            RegWidth = 64;
          else
            RegWidth = 32;

          if (Op3Val >= RegWidth)
            return Error(L: Op3.getStartLoc(),
                         Msg: "expected integer in range [0, 31]");
          if (Op4Val < 1 || Op4Val > RegWidth)
            return Error(L: Op4.getStartLoc(),
                         Msg: "expected integer in range [1, 32]");

          // imms = lsb + width - 1; immr stays the lsb (Operands[3]).
          uint64_t NewOp4Val = Op3Val + Op4Val - 1;

          if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
            return Error(L: Op4.getStartLoc(),
                         Msg: "requested extract overflows register");

          const MCExpr *NewOp4 =
              MCConstantExpr::create(Value: NewOp4Val, Ctx&: getContext());
          Operands[4] = AArch64Operand::CreateImm(
              Val: NewOp4, S: Op4.getStartLoc(), E: Op4.getEndLoc(), Ctx&: getContext());
          if (Tok == "bfxil")
            Operands[0] = AArch64Operand::CreateToken(Str: "bfm", S: Op.getStartLoc(),
                                                      Ctx&: getContext());
          else if (Tok == "sbfx")
            Operands[0] = AArch64Operand::CreateToken(Str: "sbfm", S: Op.getStartLoc(),
                                                      Ctx&: getContext());
          else if (Tok == "ubfx")
            Operands[0] = AArch64Operand::CreateToken(Str: "ubfm", S: Op.getStartLoc(),
                                                      Ctx&: getContext());
          else
            llvm_unreachable("No valid mnemonic for alias?");
        }
      }
    }
  }

  // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
  // instruction for FP registers correctly in some rare circumstances. Convert
  // it to a safe instruction and warn (because silently changing someone's
  // assembly is rude).
  if (getSTI().hasFeature(Feature: AArch64::FeatureZCZeroingFPWorkaround) &&
      NumOperands == 4 && Tok == "movi") {
    AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
    AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
    AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
    // The suffix token may be either operand 1 ("movi.2d v0, #0") or
    // operand 2 ("movi v0.2d, #0").
    if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
        (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
      StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
      if (Suffix.lower() == ".2d" &&
          cast<MCConstantExpr>(Val: Op3.getImm())->getValue() == 0) {
        Warning(L: IDLoc, Msg: "instruction movi.2d with immediate #0 may not function"
                " correctly on this CPU, converting to equivalent movi.16b");
        // Switch the suffix to .16b.
        unsigned Idx = Op1.isToken() ? 1 : 2;
        Operands[Idx] =
            AArch64Operand::CreateToken(Str: ".16b", S: IDLoc, Ctx&: getContext());
      }
    }
  }

  // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
  // InstAlias can't quite handle this since the reg classes aren't
  // subclasses.
  if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
    // The source register can be Wn here, but the matcher expects a
    // GPR64. Twiddle it here if necessary.
    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
    if (Op.isScalarReg()) {
      MCRegister Reg = getXRegFromWReg(Reg: Op.getReg());
      Operands[2] = AArch64Operand::CreateReg(Reg, Kind: RegKind::Scalar,
                                              S: Op.getStartLoc(), E: Op.getEndLoc(),
                                              Ctx&: getContext());
    }
  }
  // FIXME: Likewise for sxt[bh] with a Xd dst operand
  else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
    if (Op.isScalarReg() &&
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Reg: Op.getReg())) {
      // The source register can be Wn here, but the matcher expects a
      // GPR64. Twiddle it here if necessary.
      // (This inner Op deliberately shadows the outer one: it rebinds to the
      // source operand, Operands[2].)
      AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
      if (Op.isScalarReg()) {
        MCRegister Reg = getXRegFromWReg(Reg: Op.getReg());
        Operands[2] = AArch64Operand::CreateReg(Reg, Kind: RegKind::Scalar,
                                                S: Op.getStartLoc(),
                                                E: Op.getEndLoc(), Ctx&: getContext());
      }
    }
  }
  // FIXME: Likewise for uxt[bh] with a Xd dst operand
  else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
    if (Op.isScalarReg() &&
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Reg: Op.getReg())) {
      // The source register can be Wn here, but the matcher expects a
      // GPR32. Twiddle it here if necessary.
      // (Here the destination, Operands[1], is narrowed X -> W.)
      AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
      if (Op.isScalarReg()) {
        MCRegister Reg = getWRegFromXReg(Reg: Op.getReg());
        Operands[1] = AArch64Operand::CreateReg(Reg, Kind: RegKind::Scalar,
                                                S: Op.getStartLoc(),
                                                E: Op.getEndLoc(), Ctx&: getContext());
      }
    }
  }

  MCInst Inst;
  FeatureBitset MissingFeatures;
  // First try to match against the secondary set of tables containing the
  // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
  unsigned MatchResult =
      MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
                           matchingInlineAsm: MatchingInlineAsm, VariantID: 1);

  // If that fails, try against the alternate table containing long-form NEON:
  // "fadd v0.2s, v1.2s, v2.2s"
  if (MatchResult != Match_Success) {
    // But first, save the short-form match result: we can use it in case the
    // long-form match also fails.
    auto ShortFormNEONErrorInfo = ErrorInfo;
    auto ShortFormNEONMatchResult = MatchResult;
    auto ShortFormNEONMissingFeatures = MissingFeatures;

    MatchResult =
        MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
                             matchingInlineAsm: MatchingInlineAsm, VariantID: 0);

    // Now, both matches failed, and the long-form match failed on the mnemonic
    // suffix token operand. The short-form match failure is probably more
    // relevant: use it instead.
    if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
        Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
        ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
      MatchResult = ShortFormNEONMatchResult;
      ErrorInfo = ShortFormNEONErrorInfo;
      MissingFeatures = ShortFormNEONMissingFeatures;
    }
  }

  switch (MatchResult) {
  case Match_Success: {
    // Perform range checking and other semantic validations
    SmallVector<SMLoc, 8> OperandLocs;
    NumOperands = Operands.size();
    for (unsigned i = 1; i < NumOperands; ++i)
      OperandLocs.push_back(Elt: Operands[i]->getStartLoc());
    if (validateInstruction(Inst, IDLoc, Loc&: OperandLocs))
      return true;

    Inst.setLoc(IDLoc);
    Out.emitInstruction(Inst, STI: getSTI());
    return false;
  }
  case Match_MissingFeature: {
    assert(MissingFeatures.any() && "Unknown missing feature!");
    // Special case the error message for the very common case where only
    // a single subtarget feature is missing (neon, e.g.).
    std::string Msg = "instruction requires:";
    for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
      if (MissingFeatures[i]) {
        Msg += " ";
        Msg += getSubtargetFeatureName(Val: i);
      }
    }
    return Error(L: IDLoc, Msg);
  }
  case Match_MnemonicFail:
    return showMatchError(Loc: IDLoc, ErrCode: MatchResult, ErrorInfo, Operands);
  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;

    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size())
        return Error(L: IDLoc, Msg: "too few operands for instruction",
                     Range: SMRange(IDLoc, getTok().getLoc()));

      ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    // If the match failed on a suffix token operand, tweak the diagnostic
    // accordingly.
    // NOTE(review): this indexes Operands[ErrorInfo] outside the
    // `ErrorInfo != ~0ULL` guard above; it appears to rely on ErrorInfo
    // always being valid for Match_InvalidOperand — confirm.
    if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
        ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
      MatchResult = Match_InvalidSuffix;

    return showMatchError(Loc: ErrorLoc, ErrCode: MatchResult, ErrorInfo, Operands);
  }
  // All remaining match-result codes share one handler: point at the
  // offending operand and let showMatchError produce the specific text.
  case Match_InvalidTiedOperand:
  case Match_InvalidMemoryIndexed1:
  case Match_InvalidMemoryIndexed2:
  case Match_InvalidMemoryIndexed4:
  case Match_InvalidMemoryIndexed8:
  case Match_InvalidMemoryIndexed16:
  case Match_InvalidCondCode:
  case Match_AddSubLSLImm3ShiftLarge:
  case Match_AddSubRegExtendSmall:
  case Match_AddSubRegExtendLarge:
  case Match_AddSubSecondSource:
  case Match_LogicalSecondSource:
  case Match_AddSubRegShift32:
  case Match_AddSubRegShift64:
  case Match_InvalidMovImm32Shift:
  case Match_InvalidMovImm64Shift:
  case Match_InvalidFPImm:
  case Match_InvalidMemoryWExtend8:
  case Match_InvalidMemoryWExtend16:
  case Match_InvalidMemoryWExtend32:
  case Match_InvalidMemoryWExtend64:
  case Match_InvalidMemoryWExtend128:
  case Match_InvalidMemoryXExtend8:
  case Match_InvalidMemoryXExtend16:
  case Match_InvalidMemoryXExtend32:
  case Match_InvalidMemoryXExtend64:
  case Match_InvalidMemoryXExtend128:
  case Match_InvalidMemoryIndexed1SImm4:
  case Match_InvalidMemoryIndexed2SImm4:
  case Match_InvalidMemoryIndexed3SImm4:
  case Match_InvalidMemoryIndexed4SImm4:
  case Match_InvalidMemoryIndexed1SImm6:
  case Match_InvalidMemoryIndexed16SImm4:
  case Match_InvalidMemoryIndexed32SImm4:
  case Match_InvalidMemoryIndexed4SImm7:
  case Match_InvalidMemoryIndexed8SImm7:
  case Match_InvalidMemoryIndexed16SImm7:
  case Match_InvalidMemoryIndexed8UImm5:
  case Match_InvalidMemoryIndexed8UImm3:
  case Match_InvalidMemoryIndexed4UImm5:
  case Match_InvalidMemoryIndexed2UImm5:
  case Match_InvalidMemoryIndexed1UImm6:
  case Match_InvalidMemoryIndexed2UImm6:
  case Match_InvalidMemoryIndexed4UImm6:
  case Match_InvalidMemoryIndexed8UImm6:
  case Match_InvalidMemoryIndexed16UImm6:
  case Match_InvalidMemoryIndexedSImm6:
  case Match_InvalidMemoryIndexedSImm5:
  case Match_InvalidMemoryIndexedSImm8:
  case Match_InvalidMemoryIndexedSImm9:
  case Match_InvalidMemoryIndexed16SImm9:
  case Match_InvalidMemoryIndexed8SImm10:
  case Match_InvalidImm0_0:
  case Match_InvalidImm0_1:
  case Match_InvalidImm0_3:
  case Match_InvalidImm0_7:
  case Match_InvalidImm0_15:
  case Match_InvalidImm0_31:
  case Match_InvalidImm0_63:
  case Match_InvalidImm0_127:
  case Match_InvalidImm0_255:
  case Match_InvalidImm0_65535:
  case Match_InvalidImm1_8:
  case Match_InvalidImm1_16:
  case Match_InvalidImm1_32:
  case Match_InvalidImm1_64:
  case Match_InvalidImmM1_62:
  case Match_InvalidMemoryIndexedRange2UImm0:
  case Match_InvalidMemoryIndexedRange2UImm1:
  case Match_InvalidMemoryIndexedRange2UImm2:
  case Match_InvalidMemoryIndexedRange2UImm3:
  case Match_InvalidMemoryIndexedRange4UImm0:
  case Match_InvalidMemoryIndexedRange4UImm1:
  case Match_InvalidMemoryIndexedRange4UImm2:
  case Match_InvalidSVEAddSubImm8:
  case Match_InvalidSVEAddSubImm16:
  case Match_InvalidSVEAddSubImm32:
  case Match_InvalidSVEAddSubImm64:
  case Match_InvalidSVECpyImm8:
  case Match_InvalidSVECpyImm16:
  case Match_InvalidSVECpyImm32:
  case Match_InvalidSVECpyImm64:
  case Match_InvalidIndexRange0_0:
  case Match_InvalidIndexRange1_1:
  case Match_InvalidIndexRange0_15:
  case Match_InvalidIndexRange0_7:
  case Match_InvalidIndexRange0_3:
  case Match_InvalidIndexRange0_1:
  case Match_InvalidSVEIndexRange0_63:
  case Match_InvalidSVEIndexRange0_31:
  case Match_InvalidSVEIndexRange0_15:
  case Match_InvalidSVEIndexRange0_7:
  case Match_InvalidSVEIndexRange0_3:
  case Match_InvalidLabel:
  case Match_InvalidComplexRotationEven:
  case Match_InvalidComplexRotationOdd:
  case Match_InvalidGPR64shifted8:
  case Match_InvalidGPR64shifted16:
  case Match_InvalidGPR64shifted32:
  case Match_InvalidGPR64shifted64:
  case Match_InvalidGPR64shifted128:
  case Match_InvalidGPR64NoXZRshifted8:
  case Match_InvalidGPR64NoXZRshifted16:
  case Match_InvalidGPR64NoXZRshifted32:
  case Match_InvalidGPR64NoXZRshifted64:
  case Match_InvalidGPR64NoXZRshifted128:
  case Match_InvalidZPR32UXTW8:
  case Match_InvalidZPR32UXTW16:
  case Match_InvalidZPR32UXTW32:
  case Match_InvalidZPR32UXTW64:
  case Match_InvalidZPR32SXTW8:
  case Match_InvalidZPR32SXTW16:
  case Match_InvalidZPR32SXTW32:
  case Match_InvalidZPR32SXTW64:
  case Match_InvalidZPR64UXTW8:
  case Match_InvalidZPR64SXTW8:
  case Match_InvalidZPR64UXTW16:
  case Match_InvalidZPR64SXTW16:
  case Match_InvalidZPR64UXTW32:
  case Match_InvalidZPR64SXTW32:
  case Match_InvalidZPR64UXTW64:
  case Match_InvalidZPR64SXTW64:
  case Match_InvalidZPR32LSL8:
  case Match_InvalidZPR32LSL16:
  case Match_InvalidZPR32LSL32:
  case Match_InvalidZPR32LSL64:
  case Match_InvalidZPR64LSL8:
  case Match_InvalidZPR64LSL16:
  case Match_InvalidZPR64LSL32:
  case Match_InvalidZPR64LSL64:
  case Match_InvalidZPR0:
  case Match_InvalidZPR8:
  case Match_InvalidZPR16:
  case Match_InvalidZPR32:
  case Match_InvalidZPR64:
  case Match_InvalidZPR128:
  case Match_InvalidZPR_3b8:
  case Match_InvalidZPR_3b16:
  case Match_InvalidZPR_3b32:
  case Match_InvalidZPR_4b8:
  case Match_InvalidZPR_4b16:
  case Match_InvalidZPR_4b32:
  case Match_InvalidZPR_4b64:
  case Match_InvalidSVEPPRorPNRAnyReg:
  case Match_InvalidSVEPPRorPNRBReg:
  case Match_InvalidSVEPredicateAnyReg:
  case Match_InvalidSVEPattern:
  case Match_InvalidSVEVecLenSpecifier:
  case Match_InvalidSVEPredicateBReg:
  case Match_InvalidSVEPredicateHReg:
  case Match_InvalidSVEPredicateSReg:
  case Match_InvalidSVEPredicateDReg:
  case Match_InvalidSVEPredicate3bAnyReg:
  case Match_InvalidSVEPNPredicateB_p8to15Reg:
  case Match_InvalidSVEPNPredicateH_p8to15Reg:
  case Match_InvalidSVEPNPredicateS_p8to15Reg:
  case Match_InvalidSVEPNPredicateD_p8to15Reg:
  case Match_InvalidSVEPNPredicateAny_p8to15Reg:
  case Match_InvalidSVEPNPredicateBReg:
  case Match_InvalidSVEPNPredicateHReg:
  case Match_InvalidSVEPNPredicateSReg:
  case Match_InvalidSVEPNPredicateDReg:
  case Match_InvalidSVEPredicateListMul2x8:
  case Match_InvalidSVEPredicateListMul2x16:
  case Match_InvalidSVEPredicateListMul2x32:
  case Match_InvalidSVEPredicateListMul2x64:
  case Match_InvalidSVEExactFPImmOperandHalfOne:
  case Match_InvalidSVEExactFPImmOperandHalfTwo:
  case Match_InvalidSVEExactFPImmOperandZeroOne:
  case Match_InvalidMatrixTile16:
  case Match_InvalidMatrixTile32:
  case Match_InvalidMatrixTile64:
  case Match_InvalidMatrix:
  case Match_InvalidMatrix8:
  case Match_InvalidMatrix16:
  case Match_InvalidMatrix32:
  case Match_InvalidMatrix64:
  case Match_InvalidMatrixTileVectorH8:
  case Match_InvalidMatrixTileVectorH16:
  case Match_InvalidMatrixTileVectorH32:
  case Match_InvalidMatrixTileVectorH64:
  case Match_InvalidMatrixTileVectorH128:
  case Match_InvalidMatrixTileVectorV8:
  case Match_InvalidMatrixTileVectorV16:
  case Match_InvalidMatrixTileVectorV32:
  case Match_InvalidMatrixTileVectorV64:
  case Match_InvalidMatrixTileVectorV128:
  case Match_InvalidSVCR:
  case Match_InvalidMatrixIndexGPR32_12_15:
  case Match_InvalidMatrixIndexGPR32_8_11:
  case Match_InvalidLookupTable:
  case Match_InvalidZPRMul2_Lo8:
  case Match_InvalidZPRMul2_Hi8:
  case Match_InvalidZPRMul2_Lo16:
  case Match_InvalidZPRMul2_Hi16:
  case Match_InvalidZPRMul2_Lo32:
  case Match_InvalidZPRMul2_Hi32:
  case Match_InvalidZPRMul2_Lo64:
  case Match_InvalidZPRMul2_Hi64:
  case Match_InvalidZPR_K0:
  case Match_InvalidSVEVectorList2x8Mul2:
  case Match_InvalidSVEVectorList2x16Mul2:
  case Match_InvalidSVEVectorList2x32Mul2:
  case Match_InvalidSVEVectorList2x64Mul2:
  case Match_InvalidSVEVectorList2x128Mul2:
  case Match_InvalidSVEVectorList4x8Mul4:
  case Match_InvalidSVEVectorList4x16Mul4:
  case Match_InvalidSVEVectorList4x32Mul4:
  case Match_InvalidSVEVectorList4x64Mul4:
  case Match_InvalidSVEVectorList4x128Mul4:
  case Match_InvalidSVEVectorList2x8Mul2_Lo:
  case Match_InvalidSVEVectorList2x16Mul2_Lo:
  case Match_InvalidSVEVectorList2x32Mul2_Lo:
  case Match_InvalidSVEVectorList2x64Mul2_Lo:
  case Match_InvalidSVEVectorList2x8Mul2_Hi:
  case Match_InvalidSVEVectorList2x16Mul2_Hi:
  case Match_InvalidSVEVectorList2x32Mul2_Hi:
  case Match_InvalidSVEVectorList2x64Mul2_Hi:
  case Match_InvalidSVEVectorListStrided2x8:
  case Match_InvalidSVEVectorListStrided2x16:
  case Match_InvalidSVEVectorListStrided2x32:
  case Match_InvalidSVEVectorListStrided2x64:
  case Match_InvalidSVEVectorListStrided4x8:
  case Match_InvalidSVEVectorListStrided4x16:
  case Match_InvalidSVEVectorListStrided4x32:
  case Match_InvalidSVEVectorListStrided4x64:
  case Match_MSR:
  case Match_MRS: {
    if (ErrorInfo >= Operands.size())
      return Error(L: IDLoc, Msg: "too few operands for instruction", Range: SMRange(IDLoc, (*Operands.back()).getEndLoc()));
    // Any time we get here, there's nothing fancy to do. Just get the
    // operand SMLoc and display the diagnostic.
    SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
    if (ErrorLoc == SMLoc())
      ErrorLoc = IDLoc;
    return showMatchError(Loc: ErrorLoc, ErrCode: MatchResult, ErrorInfo, Operands);
  }
  }

  llvm_unreachable("Implement any new match types added!");
}
7269
/// ParseDirective parses the AArch64-specific assembler directives.
/// Returns false if the directive was recognized and consumed (even if its
/// sub-parser diagnosed an error), true if it is not one of ours and the
/// generic parser should handle it. Some directives are only valid for a
/// particular object-file format (Mach-O LOH, COFF SEH, ELF build
/// attributes).
bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
  const MCContext::Environment Format = getContext().getObjectFileType();
  bool IsMachO = Format == MCContext::IsMachO;
  bool IsCOFF = Format == MCContext::IsCOFF;
  bool IsELF = Format == MCContext::IsELF;

  // Directive matching is case-insensitive.
  auto IDVal = DirectiveID.getIdentifier().lower();
  SMLoc Loc = DirectiveID.getLoc();
  // Format-independent directives.
  if (IDVal == ".arch")
    parseDirectiveArch(L: Loc);
  else if (IDVal == ".cpu")
    parseDirectiveCPU(L: Loc);
  else if (IDVal == ".tlsdesccall")
    parseDirectiveTLSDescCall(L: Loc);
  else if (IDVal == ".ltorg" || IDVal == ".pool")
    parseDirectiveLtorg(L: Loc);
  else if (IDVal == ".unreq")
    parseDirectiveUnreq(L: Loc);
  else if (IDVal == ".inst")
    parseDirectiveInst(L: Loc);
  else if (IDVal == ".cfi_negate_ra_state")
    parseDirectiveCFINegateRAState();
  else if (IDVal == ".cfi_negate_ra_state_with_pc")
    parseDirectiveCFINegateRAStateWithPC();
  else if (IDVal == ".cfi_b_key_frame")
    parseDirectiveCFIBKeyFrame();
  else if (IDVal == ".cfi_mte_tagged_frame")
    parseDirectiveCFIMTETaggedFrame();
  else if (IDVal == ".arch_extension")
    parseDirectiveArchExtension(L: Loc);
  else if (IDVal == ".variant_pcs")
    parseDirectiveVariantPCS(L: Loc);
  // Mach-O only: linker optimization hints.
  else if (IsMachO) {
    if (IDVal == MCLOHDirectiveName())
      parseDirectiveLOH(LOH: IDVal, L: Loc);
    else
      return true;
  // COFF only: Windows SEH unwind directives.
  } else if (IsCOFF) {
    if (IDVal == ".seh_stackalloc")
      parseDirectiveSEHAllocStack(L: Loc);
    else if (IDVal == ".seh_endprologue")
      parseDirectiveSEHPrologEnd(L: Loc);
    else if (IDVal == ".seh_save_r19r20_x")
      parseDirectiveSEHSaveR19R20X(L: Loc);
    else if (IDVal == ".seh_save_fplr")
      parseDirectiveSEHSaveFPLR(L: Loc);
    else if (IDVal == ".seh_save_fplr_x")
      parseDirectiveSEHSaveFPLRX(L: Loc);
    else if (IDVal == ".seh_save_reg")
      parseDirectiveSEHSaveReg(L: Loc);
    else if (IDVal == ".seh_save_reg_x")
      parseDirectiveSEHSaveRegX(L: Loc);
    else if (IDVal == ".seh_save_regp")
      parseDirectiveSEHSaveRegP(L: Loc);
    else if (IDVal == ".seh_save_regp_x")
      parseDirectiveSEHSaveRegPX(L: Loc);
    else if (IDVal == ".seh_save_lrpair")
      parseDirectiveSEHSaveLRPair(L: Loc);
    else if (IDVal == ".seh_save_freg")
      parseDirectiveSEHSaveFReg(L: Loc);
    else if (IDVal == ".seh_save_freg_x")
      parseDirectiveSEHSaveFRegX(L: Loc);
    else if (IDVal == ".seh_save_fregp")
      parseDirectiveSEHSaveFRegP(L: Loc);
    else if (IDVal == ".seh_save_fregp_x")
      parseDirectiveSEHSaveFRegPX(L: Loc);
    else if (IDVal == ".seh_set_fp")
      parseDirectiveSEHSetFP(L: Loc);
    else if (IDVal == ".seh_add_fp")
      parseDirectiveSEHAddFP(L: Loc);
    else if (IDVal == ".seh_nop")
      parseDirectiveSEHNop(L: Loc);
    else if (IDVal == ".seh_save_next")
      parseDirectiveSEHSaveNext(L: Loc);
    else if (IDVal == ".seh_startepilogue")
      parseDirectiveSEHEpilogStart(L: Loc);
    else if (IDVal == ".seh_endepilogue")
      parseDirectiveSEHEpilogEnd(L: Loc);
    else if (IDVal == ".seh_trap_frame")
      parseDirectiveSEHTrapFrame(L: Loc);
    else if (IDVal == ".seh_pushframe")
      parseDirectiveSEHMachineFrame(L: Loc);
    else if (IDVal == ".seh_context")
      parseDirectiveSEHContext(L: Loc);
    else if (IDVal == ".seh_ec_context")
      parseDirectiveSEHECContext(L: Loc);
    else if (IDVal == ".seh_clear_unwound_to_call")
      parseDirectiveSEHClearUnwoundToCall(L: Loc);
    else if (IDVal == ".seh_pac_sign_lr")
      parseDirectiveSEHPACSignLR(L: Loc);
    else if (IDVal == ".seh_save_any_reg")
      parseDirectiveSEHSaveAnyReg(L: Loc, Paired: false, Writeback: false);
    else if (IDVal == ".seh_save_any_reg_p")
      parseDirectiveSEHSaveAnyReg(L: Loc, Paired: true, Writeback: false);
    else if (IDVal == ".seh_save_any_reg_x")
      parseDirectiveSEHSaveAnyReg(L: Loc, Paired: false, Writeback: true);
    else if (IDVal == ".seh_save_any_reg_px")
      parseDirectiveSEHSaveAnyReg(L: Loc, Paired: true, Writeback: true);
    else if (IDVal == ".seh_allocz")
      parseDirectiveSEHAllocZ(L: Loc);
    else if (IDVal == ".seh_save_zreg")
      parseDirectiveSEHSaveZReg(L: Loc);
    else if (IDVal == ".seh_save_preg")
      parseDirectiveSEHSavePReg(L: Loc);
    else
      return true;
  // ELF only: AArch64 build-attribute directives.
  } else if (IsELF) {
    if (IDVal == ".aeabi_subsection")
      parseDirectiveAeabiSubSectionHeader(L: Loc);
    else if (IDVal == ".aeabi_attribute")
      parseDirectiveAeabiAArch64Attr(L: Loc);
    else
      return true;
  } else
    return true;
  return false;
}
7388
7389static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo,
7390 SmallVector<StringRef, 4> &RequestedExtensions) {
7391 const bool NoCrypto = llvm::is_contained(Range&: RequestedExtensions, Element: "nocrypto");
7392 const bool Crypto = llvm::is_contained(Range&: RequestedExtensions, Element: "crypto");
7393
7394 if (!NoCrypto && Crypto) {
7395 // Map 'generic' (and others) to sha2 and aes, because
7396 // that was the traditional meaning of crypto.
7397 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7398 ArchInfo == AArch64::ARMV8_3A) {
7399 RequestedExtensions.push_back(Elt: "sha2");
7400 RequestedExtensions.push_back(Elt: "aes");
7401 }
7402 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7403 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7404 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7405 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7406 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7407 ArchInfo == AArch64::ARMV9_4A || ArchInfo == AArch64::ARMV8R) {
7408 RequestedExtensions.push_back(Elt: "sm4");
7409 RequestedExtensions.push_back(Elt: "sha3");
7410 RequestedExtensions.push_back(Elt: "sha2");
7411 RequestedExtensions.push_back(Elt: "aes");
7412 }
7413 } else if (NoCrypto) {
7414 // Map 'generic' (and others) to sha2 and aes, because
7415 // that was the traditional meaning of crypto.
7416 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7417 ArchInfo == AArch64::ARMV8_3A) {
7418 RequestedExtensions.push_back(Elt: "nosha2");
7419 RequestedExtensions.push_back(Elt: "noaes");
7420 }
7421 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7422 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7423 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7424 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7425 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7426 ArchInfo == AArch64::ARMV9_4A) {
7427 RequestedExtensions.push_back(Elt: "nosm4");
7428 RequestedExtensions.push_back(Elt: "nosha3");
7429 RequestedExtensions.push_back(Elt: "nosha2");
7430 RequestedExtensions.push_back(Elt: "noaes");
7431 }
7432 }
7433}
7434
7435static SMLoc incrementLoc(SMLoc L, int Offset) {
7436 return SMLoc::getFromPointer(Ptr: L.getPointer() + Offset);
7437}
7438
7439/// parseDirectiveArch
7440/// ::= .arch token
7441bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
7442 SMLoc CurLoc = getLoc();
7443
7444 StringRef Name = getParser().parseStringToEndOfStatement().trim();
7445 StringRef Arch, ExtensionString;
7446 std::tie(args&: Arch, args&: ExtensionString) = Name.split(Separator: '+');
7447
7448 const AArch64::ArchInfo *ArchInfo = AArch64::parseArch(Arch);
7449 if (!ArchInfo)
7450 return Error(L: CurLoc, Msg: "unknown arch name");
7451
7452 if (parseToken(T: AsmToken::EndOfStatement))
7453 return true;
7454
7455 // Get the architecture and extension features.
7456 std::vector<StringRef> AArch64Features;
7457 AArch64Features.push_back(x: ArchInfo->ArchFeature);
7458 AArch64::getExtensionFeatures(Extensions: ArchInfo->DefaultExts, Features&: AArch64Features);
7459
7460 MCSubtargetInfo &STI = copySTI();
7461 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
7462 STI.setDefaultFeatures(CPU: "generic", /*TuneCPU*/ "generic",
7463 FS: join(Begin: ArchFeatures.begin(), End: ArchFeatures.end(), Separator: ","));
7464
7465 SmallVector<StringRef, 4> RequestedExtensions;
7466 if (!ExtensionString.empty())
7467 ExtensionString.split(A&: RequestedExtensions, Separator: '+');
7468
7469 ExpandCryptoAEK(ArchInfo: *ArchInfo, RequestedExtensions);
7470 CurLoc = incrementLoc(L: CurLoc, Offset: Arch.size());
7471
7472 for (auto Name : RequestedExtensions) {
7473 // Advance source location past '+'.
7474 CurLoc = incrementLoc(L: CurLoc, Offset: 1);
7475
7476 bool EnableFeature = !Name.consume_front_insensitive(Prefix: "no");
7477
7478 auto It = llvm::find_if(Range: ExtensionMap, P: [&Name](const auto &Extension) {
7479 return Extension.Name == Name;
7480 });
7481
7482 if (It == std::end(arr: ExtensionMap))
7483 return Error(L: CurLoc, Msg: "unsupported architectural extension: " + Name);
7484
7485 if (EnableFeature)
7486 STI.SetFeatureBitsTransitively(It->Features);
7487 else
7488 STI.ClearFeatureBitsTransitively(FB: It->Features);
7489 CurLoc = incrementLoc(L: CurLoc, Offset: Name.size());
7490 }
7491 FeatureBitset Features = ComputeAvailableFeatures(FB: STI.getFeatureBits());
7492 setAvailableFeatures(Features);
7493
7494 getTargetStreamer().emitDirectiveArch(Name);
7495 return false;
7496}
7497
7498/// parseDirectiveArchExtension
7499/// ::= .arch_extension [no]feature
7500bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
7501 SMLoc ExtLoc = getLoc();
7502
7503 StringRef FullName = getParser().parseStringToEndOfStatement().trim();
7504
7505 if (parseEOL())
7506 return true;
7507
7508 bool EnableFeature = true;
7509 StringRef Name = FullName;
7510 if (Name.starts_with_insensitive(Prefix: "no")) {
7511 EnableFeature = false;
7512 Name = Name.substr(Start: 2);
7513 }
7514
7515 auto It = llvm::find_if(Range: ExtensionMap, P: [&Name](const auto &Extension) {
7516 return Extension.Name == Name;
7517 });
7518
7519 if (It == std::end(arr: ExtensionMap))
7520 return Error(L: ExtLoc, Msg: "unsupported architectural extension: " + Name);
7521
7522 MCSubtargetInfo &STI = copySTI();
7523 if (EnableFeature)
7524 STI.SetFeatureBitsTransitively(It->Features);
7525 else
7526 STI.ClearFeatureBitsTransitively(FB: It->Features);
7527 FeatureBitset Features = ComputeAvailableFeatures(FB: STI.getFeatureBits());
7528 setAvailableFeatures(Features);
7529
7530 getTargetStreamer().emitDirectiveArchExtension(Name: FullName);
7531 return false;
7532}
7533
/// parseDirectiveCPU
/// ::= .cpu id
///
/// Accepts "<cpu>[+[no]ext[+[no]ext...]]": the CPU name selects the default
/// feature set and each '+' suffix toggles one architectural extension.
bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
  SMLoc CurLoc = getLoc();

  StringRef CPU, ExtensionString;
  std::tie(args&: CPU, args&: ExtensionString) =
      getParser().parseStringToEndOfStatement().trim().split(Separator: '+');

  if (parseToken(T: AsmToken::EndOfStatement))
    return true;

  SmallVector<StringRef, 4> RequestedExtensions;
  if (!ExtensionString.empty())
    ExtensionString.split(A&: RequestedExtensions, Separator: '+');

  const llvm::AArch64::ArchInfo *CpuArch = llvm::AArch64::getArchForCpu(CPU);
  if (!CpuArch) {
    // NOTE(review): the diagnostic is emitted but we return false ("no parse
    // failure"), unlike parseDirectiveArch which returns the Error() result —
    // presumably so assembly continues after the report; confirm intended.
    Error(L: CurLoc, Msg: "unknown CPU name");
    return false;
  }
  // Expand a legacy "+crypto"/"+nocrypto" request into the concrete
  // sha2/aes/sm4/sha3 extensions appropriate for this CPU's architecture.
  ExpandCryptoAEK(ArchInfo: *CpuArch, RequestedExtensions);

  MCSubtargetInfo &STI = copySTI();
  STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, FS: "");
  CurLoc = incrementLoc(L: CurLoc, Offset: CPU.size());

  for (auto Name : RequestedExtensions) {
    // Advance source location past '+'.
    CurLoc = incrementLoc(L: CurLoc, Offset: 1);

    // A leading case-insensitive "no" disables the extension.
    bool EnableFeature = !Name.consume_front_insensitive(Prefix: "no");

    auto It = llvm::find_if(Range: ExtensionMap, P: [&Name](const auto &Extension) {
      return Extension.Name == Name;
    });

    if (It == std::end(arr: ExtensionMap))
      return Error(L: CurLoc, Msg: "unsupported architectural extension: " + Name);

    if (EnableFeature)
      STI.SetFeatureBitsTransitively(It->Features);
    else
      STI.ClearFeatureBitsTransitively(FB: It->Features);
    CurLoc = incrementLoc(L: CurLoc, Offset: Name.size());
  }
  // Recompute the parser's available-feature mask from the updated STI.
  FeatureBitset Features = ComputeAvailableFeatures(FB: STI.getFeatureBits());
  setAvailableFeatures(Features);
  return false;
}
7584
7585/// parseDirectiveInst
7586/// ::= .inst opcode [, ...]
7587bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
7588 if (getLexer().is(K: AsmToken::EndOfStatement))
7589 return Error(L: Loc, Msg: "expected expression following '.inst' directive");
7590
7591 auto parseOp = [&]() -> bool {
7592 SMLoc L = getLoc();
7593 const MCExpr *Expr = nullptr;
7594 if (check(P: getParser().parseExpression(Res&: Expr), Loc: L, Msg: "expected expression"))
7595 return true;
7596 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Val: Expr);
7597 if (check(P: !Value, Loc: L, Msg: "expected constant expression"))
7598 return true;
7599 getTargetStreamer().emitInst(Inst: Value->getValue());
7600 return false;
7601 };
7602
7603 return parseMany(parseOne: parseOp);
7604}
7605
// parseDirectiveTLSDescCall:
//   ::= .tlsdesccall symbol
//
// Emits a TLSDESCCALL marker for the named TLS symbol.
bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
  StringRef Name;
  if (check(P: getParser().parseIdentifier(Res&: Name), Loc: L, Msg: "expected symbol") ||
      parseToken(T: AsmToken::EndOfStatement))
    return true;

  // Wrap the symbol reference in an S_TLSDESC specifier so the operand
  // carries TLS-descriptor semantics when the marker is emitted.
  MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
  const MCExpr *Expr = MCSymbolRefExpr::create(Symbol: Sym, Ctx&: getContext());
  Expr = MCSpecifierExpr::create(Expr, S: AArch64::S_TLSDESC, Ctx&: getContext());

  // Build the TLSDESCCALL instruction with the annotated symbol expression
  // as its only operand (presumably a zero-size marker consumed by the
  // relocation machinery — confirm against the instruction definition).
  MCInst Inst;
  Inst.setOpcode(AArch64::TLSDESCCALL);
  Inst.addOperand(Op: MCOperand::createExpr(Val: Expr));

  getParser().getStreamer().emitInstruction(Inst, STI: getSTI());
  return false;
}
7625
/// ::= .loh <lohName | lohId> label1, ..., labelN
/// The number of arguments depends on the loh identifier.
///
/// Parses a linker-optimization-hint directive: the kind (by name or by
/// numeric id) followed by the exact number of labels that kind requires.
bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
  MCLOHType Kind;
  if (getTok().isNot(K: AsmToken::Identifier)) {
    if (getTok().isNot(K: AsmToken::Integer))
      return TokError(Msg: "expected an identifier or a number in directive");
    // We successfully get a numeric value for the identifier.
    // Check if it is valid.
    int64_t Id = getTok().getIntVal();
    // NOTE(review): '-1U' promotes to int64_t 0xFFFFFFFF here, so this bound
    // admits negative Ids and anything up to UINT32_MAX before the validity
    // check — looks like it was meant as a range guard; confirm the intent.
    if (Id <= -1U && !isValidMCLOHType(Kind: Id))
      return TokError(Msg: "invalid numeric identifier in directive");
    Kind = (MCLOHType)Id;
  } else {
    StringRef Name = getTok().getIdentifier();
    // We successfully parse an identifier.
    // Check if it is a recognized one.
    int Id = MCLOHNameToId(Name);

    if (Id == -1)
      return TokError(Msg: "invalid identifier in directive");
    Kind = (MCLOHType)Id;
  }
  // Consume the identifier.
  Lex();
  // Get the number of arguments of this LOH.
  int NbArgs = MCLOHIdToNbArgs(Kind);

  assert(NbArgs != -1 && "Invalid number of arguments");

  // Parse exactly NbArgs comma-separated label names.
  SmallVector<MCSymbol *, 3> Args;
  for (int Idx = 0; Idx < NbArgs; ++Idx) {
    StringRef Name;
    if (getParser().parseIdentifier(Res&: Name))
      return TokError(Msg: "expected identifier in directive");
    Args.push_back(Elt: getContext().getOrCreateSymbol(Name));

    // No comma after the final label.
    if (Idx + 1 == NbArgs)
      break;
    if (parseComma())
      return true;
  }
  if (parseEOL())
    return true;

  getStreamer().emitLOHDirective(Kind, Args);
  return false;
}
7674
7675/// parseDirectiveLtorg
7676/// ::= .ltorg | .pool
7677bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
7678 if (parseEOL())
7679 return true;
7680 getTargetStreamer().emitCurrentConstantPool();
7681 return false;
7682}
7683
/// parseDirectiveReq
/// ::= name .req registername
///
/// Defines 'Name' as an alias for a register. The operand is tried as, in
/// order: scalar register, NEON vector, SVE data vector, SVE predicate; the
/// first successful parse fixes both the register number and its kind.
bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
  Lex(); // Eat the '.req' token.
  SMLoc SRegLoc = getLoc();
  RegKind RegisterKind = RegKind::Scalar;
  MCRegister RegNum;
  ParseStatus ParseRes = tryParseScalarRegister(RegNum);

  if (!ParseRes.isSuccess()) {
    StringRef Kind;
    RegisterKind = RegKind::NeonVector;
    ParseRes = tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RegKind::NeonVector);

    // isFailure() means tokens were consumed and an error already reported.
    if (ParseRes.isFailure())
      return true;

    // A non-empty Kind means a type suffix (e.g. ".4s") was present, which
    // is not permitted in a .req alias.
    if (ParseRes.isSuccess() && !Kind.empty())
      return Error(L: SRegLoc, Msg: "vector register without type specifier expected");
  }

  if (!ParseRes.isSuccess()) {
    StringRef Kind;
    RegisterKind = RegKind::SVEDataVector;
    ParseRes =
        tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RegKind::SVEDataVector);

    if (ParseRes.isFailure())
      return true;

    if (ParseRes.isSuccess() && !Kind.empty())
      return Error(L: SRegLoc,
                   Msg: "sve vector register without type specifier expected");
  }

  if (!ParseRes.isSuccess()) {
    StringRef Kind;
    RegisterKind = RegKind::SVEPredicateVector;
    ParseRes = tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RegKind::SVEPredicateVector);

    if (ParseRes.isFailure())
      return true;

    if (ParseRes.isSuccess() && !Kind.empty())
      return Error(L: SRegLoc,
                   Msg: "sve predicate register without type specifier expected");
  }

  // None of the register kinds matched.
  if (!ParseRes.isSuccess())
    return Error(L: SRegLoc, Msg: "register name or alias expected");

  // Shouldn't be anything else.
  if (parseEOL())
    return true;

  // Record the alias. insert() keeps an existing entry, so a redefinition to
  // a different register is ignored with only a warning.
  auto pair = std::make_pair(x&: RegisterKind, y&: RegNum);
  if (RegisterReqs.insert(KV: std::make_pair(x&: Name, y&: pair)).first->second != pair)
    Warning(L, Msg: "ignoring redefinition of register alias '" + Name + "'");

  return false;
}
7745
7746/// parseDirectiveUneq
7747/// ::= .unreq registername
7748bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
7749 if (getTok().isNot(K: AsmToken::Identifier))
7750 return TokError(Msg: "unexpected input in .unreq directive.");
7751 RegisterReqs.erase(Key: getTok().getIdentifier().lower());
7752 Lex(); // Eat the identifier.
7753 return parseToken(T: AsmToken::EndOfStatement);
7754}
7755
7756bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
7757 if (parseEOL())
7758 return true;
7759 getStreamer().emitCFINegateRAState();
7760 return false;
7761}
7762
7763bool AArch64AsmParser::parseDirectiveCFINegateRAStateWithPC() {
7764 if (parseEOL())
7765 return true;
7766 getStreamer().emitCFINegateRAStateWithPC();
7767 return false;
7768}
7769
7770/// parseDirectiveCFIBKeyFrame
7771/// ::= .cfi_b_key
7772bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
7773 if (parseEOL())
7774 return true;
7775 getStreamer().emitCFIBKeyFrame();
7776 return false;
7777}
7778
7779/// parseDirectiveCFIMTETaggedFrame
7780/// ::= .cfi_mte_tagged_frame
7781bool AArch64AsmParser::parseDirectiveCFIMTETaggedFrame() {
7782 if (parseEOL())
7783 return true;
7784 getStreamer().emitCFIMTETaggedFrame();
7785 return false;
7786}
7787
7788/// parseDirectiveVariantPCS
7789/// ::= .variant_pcs symbolname
7790bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
7791 StringRef Name;
7792 if (getParser().parseIdentifier(Res&: Name))
7793 return TokError(Msg: "expected symbol name");
7794 if (parseEOL())
7795 return true;
7796 getTargetStreamer().emitDirectiveVariantPCS(
7797 Symbol: getContext().getOrCreateSymbol(Name));
7798 return false;
7799}
7800
7801/// parseDirectiveSEHAllocStack
7802/// ::= .seh_stackalloc
7803bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) {
7804 int64_t Size;
7805 if (parseImmExpr(Out&: Size))
7806 return true;
7807 getTargetStreamer().emitARM64WinCFIAllocStack(Size);
7808 return false;
7809}
7810
7811/// parseDirectiveSEHPrologEnd
7812/// ::= .seh_endprologue
7813bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) {
7814 getTargetStreamer().emitARM64WinCFIPrologEnd();
7815 return false;
7816}
7817
7818/// parseDirectiveSEHSaveR19R20X
7819/// ::= .seh_save_r19r20_x
7820bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) {
7821 int64_t Offset;
7822 if (parseImmExpr(Out&: Offset))
7823 return true;
7824 getTargetStreamer().emitARM64WinCFISaveR19R20X(Offset);
7825 return false;
7826}
7827
7828/// parseDirectiveSEHSaveFPLR
7829/// ::= .seh_save_fplr
7830bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) {
7831 int64_t Offset;
7832 if (parseImmExpr(Out&: Offset))
7833 return true;
7834 getTargetStreamer().emitARM64WinCFISaveFPLR(Offset);
7835 return false;
7836}
7837
7838/// parseDirectiveSEHSaveFPLRX
7839/// ::= .seh_save_fplr_x
7840bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) {
7841 int64_t Offset;
7842 if (parseImmExpr(Out&: Offset))
7843 return true;
7844 getTargetStreamer().emitARM64WinCFISaveFPLRX(Offset);
7845 return false;
7846}
7847
7848/// parseDirectiveSEHSaveReg
7849/// ::= .seh_save_reg
7850bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) {
7851 unsigned Reg;
7852 int64_t Offset;
7853 if (parseRegisterInRange(Out&: Reg, Base: AArch64::X0, First: AArch64::X19, Last: AArch64::LR) ||
7854 parseComma() || parseImmExpr(Out&: Offset))
7855 return true;
7856 getTargetStreamer().emitARM64WinCFISaveReg(Reg, Offset);
7857 return false;
7858}
7859
7860/// parseDirectiveSEHSaveRegX
7861/// ::= .seh_save_reg_x
7862bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) {
7863 unsigned Reg;
7864 int64_t Offset;
7865 if (parseRegisterInRange(Out&: Reg, Base: AArch64::X0, First: AArch64::X19, Last: AArch64::LR) ||
7866 parseComma() || parseImmExpr(Out&: Offset))
7867 return true;
7868 getTargetStreamer().emitARM64WinCFISaveRegX(Reg, Offset);
7869 return false;
7870}
7871
7872/// parseDirectiveSEHSaveRegP
7873/// ::= .seh_save_regp
7874bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) {
7875 unsigned Reg;
7876 int64_t Offset;
7877 if (parseRegisterInRange(Out&: Reg, Base: AArch64::X0, First: AArch64::X19, Last: AArch64::FP) ||
7878 parseComma() || parseImmExpr(Out&: Offset))
7879 return true;
7880 getTargetStreamer().emitARM64WinCFISaveRegP(Reg, Offset);
7881 return false;
7882}
7883
7884/// parseDirectiveSEHSaveRegPX
7885/// ::= .seh_save_regp_x
7886bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) {
7887 unsigned Reg;
7888 int64_t Offset;
7889 if (parseRegisterInRange(Out&: Reg, Base: AArch64::X0, First: AArch64::X19, Last: AArch64::FP) ||
7890 parseComma() || parseImmExpr(Out&: Offset))
7891 return true;
7892 getTargetStreamer().emitARM64WinCFISaveRegPX(Reg, Offset);
7893 return false;
7894}
7895
7896/// parseDirectiveSEHSaveLRPair
7897/// ::= .seh_save_lrpair
7898bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) {
7899 unsigned Reg;
7900 int64_t Offset;
7901 L = getLoc();
7902 if (parseRegisterInRange(Out&: Reg, Base: AArch64::X0, First: AArch64::X19, Last: AArch64::LR) ||
7903 parseComma() || parseImmExpr(Out&: Offset))
7904 return true;
7905 if (check(P: ((Reg - 19) % 2 != 0), Loc: L,
7906 Msg: "expected register with even offset from x19"))
7907 return true;
7908 getTargetStreamer().emitARM64WinCFISaveLRPair(Reg, Offset);
7909 return false;
7910}
7911
7912/// parseDirectiveSEHSaveFReg
7913/// ::= .seh_save_freg
7914bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) {
7915 unsigned Reg;
7916 int64_t Offset;
7917 if (parseRegisterInRange(Out&: Reg, Base: AArch64::D0, First: AArch64::D8, Last: AArch64::D15) ||
7918 parseComma() || parseImmExpr(Out&: Offset))
7919 return true;
7920 getTargetStreamer().emitARM64WinCFISaveFReg(Reg, Offset);
7921 return false;
7922}
7923
7924/// parseDirectiveSEHSaveFRegX
7925/// ::= .seh_save_freg_x
7926bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) {
7927 unsigned Reg;
7928 int64_t Offset;
7929 if (parseRegisterInRange(Out&: Reg, Base: AArch64::D0, First: AArch64::D8, Last: AArch64::D15) ||
7930 parseComma() || parseImmExpr(Out&: Offset))
7931 return true;
7932 getTargetStreamer().emitARM64WinCFISaveFRegX(Reg, Offset);
7933 return false;
7934}
7935
7936/// parseDirectiveSEHSaveFRegP
7937/// ::= .seh_save_fregp
7938bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) {
7939 unsigned Reg;
7940 int64_t Offset;
7941 if (parseRegisterInRange(Out&: Reg, Base: AArch64::D0, First: AArch64::D8, Last: AArch64::D14) ||
7942 parseComma() || parseImmExpr(Out&: Offset))
7943 return true;
7944 getTargetStreamer().emitARM64WinCFISaveFRegP(Reg, Offset);
7945 return false;
7946}
7947
7948/// parseDirectiveSEHSaveFRegPX
7949/// ::= .seh_save_fregp_x
7950bool AArch64AsmParser::parseDirectiveSEHSaveFRegPX(SMLoc L) {
7951 unsigned Reg;
7952 int64_t Offset;
7953 if (parseRegisterInRange(Out&: Reg, Base: AArch64::D0, First: AArch64::D8, Last: AArch64::D14) ||
7954 parseComma() || parseImmExpr(Out&: Offset))
7955 return true;
7956 getTargetStreamer().emitARM64WinCFISaveFRegPX(Reg, Offset);
7957 return false;
7958}
7959
7960/// parseDirectiveSEHSetFP
7961/// ::= .seh_set_fp
7962bool AArch64AsmParser::parseDirectiveSEHSetFP(SMLoc L) {
7963 getTargetStreamer().emitARM64WinCFISetFP();
7964 return false;
7965}
7966
7967/// parseDirectiveSEHAddFP
7968/// ::= .seh_add_fp
7969bool AArch64AsmParser::parseDirectiveSEHAddFP(SMLoc L) {
7970 int64_t Size;
7971 if (parseImmExpr(Out&: Size))
7972 return true;
7973 getTargetStreamer().emitARM64WinCFIAddFP(Size);
7974 return false;
7975}
7976
7977/// parseDirectiveSEHNop
7978/// ::= .seh_nop
7979bool AArch64AsmParser::parseDirectiveSEHNop(SMLoc L) {
7980 getTargetStreamer().emitARM64WinCFINop();
7981 return false;
7982}
7983
7984/// parseDirectiveSEHSaveNext
7985/// ::= .seh_save_next
7986bool AArch64AsmParser::parseDirectiveSEHSaveNext(SMLoc L) {
7987 getTargetStreamer().emitARM64WinCFISaveNext();
7988 return false;
7989}
7990
7991/// parseDirectiveSEHEpilogStart
7992/// ::= .seh_startepilogue
7993bool AArch64AsmParser::parseDirectiveSEHEpilogStart(SMLoc L) {
7994 getTargetStreamer().emitARM64WinCFIEpilogStart();
7995 return false;
7996}
7997
7998/// parseDirectiveSEHEpilogEnd
7999/// ::= .seh_endepilogue
8000bool AArch64AsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
8001 getTargetStreamer().emitARM64WinCFIEpilogEnd();
8002 return false;
8003}
8004
8005/// parseDirectiveSEHTrapFrame
8006/// ::= .seh_trap_frame
8007bool AArch64AsmParser::parseDirectiveSEHTrapFrame(SMLoc L) {
8008 getTargetStreamer().emitARM64WinCFITrapFrame();
8009 return false;
8010}
8011
8012/// parseDirectiveSEHMachineFrame
8013/// ::= .seh_pushframe
8014bool AArch64AsmParser::parseDirectiveSEHMachineFrame(SMLoc L) {
8015 getTargetStreamer().emitARM64WinCFIMachineFrame();
8016 return false;
8017}
8018
8019/// parseDirectiveSEHContext
8020/// ::= .seh_context
8021bool AArch64AsmParser::parseDirectiveSEHContext(SMLoc L) {
8022 getTargetStreamer().emitARM64WinCFIContext();
8023 return false;
8024}
8025
8026/// parseDirectiveSEHECContext
8027/// ::= .seh_ec_context
8028bool AArch64AsmParser::parseDirectiveSEHECContext(SMLoc L) {
8029 getTargetStreamer().emitARM64WinCFIECContext();
8030 return false;
8031}
8032
8033/// parseDirectiveSEHClearUnwoundToCall
8034/// ::= .seh_clear_unwound_to_call
8035bool AArch64AsmParser::parseDirectiveSEHClearUnwoundToCall(SMLoc L) {
8036 getTargetStreamer().emitARM64WinCFIClearUnwoundToCall();
8037 return false;
8038}
8039
8040/// parseDirectiveSEHPACSignLR
8041/// ::= .seh_pac_sign_lr
8042bool AArch64AsmParser::parseDirectiveSEHPACSignLR(SMLoc L) {
8043 getTargetStreamer().emitARM64WinCFIPACSignLR();
8044 return false;
8045}
8046
/// parseDirectiveSEHSaveAnyReg
/// ::= .seh_save_any_reg
/// ::= .seh_save_any_reg_p
/// ::= .seh_save_any_reg_x
/// ::= .seh_save_any_reg_px
///
/// Parses "reg, offset" and emits one of twelve save_any_reg unwind codes,
/// chosen by register class (x / d / q) and the Paired/Writeback flags.
bool AArch64AsmParser::parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired,
                                                   bool Writeback) {
  MCRegister Reg;
  SMLoc Start, End;
  int64_t Offset;
  if (check(P: parseRegister(Reg, StartLoc&: Start, EndLoc&: End), Loc: getLoc(), Msg: "expected register") ||
      parseComma() || parseImmExpr(Out&: Offset))
    return true;

  if (Reg == AArch64::FP || Reg == AArch64::LR ||
      (Reg >= AArch64::X0 && Reg <= AArch64::X28)) {
    // Integer registers: offset must be non-negative and aligned to 16 for
    // paired/writeback forms, 8 otherwise.
    if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
      return Error(L, Msg: "invalid save_any_reg offset");
    // Encode fp/lr as 29/30 to continue the x0..x28 numbering.
    unsigned EncodedReg;
    if (Reg == AArch64::FP)
      EncodedReg = 29;
    else if (Reg == AArch64::LR)
      EncodedReg = 30;
    else
      EncodedReg = Reg - AArch64::X0;
    if (Paired) {
      // The implicit pair partner is Reg+1, so lr (the last) cannot pair.
      if (Reg == AArch64::LR)
        return Error(L: Start, Msg: "lr cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegIPX(Reg: EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegIP(Reg: EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegIX(Reg: EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegI(Reg: EncodedReg, Offset);
    }
  } else if (Reg >= AArch64::D0 && Reg <= AArch64::D31) {
    // 64-bit FP registers: same offset rules as the integer case.
    unsigned EncodedReg = Reg - AArch64::D0;
    if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
      return Error(L, Msg: "invalid save_any_reg offset");
    if (Paired) {
      if (Reg == AArch64::D31)
        return Error(L: Start, Msg: "d31 cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegDPX(Reg: EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegDP(Reg: EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegDX(Reg: EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegD(Reg: EncodedReg, Offset);
    }
  } else if (Reg >= AArch64::Q0 && Reg <= AArch64::Q31) {
    // 128-bit vector registers: offset always requires 16-byte alignment.
    unsigned EncodedReg = Reg - AArch64::Q0;
    if (Offset < 0 || Offset % 16)
      return Error(L, Msg: "invalid save_any_reg offset");
    if (Paired) {
      if (Reg == AArch64::Q31)
        return Error(L: Start, Msg: "q31 cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegQPX(Reg: EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegQP(Reg: EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegQX(Reg: EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegQ(Reg: EncodedReg, Offset);
    }
  } else {
    return Error(L: Start, Msg: "save_any_reg register must be x, q or d register");
  }
  return false;
}
8124
8125/// parseDirectiveAllocZ
8126/// ::= .seh_allocz
8127bool AArch64AsmParser::parseDirectiveSEHAllocZ(SMLoc L) {
8128 int64_t Offset;
8129 if (parseImmExpr(Out&: Offset))
8130 return true;
8131 getTargetStreamer().emitARM64WinCFIAllocZ(Offset);
8132 return false;
8133}
8134
8135/// parseDirectiveSEHSaveZReg
8136/// ::= .seh_save_zreg
8137bool AArch64AsmParser::parseDirectiveSEHSaveZReg(SMLoc L) {
8138 MCRegister RegNum;
8139 StringRef Kind;
8140 int64_t Offset;
8141 ParseStatus Res =
8142 tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RegKind::SVEDataVector);
8143 if (!Res.isSuccess())
8144 return true;
8145 if (check(P: RegNum < AArch64::Z8 || RegNum > AArch64::Z23, Loc: L,
8146 Msg: "expected register in range z8 to z23"))
8147 return true;
8148 if (parseComma() || parseImmExpr(Out&: Offset))
8149 return true;
8150 getTargetStreamer().emitARM64WinCFISaveZReg(Reg: RegNum - AArch64::Z0, Offset);
8151 return false;
8152}
8153
8154/// parseDirectiveSEHSavePReg
8155/// ::= .seh_save_preg
8156bool AArch64AsmParser::parseDirectiveSEHSavePReg(SMLoc L) {
8157 MCRegister RegNum;
8158 StringRef Kind;
8159 int64_t Offset;
8160 ParseStatus Res =
8161 tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RegKind::SVEPredicateVector);
8162 if (!Res.isSuccess())
8163 return true;
8164 if (check(P: RegNum < AArch64::P4 || RegNum > AArch64::P15, Loc: L,
8165 Msg: "expected register in range p4 to p15"))
8166 return true;
8167 if (parseComma() || parseImmExpr(Out&: Offset))
8168 return true;
8169 getTargetStreamer().emitARM64WinCFISavePReg(Reg: RegNum - AArch64::P0, Offset);
8170 return false;
8171}
8172
bool AArch64AsmParser::parseDirectiveAeabiSubSectionHeader(SMLoc L) {
  // Handle parsing of .aeabi_subsection directives
  // - On first declaration of a subsection, expect exactly three identifiers
  //   after `.aeabi_subsection`: the subsection name and two parameters.
  // - When switching to an existing subsection, it is valid to provide only
  //   the subsection name, or the name together with the two parameters.
  MCAsmParser &Parser = getParser();

  // Consume the name (subsection name).
  StringRef SubsectionName;
  AArch64BuildAttributes::VendorID SubsectionNameID;
  if (Parser.getTok().is(K: AsmToken::Identifier)) {
    SubsectionName = Parser.getTok().getIdentifier();
    SubsectionNameID = AArch64BuildAttributes::getVendorID(Vendor: SubsectionName);
  } else {
    Error(L: Parser.getTok().getLoc(), Msg: "subsection name not found");
    return true;
  }
  Parser.Lex();

  // Look up any previously declared subsection with this name so the
  // parameters (if given) can be cross-checked against it.
  std::unique_ptr<MCELFStreamer::AttributeSubSection> SubsectionExists =
      getTargetStreamer().getAttributesSubsectionByName(Name: SubsectionName);
  // Check whether only the subsection name was provided.
  // If so, the user is trying to switch to a subsection that should have been
  // declared before.
  if (Parser.getTok().is(K: llvm::AsmToken::EndOfStatement)) {
    if (SubsectionExists) {
      // Re-activate the existing subsection with its recorded parameters.
      getTargetStreamer().emitAttributesSubsection(
          VendorName: SubsectionName,
          IsOptional: static_cast<AArch64BuildAttributes::SubsectionOptional>(
              SubsectionExists->IsOptional),
          ParameterType: static_cast<AArch64BuildAttributes::SubsectionType>(
              SubsectionExists->ParameterType));
      return false;
    }
    // If the subsection does not exist, report an error.
    else {
      Error(L: Parser.getTok().getLoc(),
            Msg: "Could not switch to subsection '" + SubsectionName +
                "' using subsection name, subsection has not been defined");
      return true;
    }
  }

  // Otherwise, expecting 2 more parameters: consume a comma.
  // parseComma() returns *false* on success and calls Lex(); no need to call
  // Lex() again.
  if (Parser.parseComma()) {
    return true;
  }

  // Consume the first parameter (optionality parameter).
  AArch64BuildAttributes::SubsectionOptional IsOptional;
  // Accepted values: optional / required.
  if (Parser.getTok().is(K: AsmToken::Identifier)) {
    StringRef Optionality = Parser.getTok().getIdentifier();
    IsOptional = AArch64BuildAttributes::getOptionalID(Optional: Optionality);
    if (AArch64BuildAttributes::OPTIONAL_NOT_FOUND == IsOptional) {
      Error(L: Parser.getTok().getLoc(),
            Msg: AArch64BuildAttributes::getSubsectionOptionalUnknownError());
      return true;
    }
    // A redeclaration must agree with the recorded optionality.
    if (SubsectionExists) {
      if (IsOptional != SubsectionExists->IsOptional) {
        Error(L: Parser.getTok().getLoc(),
              Msg: "optionality mismatch! subsection '" + SubsectionName +
                  "' already exists with optionality defined as '" +
                  AArch64BuildAttributes::getOptionalStr(
                      Optional: SubsectionExists->IsOptional) +
                  "' and not '" +
                  AArch64BuildAttributes::getOptionalStr(Optional: IsOptional) + "'");
        return true;
      }
    }
  } else {
    Error(L: Parser.getTok().getLoc(),
          Msg: "optionality parameter not found, expected required|optional");
    return true;
  }
  // Check for IsOptional values that are not accepted for known subsections.
  if (AArch64BuildAttributes::AEABI_FEATURE_AND_BITS == SubsectionNameID) {
    if (AArch64BuildAttributes::REQUIRED == IsOptional) {
      Error(L: Parser.getTok().getLoc(),
            Msg: "aeabi_feature_and_bits must be marked as optional");
      return true;
    }
  }
  if (AArch64BuildAttributes::AEABI_PAUTHABI == SubsectionNameID) {
    if (AArch64BuildAttributes::OPTIONAL == IsOptional) {
      Error(L: Parser.getTok().getLoc(),
            Msg: "aeabi_pauthabi must be marked as required");
      return true;
    }
  }
  Parser.Lex();
  // Consume a comma.
  if (Parser.parseComma()) {
    return true;
  }

  // Consume the second parameter (type parameter).
  AArch64BuildAttributes::SubsectionType Type;
  if (Parser.getTok().is(K: AsmToken::Identifier)) {
    StringRef Name = Parser.getTok().getIdentifier();
    Type = AArch64BuildAttributes::getTypeID(Type: Name);
    if (AArch64BuildAttributes::TYPE_NOT_FOUND == Type) {
      Error(L: Parser.getTok().getLoc(),
            Msg: AArch64BuildAttributes::getSubsectionTypeUnknownError());
      return true;
    }
    // A redeclaration must agree with the recorded parameter type.
    if (SubsectionExists) {
      if (Type != SubsectionExists->ParameterType) {
        Error(L: Parser.getTok().getLoc(),
              Msg: "type mismatch! subsection '" + SubsectionName +
                  "' already exists with type defined as '" +
                  AArch64BuildAttributes::getTypeStr(
                      Type: SubsectionExists->ParameterType) +
                  "' and not '" + AArch64BuildAttributes::getTypeStr(Type) +
                  "'");
        return true;
      }
    }
  } else {
    Error(L: Parser.getTok().getLoc(),
          Msg: "type parameter not found, expected uleb128|ntbs");
    return true;
  }
  // Check for 'type' values that are not accepted for known subsections.
  if (AArch64BuildAttributes::AEABI_FEATURE_AND_BITS == SubsectionNameID ||
      AArch64BuildAttributes::AEABI_PAUTHABI == SubsectionNameID) {
    if (AArch64BuildAttributes::NTBS == Type) {
      Error(L: Parser.getTok().getLoc(),
            Msg: SubsectionName + " must be marked as ULEB128");
      return true;
    }
  }
  Parser.Lex();

  // Parsing finished, check for trailing tokens.
  if (Parser.getTok().isNot(K: llvm::AsmToken::EndOfStatement)) {
    Error(L: Parser.getTok().getLoc(), Msg: "unexpected token for AArch64 build "
                                     "attributes subsection header directive");
    return true;
  }

  getTargetStreamer().emitAttributesSubsection(VendorName: SubsectionName, IsOptional, ParameterType: Type);

  return false;
}
8322
8323bool AArch64AsmParser::parseDirectiveAeabiAArch64Attr(SMLoc L) {
8324 // Expecting 2 Tokens: after '.aeabi_attribute', e.g.:
8325 // .aeabi_attribute (1)Tag_Feature_BTI, (2)[uleb128|ntbs]
8326 // separated by a comma.
8327 MCAsmParser &Parser = getParser();
8328
8329 std::unique_ptr<MCELFStreamer::AttributeSubSection> ActiveSubsection =
8330 getTargetStreamer().getActiveAttributesSubsection();
8331 if (nullptr == ActiveSubsection) {
8332 Error(L: Parser.getTok().getLoc(),
8333 Msg: "no active subsection, build attribute can not be added");
8334 return true;
8335 }
8336 StringRef ActiveSubsectionName = ActiveSubsection->VendorName;
8337 unsigned ActiveSubsectionType = ActiveSubsection->ParameterType;
8338
8339 unsigned ActiveSubsectionID = AArch64BuildAttributes::VENDOR_UNKNOWN;
8340 if (AArch64BuildAttributes::getVendorName(
8341 Vendor: AArch64BuildAttributes::AEABI_PAUTHABI) == ActiveSubsectionName)
8342 ActiveSubsectionID = AArch64BuildAttributes::AEABI_PAUTHABI;
8343 if (AArch64BuildAttributes::getVendorName(
8344 Vendor: AArch64BuildAttributes::AEABI_FEATURE_AND_BITS) ==
8345 ActiveSubsectionName)
8346 ActiveSubsectionID = AArch64BuildAttributes::AEABI_FEATURE_AND_BITS;
8347
8348 StringRef TagStr = "";
8349 unsigned Tag;
8350 if (Parser.getTok().is(K: AsmToken::Integer)) {
8351 Tag = getTok().getIntVal();
8352 } else if (Parser.getTok().is(K: AsmToken::Identifier)) {
8353 TagStr = Parser.getTok().getIdentifier();
8354 switch (ActiveSubsectionID) {
8355 case AArch64BuildAttributes::VENDOR_UNKNOWN:
8356 // Tag was provided as an unrecognized string instead of an unsigned
8357 // integer
8358 Error(L: Parser.getTok().getLoc(), Msg: "unrecognized Tag: '" + TagStr +
8359 "' \nExcept for public subsections, "
8360 "tags have to be an unsigned int.");
8361 return true;
8362 break;
8363 case AArch64BuildAttributes::AEABI_PAUTHABI:
8364 Tag = AArch64BuildAttributes::getPauthABITagsID(PauthABITag: TagStr);
8365 if (AArch64BuildAttributes::PAUTHABI_TAG_NOT_FOUND == Tag) {
8366 Error(L: Parser.getTok().getLoc(), Msg: "unknown AArch64 build attribute '" +
8367 TagStr + "' for subsection '" +
8368 ActiveSubsectionName + "'");
8369 return true;
8370 }
8371 break;
8372 case AArch64BuildAttributes::AEABI_FEATURE_AND_BITS:
8373 Tag = AArch64BuildAttributes::getFeatureAndBitsTagsID(FeatureAndBitsTag: TagStr);
8374 if (AArch64BuildAttributes::FEATURE_AND_BITS_TAG_NOT_FOUND == Tag) {
8375 Error(L: Parser.getTok().getLoc(), Msg: "unknown AArch64 build attribute '" +
8376 TagStr + "' for subsection '" +
8377 ActiveSubsectionName + "'");
8378 return true;
8379 }
8380 break;
8381 }
8382 } else {
8383 Error(L: Parser.getTok().getLoc(), Msg: "AArch64 build attributes tag not found");
8384 return true;
8385 }
8386 Parser.Lex();
8387 // consume a comma
8388 // parseComma() return *false* on success, and call Lex(), no need to call
8389 // Lex() again.
8390 if (Parser.parseComma()) {
8391 return true;
8392 }
8393
8394 // Consume the second parameter (attribute value)
8395 unsigned ValueInt = unsigned(-1);
8396 std::string ValueStr = "";
8397 if (Parser.getTok().is(K: AsmToken::Integer)) {
8398 if (AArch64BuildAttributes::NTBS == ActiveSubsectionType) {
8399 Error(
8400 L: Parser.getTok().getLoc(),
8401 Msg: "active subsection type is NTBS (string), found ULEB128 (unsigned)");
8402 return true;
8403 }
8404 ValueInt = getTok().getIntVal();
8405 } else if (Parser.getTok().is(K: AsmToken::Identifier)) {
8406 if (AArch64BuildAttributes::ULEB128 == ActiveSubsectionType) {
8407 Error(
8408 L: Parser.getTok().getLoc(),
8409 Msg: "active subsection type is ULEB128 (unsigned), found NTBS (string)");
8410 return true;
8411 }
8412 ValueStr = Parser.getTok().getIdentifier();
8413 } else if (Parser.getTok().is(K: AsmToken::String)) {
8414 if (AArch64BuildAttributes::ULEB128 == ActiveSubsectionType) {
8415 Error(
8416 L: Parser.getTok().getLoc(),
8417 Msg: "active subsection type is ULEB128 (unsigned), found NTBS (string)");
8418 return true;
8419 }
8420 ValueStr = Parser.getTok().getString();
8421 } else {
8422 Error(L: Parser.getTok().getLoc(), Msg: "AArch64 build attributes value not found");
8423 return true;
8424 }
8425 // Check for possible unaccepted values for known tags
8426 // (AEABI_FEATURE_AND_BITS)
8427 if (ActiveSubsectionID == AArch64BuildAttributes::AEABI_FEATURE_AND_BITS) {
8428 if (0 != ValueInt && 1 != ValueInt) {
8429 Error(L: Parser.getTok().getLoc(),
8430 Msg: "unknown AArch64 build attributes Value for Tag '" + TagStr +
8431 "' options are 0|1");
8432 return true;
8433 }
8434 }
8435 Parser.Lex();
8436
8437 // Parsing finished. Check for trailing tokens.
8438 if (Parser.getTok().isNot(K: llvm::AsmToken::EndOfStatement)) {
8439 Error(L: Parser.getTok().getLoc(),
8440 Msg: "unexpected token for AArch64 build attributes tag and value "
8441 "attribute directive");
8442 return true;
8443 }
8444
8445 if (unsigned(-1) != ValueInt) {
8446 getTargetStreamer().emitAttribute(VendorName: ActiveSubsectionName, Tag, Value: ValueInt, String: "");
8447 }
8448 if ("" != ValueStr) {
8449 getTargetStreamer().emitAttribute(VendorName: ActiveSubsectionName, Tag, Value: unsigned(-1),
8450 String: ValueStr);
8451 }
8452 return false;
8453}
8454
/// Parse an expression used in a data directive, optionally followed by an
/// '@' relocation specifier (e.g. "sym@plt", "sym@AUTH(...)") and a trailing
/// chain of "+/- <term>" addends.
/// \returns true on error, false otherwise (Res holds the parsed expression).
bool AArch64AsmParser::parseDataExpr(const MCExpr *&Res) {
  SMLoc EndLoc;

  if (getParser().parseExpression(Res))
    return true;
  MCAsmParser &Parser = getParser();
  // Without an '@' there is nothing more to parse.
  if (!parseOptionalToken(T: AsmToken::At))
    return false;
  if (getLexer().getKind() != AsmToken::Identifier)
    return Error(L: getLoc(), Msg: "expected relocation specifier");

  // Specifier matching is case-insensitive.
  std::string Identifier = Parser.getTok().getIdentifier().lower();
  SMLoc Loc = getLoc();
  Lex();
  // "@AUTH(...)" has its own dedicated parser.
  if (Identifier == "auth")
    return parseAuthExpr(Res, EndLoc);

  // The set of accepted specifiers depends on the object file format.
  auto Spec = AArch64::S_None;
  if (STI->getTargetTriple().isOSBinFormatMachO()) {
    if (Identifier == "got")
      Spec = AArch64::S_MACHO_GOT;
  } else {
    // Unofficial, experimental syntax that will be changed.
    if (Identifier == "gotpcrel")
      Spec = AArch64::S_GOTPCREL;
    else if (Identifier == "plt")
      Spec = AArch64::S_PLT;
    else if (Identifier == "funcinit")
      Spec = AArch64::S_FUNCINIT;
  }
  if (Spec == AArch64::S_None)
    return Error(L: Loc, Msg: "invalid relocation specifier");
  // Rebuild the symbol reference with the specifier attached; a specifier is
  // only meaningful directly on a symbol.
  if (auto *SRE = dyn_cast<MCSymbolRefExpr>(Val: Res))
    Res = MCSymbolRefExpr::create(Symbol: &SRE->getSymbol(), specifier: Spec, Ctx&: getContext(),
                                  Loc: SRE->getLoc());
  else
    return Error(L: Loc, Msg: "@ specifier only allowed after a symbol");

  // Fold any trailing "+ term" / "- term" pieces into the result.
  for (;;) {
    std::optional<MCBinaryExpr::Opcode> Opcode;
    if (parseOptionalToken(T: AsmToken::Plus))
      Opcode = MCBinaryExpr::Add;
    else if (parseOptionalToken(T: AsmToken::Minus))
      Opcode = MCBinaryExpr::Sub;
    else
      break;
    const MCExpr *Term;
    if (getParser().parsePrimaryExpr(Res&: Term, EndLoc, TypeInfo: nullptr))
      return true;
    Res = MCBinaryExpr::create(Op: *Opcode, LHS: Res, RHS: Term, Ctx&: getContext(), Loc: Res->getLoc());
  }
  return false;
}
8508
8509/// parseAuthExpr
8510/// ::= _sym@AUTH(ib,123[,addr])
8511/// ::= (_sym + 5)@AUTH(ib,123[,addr])
8512/// ::= (_sym - 5)@AUTH(ib,123[,addr])
8513bool AArch64AsmParser::parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc) {
8514 MCAsmParser &Parser = getParser();
8515 MCContext &Ctx = getContext();
8516 AsmToken Tok = Parser.getTok();
8517
8518 // At this point, we encountered "<id>@AUTH". There is no fallback anymore.
8519 if (parseToken(T: AsmToken::LParen, Msg: "expected '('"))
8520 return true;
8521
8522 if (Parser.getTok().isNot(K: AsmToken::Identifier))
8523 return TokError(Msg: "expected key name");
8524
8525 StringRef KeyStr = Parser.getTok().getIdentifier();
8526 auto KeyIDOrNone = AArch64StringToPACKeyID(Name: KeyStr);
8527 if (!KeyIDOrNone)
8528 return TokError(Msg: "invalid key '" + KeyStr + "'");
8529 Parser.Lex();
8530
8531 if (parseToken(T: AsmToken::Comma, Msg: "expected ','"))
8532 return true;
8533
8534 if (Parser.getTok().isNot(K: AsmToken::Integer))
8535 return TokError(Msg: "expected integer discriminator");
8536 int64_t Discriminator = Parser.getTok().getIntVal();
8537
8538 if (!isUInt<16>(x: Discriminator))
8539 return TokError(Msg: "integer discriminator " + Twine(Discriminator) +
8540 " out of range [0, 0xFFFF]");
8541 Parser.Lex();
8542
8543 bool UseAddressDiversity = false;
8544 if (Parser.getTok().is(K: AsmToken::Comma)) {
8545 Parser.Lex();
8546 if (Parser.getTok().isNot(K: AsmToken::Identifier) ||
8547 Parser.getTok().getIdentifier() != "addr")
8548 return TokError(Msg: "expected 'addr'");
8549 UseAddressDiversity = true;
8550 Parser.Lex();
8551 }
8552
8553 EndLoc = Parser.getTok().getEndLoc();
8554 if (parseToken(T: AsmToken::RParen, Msg: "expected ')'"))
8555 return true;
8556
8557 Res = AArch64AuthMCExpr::create(Expr: Res, Discriminator, Key: *KeyIDOrNone,
8558 HasAddressDiversity: UseAddressDiversity, Ctx, Loc: Res->getLoc());
8559 return false;
8560}
8561
/// Decompose Expr into an optional ELF specifier, an optional Darwin
/// specifier, and a constant addend.
/// \returns true if Expr has a supported "specifier(symbol) + addend" shape,
/// false otherwise. At most one of ELFSpec/DarwinSpec may end up meaningful;
/// mixing both syntaxes in one expression is rejected.
bool AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
                                         AArch64::Specifier &ELFSpec,
                                         AArch64::Specifier &DarwinSpec,
                                         int64_t &Addend) {
  ELFSpec = AArch64::S_INVALID;
  DarwinSpec = AArch64::S_None;
  Addend = 0;

  // Peel off an explicit ELF-style specifier wrapper (e.g. ":lo12:sym"),
  // classifying the inner sub-expression from here on.
  if (auto *AE = dyn_cast<MCSpecifierExpr>(Val: Expr)) {
    ELFSpec = AE->getSpecifier();
    Expr = AE->getSubExpr();
  }

  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Val: Expr);
  if (SE) {
    // It's a simple symbol reference with no addend.
    DarwinSpec = AArch64::Specifier(SE->getKind());
    return true;
  }

  // Check that it looks like a symbol + an addend
  MCValue Res;
  bool Relocatable = Expr->evaluateAsRelocatable(Res, Asm: nullptr);
  // A subtracted symbol (A - B) is not classifiable here.
  if (!Relocatable || Res.getSubSym())
    return false;

  // Treat expressions with an ELFSpec (like ":abs_g1:3", or
  // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
  if (!Res.getAddSym() && ELFSpec == AArch64::S_INVALID)
    return false;

  if (Res.getAddSym())
    DarwinSpec = AArch64::Specifier(Res.getSpecifier());
  Addend = Res.getConstant();

  // It's some symbol reference + a constant addend, but really
  // shouldn't use both Darwin and ELF syntax.
  return ELFSpec == AArch64::S_INVALID || DarwinSpec == AArch64::S_None;
}
8601
8602/// Force static initialization.
8603extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void
8604LLVMInitializeAArch64AsmParser() {
8605 RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
8606 RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
8607 RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
8608 RegisterMCAsmParser<AArch64AsmParser> W(getTheARM64_32Target());
8609 RegisterMCAsmParser<AArch64AsmParser> V(getTheAArch64_32Target());
8610}
8611
8612#define GET_REGISTER_MATCHER
8613#define GET_SUBTARGET_FEATURE_NAME
8614#define GET_MATCHER_IMPLEMENTATION
8615#define GET_MNEMONIC_SPELL_CHECKER
8616#include "AArch64GenAsmMatcher.inc"
8617
// Define this matcher function after the auto-generated include so we
// have the match class enum definitions.
//
// Handles the target-specific match classes the generic matcher cannot:
// literal "za" tokens and fixed-value immediates baked into alias strings.
unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
                                                      unsigned Kind) {
  AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);

  // Succeeds only if Op is a constant immediate equal to ExpectedVal.
  auto MatchesOpImmediate = [&](int64_t ExpectedVal) -> MatchResultTy {
    if (!Op.isImm())
      return Match_InvalidOperand;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Op.getImm());
    if (!CE)
      return Match_InvalidOperand;
    if (CE->getValue() == ExpectedVal)
      return Match_Success;
    return Match_InvalidOperand;
  };

  switch (Kind) {
  default:
    return Match_InvalidOperand;
  case MCK_MPR:
    // If the Kind is a token for the MPR register class which has the "za"
    // register (SME accumulator array), check if the asm is a literal "za"
    // token. This is for the "smstart za" alias that defines the register
    // as a literal token.
    if (Op.isTokenEqual(Str: "za"))
      return Match_Success;
    return Match_InvalidOperand;

  // If the kind is a token for a literal immediate, check if our asm operand
  // matches. This is for InstAliases which have a fixed-value immediate in
  // the asm string, such as hints which are parsed into a specific
  // instruction definition.
#define MATCH_HASH(N) \
  case MCK__HASH_##N: \
    return MatchesOpImmediate(N);
  MATCH_HASH(0)
  MATCH_HASH(1)
  MATCH_HASH(2)
  MATCH_HASH(3)
  MATCH_HASH(4)
  MATCH_HASH(6)
  MATCH_HASH(7)
  MATCH_HASH(8)
  MATCH_HASH(10)
  MATCH_HASH(12)
  MATCH_HASH(14)
  MATCH_HASH(16)
  MATCH_HASH(24)
  MATCH_HASH(25)
  MATCH_HASH(26)
  MATCH_HASH(27)
  MATCH_HASH(28)
  MATCH_HASH(29)
  MATCH_HASH(30)
  MATCH_HASH(31)
  MATCH_HASH(32)
  MATCH_HASH(40)
  MATCH_HASH(48)
  MATCH_HASH(64)
#undef MATCH_HASH
// Same scheme for the negated fixed-immediate tokens ("#-4" etc.).
#define MATCH_HASH_MINUS(N) \
  case MCK__HASH__MINUS_##N: \
    return MatchesOpImmediate(-N);
  MATCH_HASH_MINUS(4)
  MATCH_HASH_MINUS(8)
  MATCH_HASH_MINUS(16)
#undef MATCH_HASH_MINUS
  }
}
8688
/// Parse a pair of consecutive, same-sized scalar registers (both W or both
/// X) written as "reg, reg", where the first register has an even encoding
/// and the second the following odd encoding. The pair is pushed as a single
/// sequential-pair super-register operand.
ParseStatus AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {

  SMLoc S = getLoc();

  if (getTok().isNot(K: AsmToken::Identifier))
    return Error(L: S, Msg: "expected register");

  MCRegister FirstReg;
  ParseStatus Res = tryParseScalarRegister(RegNum&: FirstReg);
  if (!Res.isSuccess())
    return Error(L: S, Msg: "expected first even register of a consecutive same-size "
                "even/odd register pair");

  const MCRegisterClass &WRegClass =
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
  const MCRegisterClass &XRegClass =
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID];

  // Determine whether the first register was a W or an X register; the
  // second register must come from the same class.
  bool isXReg = XRegClass.contains(Reg: FirstReg),
       isWReg = WRegClass.contains(Reg: FirstReg);
  if (!isXReg && !isWReg)
    return Error(L: S, Msg: "expected first even register of a consecutive same-size "
                "even/odd register pair");

  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  unsigned FirstEncoding = RI->getEncodingValue(Reg: FirstReg);

  // The first register of the pair must have an even encoding.
  if (FirstEncoding & 0x1)
    return Error(L: S, Msg: "expected first even register of a consecutive same-size "
                "even/odd register pair");

  if (getTok().isNot(K: AsmToken::Comma))
    return Error(L: getLoc(), Msg: "expected comma");
  // Eat the comma
  Lex();

  SMLoc E = getLoc();
  MCRegister SecondReg;
  Res = tryParseScalarRegister(RegNum&: SecondReg);
  if (!Res.isSuccess())
    return Error(L: E, Msg: "expected second odd register of a consecutive same-size "
                "even/odd register pair");

  // The second register must be the next encoding up and in the same class.
  if (RI->getEncodingValue(Reg: SecondReg) != FirstEncoding + 1 ||
      (isXReg && !XRegClass.contains(Reg: SecondReg)) ||
      (isWReg && !WRegClass.contains(Reg: SecondReg)))
    return Error(L: E, Msg: "expected second odd register of a consecutive same-size "
                "even/odd register pair");

  // Fold the two registers into the matching sequential-pair super-register.
  MCRegister Pair;
  if (isXReg) {
    Pair = RI->getMatchingSuperReg(Reg: FirstReg, SubIdx: AArch64::sube64,
           RC: &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
  } else {
    Pair = RI->getMatchingSuperReg(Reg: FirstReg, SubIdx: AArch64::sube32,
           RC: &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
  }

  Operands.push_back(Elt: AArch64Operand::CreateReg(Reg: Pair, Kind: RegKind::Scalar, S,
                                                 E: getLoc(), Ctx&: getContext()));

  return ParseStatus::Success;
}
8752
8753template <bool ParseShiftExtend, bool ParseSuffix>
8754ParseStatus AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
8755 const SMLoc S = getLoc();
8756 // Check for a SVE vector register specifier first.
8757 MCRegister RegNum;
8758 StringRef Kind;
8759
8760 ParseStatus Res =
8761 tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RegKind::SVEDataVector);
8762
8763 if (!Res.isSuccess())
8764 return Res;
8765
8766 if (ParseSuffix && Kind.empty())
8767 return ParseStatus::NoMatch;
8768
8769 const auto &KindRes = parseVectorKind(Suffix: Kind, VectorKind: RegKind::SVEDataVector);
8770 if (!KindRes)
8771 return ParseStatus::NoMatch;
8772
8773 unsigned ElementWidth = KindRes->second;
8774
8775 // No shift/extend is the default.
8776 if (!ParseShiftExtend || getTok().isNot(K: AsmToken::Comma)) {
8777 Operands.push_back(Elt: AArch64Operand::CreateVectorReg(
8778 Reg: RegNum, Kind: RegKind::SVEDataVector, ElementWidth, S, E: S, Ctx&: getContext()));
8779
8780 ParseStatus Res = tryParseVectorIndex(Operands);
8781 if (Res.isFailure())
8782 return ParseStatus::Failure;
8783 return ParseStatus::Success;
8784 }
8785
8786 // Eat the comma
8787 Lex();
8788
8789 // Match the shift
8790 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
8791 Res = tryParseOptionalShiftExtend(Operands&: ExtOpnd);
8792 if (!Res.isSuccess())
8793 return Res;
8794
8795 auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
8796 Operands.push_back(Elt: AArch64Operand::CreateVectorReg(
8797 Reg: RegNum, Kind: RegKind::SVEDataVector, ElementWidth, S, E: Ext->getEndLoc(),
8798 Ctx&: getContext(), ExtTy: Ext->getShiftExtendType(), ShiftAmount: Ext->getShiftExtendAmount(),
8799 HasExplicitAmount: Ext->hasShiftExtendAmount()));
8800
8801 return ParseStatus::Success;
8802}
8803
/// Parse an SVE predicate pattern operand, written either as an immediate
/// ("#31") or as a named pattern (e.g. "vl4", "mul3", "all").
ParseStatus AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();

  SMLoc SS = getLoc();
  const AsmToken &TokE = getTok();
  bool IsHash = TokE.is(K: AsmToken::Hash);

  // Patterns start with '#' (immediate form) or an identifier (named form).
  if (!IsHash && TokE.isNot(K: AsmToken::Identifier))
    return ParseStatus::NoMatch;

  int64_t Pattern;
  if (IsHash) {
    Lex(); // Eat hash

    // Parse the immediate operand.
    const MCExpr *ImmVal;
    SS = getLoc();
    if (Parser.parseExpression(Res&: ImmVal))
      return ParseStatus::Failure;

    // Only constant expressions are acceptable as a pattern value.
    auto *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
    if (!MCE)
      return TokError(Msg: "invalid operand for instruction");

    Pattern = MCE->getValue();
  } else {
    // Parse the pattern
    auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(Name: TokE.getString());
    if (!Pat)
      return ParseStatus::NoMatch;

    Lex();
    Pattern = Pat->Encoding;
    assert(Pattern >= 0 && Pattern < 32);
  }

  Operands.push_back(
      Elt: AArch64Operand::CreateImm(Val: MCConstantExpr::create(Value: Pattern, Ctx&: getContext()),
                                S: SS, E: getLoc(), Ctx&: getContext()));

  return ParseStatus::Success;
}
8846
8847ParseStatus
8848AArch64AsmParser::tryParseSVEVecLenSpecifier(OperandVector &Operands) {
8849 int64_t Pattern;
8850 SMLoc SS = getLoc();
8851 const AsmToken &TokE = getTok();
8852 // Parse the pattern
8853 auto Pat = AArch64SVEVecLenSpecifier::lookupSVEVECLENSPECIFIERByName(
8854 Name: TokE.getString());
8855 if (!Pat)
8856 return ParseStatus::NoMatch;
8857
8858 Lex();
8859 Pattern = Pat->Encoding;
8860 assert(Pattern >= 0 && Pattern <= 1 && "Pattern does not exist");
8861
8862 Operands.push_back(
8863 Elt: AArch64Operand::CreateImm(Val: MCConstantExpr::create(Value: Pattern, Ctx&: getContext()),
8864 S: SS, E: getLoc(), Ctx&: getContext()));
8865
8866 return ParseStatus::Success;
8867}
8868
8869ParseStatus AArch64AsmParser::tryParseGPR64x8(OperandVector &Operands) {
8870 SMLoc SS = getLoc();
8871
8872 MCRegister XReg;
8873 if (!tryParseScalarRegister(RegNum&: XReg).isSuccess())
8874 return ParseStatus::NoMatch;
8875
8876 MCContext &ctx = getContext();
8877 const MCRegisterInfo *RI = ctx.getRegisterInfo();
8878 MCRegister X8Reg = RI->getMatchingSuperReg(
8879 Reg: XReg, SubIdx: AArch64::x8sub_0,
8880 RC: &AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]);
8881 if (!X8Reg)
8882 return Error(L: SS,
8883 Msg: "expected an even-numbered x-register in the range [x0,x22]");
8884
8885 Operands.push_back(
8886 Elt: AArch64Operand::CreateReg(Reg: X8Reg, Kind: RegKind::Scalar, S: SS, E: getLoc(), Ctx&: ctx));
8887 return ParseStatus::Success;
8888}
8889
/// Parse an immediate range operand of the form "<first>:<last>" (e.g.
/// "0:3"), pushing it as a single immediate-range operand.
ParseStatus AArch64AsmParser::tryParseImmRange(OperandVector &Operands) {
  SMLoc S = getLoc();

  // Require "<int>" immediately followed by ':' before committing to this
  // operand form; otherwise let other parsers try.
  if (getTok().isNot(K: AsmToken::Integer))
    return ParseStatus::NoMatch;

  if (getLexer().peekTok().isNot(K: AsmToken::Colon))
    return ParseStatus::NoMatch;

  const MCExpr *ImmF;
  if (getParser().parseExpression(Res&: ImmF))
    return ParseStatus::NoMatch;

  if (getTok().isNot(K: AsmToken::Colon))
    return ParseStatus::NoMatch;

  Lex(); // Eat ':'
  if (getTok().isNot(K: AsmToken::Integer))
    return ParseStatus::NoMatch;

  SMLoc E = getTok().getLoc();
  const MCExpr *ImmL;
  if (getParser().parseExpression(Res&: ImmL))
    return ParseStatus::NoMatch;

  // NOTE(review): these casts assume both expressions folded to constants.
  // The Integer-token checks above make that the expected case, but a
  // compound expression here would trip the cast's assertion — confirm
  // parseExpression cannot yield a non-constant in this position.
  unsigned ImmFVal = cast<MCConstantExpr>(Val: ImmF)->getValue();
  unsigned ImmLVal = cast<MCConstantExpr>(Val: ImmL)->getValue();

  Operands.push_back(
      Elt: AArch64Operand::CreateImmRange(First: ImmFVal, Last: ImmLVal, S, E, Ctx&: getContext()));
  return ParseStatus::Success;
}
8922
8923template <int Adj>
8924ParseStatus AArch64AsmParser::tryParseAdjImm0_63(OperandVector &Operands) {
8925 SMLoc S = getLoc();
8926
8927 parseOptionalToken(T: AsmToken::Hash);
8928 bool IsNegative = parseOptionalToken(T: AsmToken::Minus);
8929
8930 if (getTok().isNot(K: AsmToken::Integer))
8931 return ParseStatus::NoMatch;
8932
8933 const MCExpr *Ex;
8934 if (getParser().parseExpression(Res&: Ex))
8935 return ParseStatus::NoMatch;
8936
8937 int64_t Imm = dyn_cast<MCConstantExpr>(Val: Ex)->getValue();
8938 if (IsNegative)
8939 Imm = -Imm;
8940
8941 // We want an adjusted immediate in the range [0, 63]. If we don't have one,
8942 // return a value, which is certain to trigger a error message about invalid
8943 // immediate range instead of a non-descriptive invalid operand error.
8944 static_assert(Adj == 1 || Adj == -1, "Unsafe immediate adjustment");
8945 if (Imm == INT64_MIN || Imm == INT64_MAX || Imm + Adj < 0 || Imm + Adj > 63)
8946 Imm = -2;
8947 else
8948 Imm += Adj;
8949
8950 SMLoc E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
8951 Operands.push_back(Elt: AArch64Operand::CreateImm(
8952 Val: MCConstantExpr::create(Value: Imm, Ctx&: getContext()), S, E, Ctx&: getContext()));
8953
8954 return ParseStatus::Success;
8955}
8956