1 | //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | |
9 | #include "AArch64InstrInfo.h" |
10 | #include "MCTargetDesc/AArch64AddressingModes.h" |
11 | #include "MCTargetDesc/AArch64InstPrinter.h" |
12 | #include "MCTargetDesc/AArch64MCExpr.h" |
13 | #include "MCTargetDesc/AArch64MCTargetDesc.h" |
14 | #include "MCTargetDesc/AArch64TargetStreamer.h" |
15 | #include "TargetInfo/AArch64TargetInfo.h" |
16 | #include "Utils/AArch64BaseInfo.h" |
17 | #include "llvm/ADT/APFloat.h" |
18 | #include "llvm/ADT/APInt.h" |
19 | #include "llvm/ADT/ArrayRef.h" |
20 | #include "llvm/ADT/STLExtras.h" |
21 | #include "llvm/ADT/SmallSet.h" |
22 | #include "llvm/ADT/SmallVector.h" |
23 | #include "llvm/ADT/StringExtras.h" |
24 | #include "llvm/ADT/StringMap.h" |
25 | #include "llvm/ADT/StringRef.h" |
26 | #include "llvm/ADT/StringSwitch.h" |
27 | #include "llvm/ADT/Twine.h" |
28 | #include "llvm/MC/MCContext.h" |
29 | #include "llvm/MC/MCExpr.h" |
30 | #include "llvm/MC/MCInst.h" |
31 | #include "llvm/MC/MCLinkerOptimizationHint.h" |
32 | #include "llvm/MC/MCObjectFileInfo.h" |
33 | #include "llvm/MC/MCParser/MCAsmLexer.h" |
34 | #include "llvm/MC/MCParser/MCAsmParser.h" |
35 | #include "llvm/MC/MCParser/MCAsmParserExtension.h" |
36 | #include "llvm/MC/MCParser/MCParsedAsmOperand.h" |
37 | #include "llvm/MC/MCParser/MCTargetAsmParser.h" |
38 | #include "llvm/MC/MCRegisterInfo.h" |
39 | #include "llvm/MC/MCStreamer.h" |
40 | #include "llvm/MC/MCSubtargetInfo.h" |
41 | #include "llvm/MC/MCSymbol.h" |
42 | #include "llvm/MC/MCTargetOptions.h" |
43 | #include "llvm/MC/MCValue.h" |
44 | #include "llvm/MC/TargetRegistry.h" |
45 | #include "llvm/Support/Casting.h" |
46 | #include "llvm/Support/Compiler.h" |
47 | #include "llvm/Support/ErrorHandling.h" |
48 | #include "llvm/Support/MathExtras.h" |
49 | #include "llvm/Support/SMLoc.h" |
50 | #include "llvm/Support/raw_ostream.h" |
51 | #include "llvm/TargetParser/AArch64TargetParser.h" |
52 | #include "llvm/TargetParser/SubtargetFeature.h" |
53 | #include <cassert> |
54 | #include <cctype> |
55 | #include <cstdint> |
56 | #include <cstdio> |
57 | #include <optional> |
58 | #include <string> |
59 | #include <tuple> |
60 | #include <utility> |
61 | #include <vector> |
62 | |
63 | using namespace llvm; |
64 | |
65 | namespace { |
66 | |
// Kind of register an operand names; selects which register namespace an
// identifier is matched against (scalar GPR/FPR, NEON vector, SVE vectors
// and predicates, SME matrix, or ZT lookup-table register).
enum class RegKind {
  Scalar,
  NeonVector,
  SVEDataVector,
  SVEPredicateAsCounter,
  SVEPredicateVector,
  Matrix,
  LookupTable
};
76 | |
77 | enum class MatrixKind { Array, Tile, Row, Col }; |
78 | |
// How a parsed register must relate to the register expected for an operand:
// exactly equal, or equal to its super-/sub-register (used when a register
// may be accepted as a different register class, e.g. GPR64as32/GPR32as64).
enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};
84 | |
85 | class AArch64AsmParser : public MCTargetAsmParser { |
86 | private: |
87 | StringRef Mnemonic; ///< Instruction mnemonic. |
88 | |
89 | // Map of register aliases registers via the .req directive. |
90 | StringMap<std::pair<RegKind, unsigned>> RegisterReqs; |
91 | |
92 | class PrefixInfo { |
93 | public: |
94 | static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) { |
95 | PrefixInfo Prefix; |
96 | switch (Inst.getOpcode()) { |
97 | case AArch64::MOVPRFX_ZZ: |
98 | Prefix.Active = true; |
99 | Prefix.Dst = Inst.getOperand(i: 0).getReg(); |
100 | break; |
101 | case AArch64::MOVPRFX_ZPmZ_B: |
102 | case AArch64::MOVPRFX_ZPmZ_H: |
103 | case AArch64::MOVPRFX_ZPmZ_S: |
104 | case AArch64::MOVPRFX_ZPmZ_D: |
105 | Prefix.Active = true; |
106 | Prefix.Predicated = true; |
107 | Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask; |
108 | assert(Prefix.ElementSize != AArch64::ElementSizeNone && |
109 | "No destructive element size set for movprfx" ); |
110 | Prefix.Dst = Inst.getOperand(i: 0).getReg(); |
111 | Prefix.Pg = Inst.getOperand(i: 2).getReg(); |
112 | break; |
113 | case AArch64::MOVPRFX_ZPzZ_B: |
114 | case AArch64::MOVPRFX_ZPzZ_H: |
115 | case AArch64::MOVPRFX_ZPzZ_S: |
116 | case AArch64::MOVPRFX_ZPzZ_D: |
117 | Prefix.Active = true; |
118 | Prefix.Predicated = true; |
119 | Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask; |
120 | assert(Prefix.ElementSize != AArch64::ElementSizeNone && |
121 | "No destructive element size set for movprfx" ); |
122 | Prefix.Dst = Inst.getOperand(i: 0).getReg(); |
123 | Prefix.Pg = Inst.getOperand(i: 1).getReg(); |
124 | break; |
125 | default: |
126 | break; |
127 | } |
128 | |
129 | return Prefix; |
130 | } |
131 | |
132 | PrefixInfo() = default; |
133 | bool isActive() const { return Active; } |
134 | bool isPredicated() const { return Predicated; } |
135 | unsigned getElementSize() const { |
136 | assert(Predicated); |
137 | return ElementSize; |
138 | } |
139 | unsigned getDstReg() const { return Dst; } |
140 | unsigned getPgReg() const { |
141 | assert(Predicated); |
142 | return Pg; |
143 | } |
144 | |
145 | private: |
146 | bool Active = false; |
147 | bool Predicated = false; |
148 | unsigned ElementSize; |
149 | unsigned Dst; |
150 | unsigned Pg; |
151 | } NextPrefix; |
152 | |
153 | AArch64TargetStreamer &getTargetStreamer() { |
154 | MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer(); |
155 | return static_cast<AArch64TargetStreamer &>(TS); |
156 | } |
157 | |
158 | SMLoc getLoc() const { return getParser().getTok().getLoc(); } |
159 | |
160 | bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands); |
161 | bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands); |
162 | void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S); |
163 | AArch64CC::CondCode parseCondCodeString(StringRef Cond, |
164 | std::string &Suggestion); |
165 | bool parseCondCode(OperandVector &Operands, bool invertCondCode); |
166 | unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind); |
167 | bool parseRegister(OperandVector &Operands); |
168 | bool parseSymbolicImmVal(const MCExpr *&ImmVal); |
169 | bool parseNeonVectorList(OperandVector &Operands); |
170 | bool parseOptionalMulOperand(OperandVector &Operands); |
171 | bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup); |
172 | bool parseKeywordOperand(OperandVector &Operands); |
173 | bool parseOperand(OperandVector &Operands, bool isCondCode, |
174 | bool invertCondCode); |
175 | bool parseImmExpr(int64_t &Out); |
176 | bool parseComma(); |
177 | bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First, |
178 | unsigned Last); |
179 | |
180 | bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo, |
181 | OperandVector &Operands); |
182 | |
183 | bool parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc); |
184 | |
185 | bool parseDirectiveArch(SMLoc L); |
186 | bool parseDirectiveArchExtension(SMLoc L); |
187 | bool parseDirectiveCPU(SMLoc L); |
188 | bool parseDirectiveInst(SMLoc L); |
189 | |
190 | bool parseDirectiveTLSDescCall(SMLoc L); |
191 | |
192 | bool parseDirectiveLOH(StringRef LOH, SMLoc L); |
193 | bool parseDirectiveLtorg(SMLoc L); |
194 | |
195 | bool parseDirectiveReq(StringRef Name, SMLoc L); |
196 | bool parseDirectiveUnreq(SMLoc L); |
197 | bool parseDirectiveCFINegateRAState(); |
198 | bool parseDirectiveCFIBKeyFrame(); |
199 | bool parseDirectiveCFIMTETaggedFrame(); |
200 | |
201 | bool parseDirectiveVariantPCS(SMLoc L); |
202 | |
203 | bool parseDirectiveSEHAllocStack(SMLoc L); |
204 | bool parseDirectiveSEHPrologEnd(SMLoc L); |
205 | bool parseDirectiveSEHSaveR19R20X(SMLoc L); |
206 | bool parseDirectiveSEHSaveFPLR(SMLoc L); |
207 | bool parseDirectiveSEHSaveFPLRX(SMLoc L); |
208 | bool parseDirectiveSEHSaveReg(SMLoc L); |
209 | bool parseDirectiveSEHSaveRegX(SMLoc L); |
210 | bool parseDirectiveSEHSaveRegP(SMLoc L); |
211 | bool parseDirectiveSEHSaveRegPX(SMLoc L); |
212 | bool parseDirectiveSEHSaveLRPair(SMLoc L); |
213 | bool parseDirectiveSEHSaveFReg(SMLoc L); |
214 | bool parseDirectiveSEHSaveFRegX(SMLoc L); |
215 | bool parseDirectiveSEHSaveFRegP(SMLoc L); |
216 | bool parseDirectiveSEHSaveFRegPX(SMLoc L); |
217 | bool parseDirectiveSEHSetFP(SMLoc L); |
218 | bool parseDirectiveSEHAddFP(SMLoc L); |
219 | bool parseDirectiveSEHNop(SMLoc L); |
220 | bool parseDirectiveSEHSaveNext(SMLoc L); |
221 | bool parseDirectiveSEHEpilogStart(SMLoc L); |
222 | bool parseDirectiveSEHEpilogEnd(SMLoc L); |
223 | bool parseDirectiveSEHTrapFrame(SMLoc L); |
224 | bool parseDirectiveSEHMachineFrame(SMLoc L); |
225 | bool parseDirectiveSEHContext(SMLoc L); |
226 | bool parseDirectiveSEHECContext(SMLoc L); |
227 | bool parseDirectiveSEHClearUnwoundToCall(SMLoc L); |
228 | bool parseDirectiveSEHPACSignLR(SMLoc L); |
229 | bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback); |
230 | |
231 | bool validateInstruction(MCInst &Inst, SMLoc &IDLoc, |
232 | SmallVectorImpl<SMLoc> &Loc); |
233 | unsigned getNumRegsForRegKind(RegKind K); |
234 | bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, |
235 | OperandVector &Operands, MCStreamer &Out, |
236 | uint64_t &ErrorInfo, |
237 | bool MatchingInlineAsm) override; |
238 | /// @name Auto-generated Match Functions |
239 | /// { |
240 | |
241 | #define |
242 | #include "AArch64GenAsmMatcher.inc" |
243 | |
244 | /// } |
245 | |
246 | ParseStatus tryParseScalarRegister(MCRegister &Reg); |
247 | ParseStatus tryParseVectorRegister(MCRegister &Reg, StringRef &Kind, |
248 | RegKind MatchKind); |
249 | ParseStatus tryParseMatrixRegister(OperandVector &Operands); |
250 | ParseStatus tryParseSVCR(OperandVector &Operands); |
251 | ParseStatus tryParseOptionalShiftExtend(OperandVector &Operands); |
252 | ParseStatus tryParseBarrierOperand(OperandVector &Operands); |
253 | ParseStatus tryParseBarriernXSOperand(OperandVector &Operands); |
254 | ParseStatus tryParseSysReg(OperandVector &Operands); |
255 | ParseStatus tryParseSysCROperand(OperandVector &Operands); |
256 | template <bool IsSVEPrefetch = false> |
257 | ParseStatus tryParsePrefetch(OperandVector &Operands); |
258 | ParseStatus tryParseRPRFMOperand(OperandVector &Operands); |
259 | ParseStatus tryParsePSBHint(OperandVector &Operands); |
260 | ParseStatus tryParseBTIHint(OperandVector &Operands); |
261 | ParseStatus tryParseAdrpLabel(OperandVector &Operands); |
262 | ParseStatus tryParseAdrLabel(OperandVector &Operands); |
263 | template <bool AddFPZeroAsLiteral> |
264 | ParseStatus tryParseFPImm(OperandVector &Operands); |
265 | ParseStatus tryParseImmWithOptionalShift(OperandVector &Operands); |
266 | ParseStatus tryParseGPR64sp0Operand(OperandVector &Operands); |
267 | bool tryParseNeonVectorRegister(OperandVector &Operands); |
268 | ParseStatus tryParseVectorIndex(OperandVector &Operands); |
269 | ParseStatus tryParseGPRSeqPair(OperandVector &Operands); |
270 | ParseStatus tryParseSyspXzrPair(OperandVector &Operands); |
271 | template <bool ParseShiftExtend, |
272 | RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg> |
273 | ParseStatus tryParseGPROperand(OperandVector &Operands); |
274 | ParseStatus tryParseZTOperand(OperandVector &Operands); |
275 | template <bool ParseShiftExtend, bool ParseSuffix> |
276 | ParseStatus tryParseSVEDataVector(OperandVector &Operands); |
277 | template <RegKind RK> |
278 | ParseStatus tryParseSVEPredicateVector(OperandVector &Operands); |
279 | ParseStatus |
280 | tryParseSVEPredicateOrPredicateAsCounterVector(OperandVector &Operands); |
281 | template <RegKind VectorKind> |
282 | ParseStatus tryParseVectorList(OperandVector &Operands, |
283 | bool ExpectMatch = false); |
284 | ParseStatus tryParseMatrixTileList(OperandVector &Operands); |
285 | ParseStatus tryParseSVEPattern(OperandVector &Operands); |
286 | ParseStatus tryParseSVEVecLenSpecifier(OperandVector &Operands); |
287 | ParseStatus tryParseGPR64x8(OperandVector &Operands); |
288 | ParseStatus tryParseImmRange(OperandVector &Operands); |
289 | |
290 | public: |
291 | enum AArch64MatchResultTy { |
292 | Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY, |
293 | #define GET_OPERAND_DIAGNOSTIC_TYPES |
294 | #include "AArch64GenAsmMatcher.inc" |
295 | }; |
296 | bool IsILP32; |
297 | bool IsWindowsArm64EC; |
298 | |
299 | AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser, |
300 | const MCInstrInfo &MII, const MCTargetOptions &Options) |
301 | : MCTargetAsmParser(Options, STI, MII) { |
302 | IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32; |
303 | IsWindowsArm64EC = STI.getTargetTriple().isWindowsArm64EC(); |
304 | MCAsmParserExtension::Initialize(Parser); |
305 | MCStreamer &S = getParser().getStreamer(); |
306 | if (S.getTargetStreamer() == nullptr) |
307 | new AArch64TargetStreamer(S); |
308 | |
309 | // Alias .hword/.word/.[dx]word to the target-independent |
310 | // .2byte/.4byte/.8byte directives as they have the same form and |
311 | // semantics: |
312 | /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ] |
313 | Parser.addAliasForDirective(Directive: ".hword" , Alias: ".2byte" ); |
314 | Parser.addAliasForDirective(Directive: ".word" , Alias: ".4byte" ); |
315 | Parser.addAliasForDirective(Directive: ".dword" , Alias: ".8byte" ); |
316 | Parser.addAliasForDirective(Directive: ".xword" , Alias: ".8byte" ); |
317 | |
318 | // Initialize the set of available features. |
319 | setAvailableFeatures(ComputeAvailableFeatures(FB: getSTI().getFeatureBits())); |
320 | } |
321 | |
322 | bool areEqualRegs(const MCParsedAsmOperand &Op1, |
323 | const MCParsedAsmOperand &Op2) const override; |
324 | bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name, |
325 | SMLoc NameLoc, OperandVector &Operands) override; |
326 | bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override; |
327 | ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc, |
328 | SMLoc &EndLoc) override; |
329 | bool ParseDirective(AsmToken DirectiveID) override; |
330 | unsigned validateTargetOperandClass(MCParsedAsmOperand &Op, |
331 | unsigned Kind) override; |
332 | |
333 | bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) override; |
334 | |
335 | static bool classifySymbolRef(const MCExpr *Expr, |
336 | AArch64MCExpr::VariantKind &ELFRefKind, |
337 | MCSymbolRefExpr::VariantKind &DarwinRefKind, |
338 | int64_t &Addend); |
339 | }; |
340 | |
341 | /// AArch64Operand - Instances of this class represent a parsed AArch64 machine |
342 | /// instruction. |
343 | class AArch64Operand : public MCParsedAsmOperand { |
344 | private: |
  // Discriminator for the operand storage union below; exactly one union
  // member is active, selected by Kind.
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_ImmRange,
    k_CondCode,
    k_Register,
    k_MatrixRegister,
    k_MatrixTileList,
    k_SVCR,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier,
    k_PSBHint,
    k_BTIHint,
  } Kind;

  // Source range covered by this operand.
  SMLoc StartLoc, EndLoc;

  // Raw token operand (pointer + length into the source buffer).
  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  // Separate shift/extend operand.
  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount;
  };

  struct RegOp {
    unsigned RegNum;
    RegKind Kind;
    int ElementWidth;

    // The register may be allowed as a different register class,
    // e.g. for GPR64as32 or GPR32as64.
    RegConstraintEqualityTy EqualityTy;

    // In some cases the shift/extend needs to be explicitly parsed together
    // with the register, rather than as a separate operand. This is needed
    // for addressing modes where the instruction as a whole dictates the
    // scaling/extend, rather than specific bits in the instruction.
    // By parsing them as a single operand, we avoid the need to pass an
    // extra operand in all CodeGen patterns (because all operands need to
    // have an associated value), and we avoid the need to update TableGen to
    // accept operands that have no associated bits in the instruction.
    //
    // An added benefit of parsing them together is that the assembler
    // can give a sensible diagnostic if the scaling is not correct.
    //
    // The default is 'lsl #0' (HasExplicitAmount = false) if no
    // ShiftExtend is specified.
    ShiftExtendOp ShiftExtend;
  };

  // Matrix register operand (register number, element width, access kind).
  struct MatrixRegOp {
    unsigned RegNum;
    unsigned ElementWidth;
    MatrixKind Kind;
  };

  // Bitmask of matrix tiles named in a tile list.
  struct MatrixTileListOp {
    unsigned RegMask = 0;
  };

  // Vector register list (first register, count, stride, element geometry).
  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    unsigned Stride;
    unsigned NumElements;
    unsigned ElementWidth;
    RegKind RegisterKind;
  };

  struct VectorIndexOp {
    int Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  // Immediate together with its left-shift amount.
  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  // Inclusive immediate range First..Last.
  struct ImmRangeOp {
    unsigned First;
    unsigned Last;
  };

  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };

  struct FPImmOp {
    uint64_t Val; // APFloat value bitcasted to uint64_t.
    bool IsExact; // describes whether parsed value was exact.
  };

  struct BarrierOp {
    const char *Data;
    unsigned Length;
    unsigned Val; // Not the enum since not all values have names.
    bool HasnXSModifier;
  };

  // System register operand: spelling plus its MRS/MSR/PState encodings.
  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint32_t MRSReg;
    uint32_t MSRReg;
    uint32_t PStateField;
  };

  struct SysCRImmOp {
    unsigned Val;
  };

  struct PrefetchOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct PSBHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct BTIHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct SVCROp {
    const char *Data;
    unsigned Length;
    unsigned PStateField;
  };

  // Operand storage; the active member is the one matching Kind.
  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct MatrixRegOp MatrixReg;
    struct MatrixTileListOp MatrixTileList;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct ImmRangeOp ImmRange;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct BTIHintOp BTIHint;
    struct ShiftExtendOp ShiftExtend;
    struct SVCROp SVCR;
  };

  // Keep the MCContext around as the MCExprs may need manipulated during
  // the add<>Operands() calls.
  MCContext &Ctx;
522 | |
523 | public: |
  // Construct an operand of kind K; the caller fills in the matching union
  // member afterwards.
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}

  // Copy constructor. The storage is a union, so only the member selected by
  // o.Kind is copied; copying any other member would be undefined.
  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_ImmRange:
      ImmRange = o.ImmRange;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_MatrixRegister:
      MatrixReg = o.MatrixReg;
      break;
    case k_MatrixTileList:
      MatrixTileList = o.MatrixTileList;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_BTIHint:
      BTIHint = o.BTIHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    case k_SVCR:
      SVCR = o.SVCR;
      break;
    }
  }
590 | |
  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  // Kind-checked accessors: each asserts that the union member it reads is
  // the active one (Kind must match).

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  unsigned getFirstImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.First;
  }

  unsigned getLastImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.Last;
  }

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  // Reconstitute the APFloat from the stored 64-bit pattern.
  APFloat getFPImm() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
  }

  bool getFPImmIsExact() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }

  bool getBarriernXSModifier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.HasnXSModifier;
  }

  MCRegister getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  unsigned getMatrixReg() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.RegNum;
  }

  unsigned getMatrixElementWidth() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.ElementWidth;
  }

  MatrixKind getMatrixKind() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.Kind;
  }

  unsigned getMatrixTileListRegMask() const {
    assert(isMatrixTileList() && "Invalid access!");
    return MatrixTileList.RegMask;
  }

  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;
  }

  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorListStride() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Stride;
  }

  int getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return PSBHint.Val;
  }

  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);
  }

  unsigned getBTIHint() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return BTIHint.Val;
  }

  StringRef getBTIHintName() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return StringRef(BTIHint.Data, BTIHint.Length);
  }

  StringRef getSVCR() const {
    assert(Kind == k_SVCR && "Invalid access!");
    return StringRef(SVCR.Data, SVCR.Length);
  }

  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return StringRef(Prefetch.Data, Prefetch.Length);
  }

  // The shift/extend accessors accept either a standalone k_ShiftExtend
  // operand or a k_Register operand carrying an embedded shift/extend.

  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Type;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Type;
    llvm_unreachable("Invalid access!");
  }

  unsigned getShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Amount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Amount;
    llvm_unreachable("Invalid access!");
  }

  bool hasShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.HasExplicitAmount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.HasExplicitAmount;
    llvm_unreachable("Invalid access!");
  }

  bool isImm() const override { return Kind == k_Immediate; }
  bool isMem() const override { return false; }
782 | |
783 | bool isUImm6() const { |
784 | if (!isImm()) |
785 | return false; |
786 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm()); |
787 | if (!MCE) |
788 | return false; |
789 | int64_t Val = MCE->getValue(); |
790 | return (Val >= 0 && Val < 64); |
791 | } |
792 | |
  // Signed immediate of Width bits is just the Scale=1 case of the scaled
  // check below.
  template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }

  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
    return isImmScaled<Bits, Scale>(true);
  }
798 | |
  // Unsigned scaled immediate; when IsRange, additionally requires the parsed
  // range to span exactly Offset (Last == First + Offset).
  template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
  DiagnosticPredicate isUImmScaled() const {
    if (IsRange && isImmRange() &&
        (getLastImmVal() != getFirstImmVal() + Offset))
      return DiagnosticPredicateTy::NoMatch;

    return isImmScaled<Bits, Scale, IsRange>(false);
  }
807 | |
808 | template <int Bits, int Scale, bool IsRange = false> |
809 | DiagnosticPredicate isImmScaled(bool Signed) const { |
810 | if ((!isImm() && !isImmRange()) || (isImm() && IsRange) || |
811 | (isImmRange() && !IsRange)) |
812 | return DiagnosticPredicateTy::NoMatch; |
813 | |
814 | int64_t Val; |
815 | if (isImmRange()) |
816 | Val = getFirstImmVal(); |
817 | else { |
818 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm()); |
819 | if (!MCE) |
820 | return DiagnosticPredicateTy::NoMatch; |
821 | Val = MCE->getValue(); |
822 | } |
823 | |
824 | int64_t MinVal, MaxVal; |
825 | if (Signed) { |
826 | int64_t Shift = Bits - 1; |
827 | MinVal = (int64_t(1) << Shift) * -Scale; |
828 | MaxVal = ((int64_t(1) << Shift) - 1) * Scale; |
829 | } else { |
830 | MinVal = 0; |
831 | MaxVal = ((int64_t(1) << Bits) - 1) * Scale; |
832 | } |
833 | |
834 | if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0) |
835 | return DiagnosticPredicateTy::Match; |
836 | |
837 | return DiagnosticPredicateTy::NearMatch; |
838 | } |
839 | |
840 | DiagnosticPredicate isSVEPattern() const { |
841 | if (!isImm()) |
842 | return DiagnosticPredicateTy::NoMatch; |
843 | auto *MCE = dyn_cast<MCConstantExpr>(Val: getImm()); |
844 | if (!MCE) |
845 | return DiagnosticPredicateTy::NoMatch; |
846 | int64_t Val = MCE->getValue(); |
847 | if (Val >= 0 && Val < 32) |
848 | return DiagnosticPredicateTy::Match; |
849 | return DiagnosticPredicateTy::NearMatch; |
850 | } |
851 | |
852 | DiagnosticPredicate isSVEVecLenSpecifier() const { |
853 | if (!isImm()) |
854 | return DiagnosticPredicateTy::NoMatch; |
855 | auto *MCE = dyn_cast<MCConstantExpr>(Val: getImm()); |
856 | if (!MCE) |
857 | return DiagnosticPredicateTy::NoMatch; |
858 | int64_t Val = MCE->getValue(); |
859 | if (Val >= 0 && Val <= 1) |
860 | return DiagnosticPredicateTy::Match; |
861 | return DiagnosticPredicateTy::NearMatch; |
862 | } |
863 | |
  // Whether a symbolic expression is acceptable as a uimm12 offset: accepts
  // any expression we cannot classify (deferring to fixup/relocation code),
  // the :lo12:-style page-offset relocation specifiers, and the Darwin
  // @gotpageoff/@tlvppageoff forms (the latter only without an addend).
  bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
                                             Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
        ELFRefKind == AArch64MCExpr::VK_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when using
      // @pageoff.
      return true;
    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
      return Addend == 0;
    }

    return false;
  }
899 | |
900 | template <int Scale> bool isUImm12Offset() const { |
901 | if (!isImm()) |
902 | return false; |
903 | |
904 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm()); |
905 | if (!MCE) |
906 | return isSymbolicUImm12Offset(Expr: getImm()); |
907 | |
908 | int64_t Val = MCE->getValue(); |
909 | return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000; |
910 | } |
911 | |
912 | template <int N, int M> |
913 | bool isImmInRange() const { |
914 | if (!isImm()) |
915 | return false; |
916 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm()); |
917 | if (!MCE) |
918 | return false; |
919 | int64_t Val = MCE->getValue(); |
920 | return (Val >= N && Val <= M); |
921 | } |
922 | |
  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
  // a logical immediate can always be represented when inverted.
  /// Returns true if this operand is a constant encodable as a logical
  /// immediate (bitmask immediate) for a register of width sizeof(T)*8 bits.
  template <typename T>
  bool isLogicalImm() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
    if (!MCE)
      return false;

    int64_t Val = MCE->getValue();
    // Avoid left shift by 64 directly (UB); two half-width shifts build a
    // mask of the bits above the target register width.
    uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
    // Allow all-0 or all-1 in top bits to permit bitwise NOT.
    if ((Val & Upper) && (Val & Upper) != Upper)
      return false;

    // Check the truncated value against the bitmask-immediate encoder.
    return AArch64_AM::isLogicalImmediate(imm: Val & ~Upper, regSize: sizeof(T) * 8);
  }
942 | |
  // True if this operand was parsed as a shifted immediate (k_ShiftedImm).
  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  // True if this operand was parsed as an immediate range (k_ImmRange).
  bool isImmRange() const { return Kind == k_ImmRange; }
946 | |
  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
  /// immediate that can be shifted by 'Shift'. Returns std::nullopt if the
  /// operand is neither a matching shifted immediate nor a plain constant.
  template <unsigned Width>
  std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
    // Explicit "#imm, lsl #Width" form: report (imm, Width) directly.
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(Val: getShiftedImmVal()))
        return std::make_pair(x: CE->getValue(), y: Width);

    // Plain constant: if its low Width bits are zero it can be re-expressed
    // as a shifted immediate, otherwise report it unshifted.
    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(Val: getImm())) {
        int64_t Val = CE->getValue();
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(x: Val >> Width, y: Width);
        else
          return std::make_pair(x&: Val, y: 0u);
      }

    return {};
  }
967 | |
  /// Returns true if this operand is a legal ADD/SUB immediate: either a
  /// 12-bit unsigned constant (optionally 'lsl #12'), or one of the symbolic
  /// low-12-bit relocation forms.
  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    // Symbolic operands are accepted for the page-offset style relocations.
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                            DarwinRefKind, Addend)) {
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
          || ELFRefKind == AArch64MCExpr::VK_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
    }

    // If it's a constant, it should be a real immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

    // If it's an expression, we hope for the best and let the fixup/relocation
    // code deal with it.
    return true;
  }
1012 | |
1013 | bool isAddSubImmNeg() const { |
1014 | if (!isShiftedImm() && !isImm()) |
1015 | return false; |
1016 | |
1017 | // Otherwise it should be a real negative immediate in range. |
1018 | if (auto ShiftedVal = getShiftedVal<12>()) |
1019 | return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff; |
1020 | |
1021 | return false; |
1022 | } |
1023 | |
  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For element-width of 8 bits a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed/unsigned.
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(Val: getImm())))
      return DiagnosticPredicateTy::NoMatch;

    // Byte elements never take the 'lsl #8' form, hence the IsByte guard.
    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  // Unsigned value in the range 0 to 255. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range 0 to 65280.
  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(Val: getImm())))
      return DiagnosticPredicateTy::NoMatch;

    // Byte elements never take the 'lsl #8' form, hence the IsByte guard.
    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
                                        << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1062 | |
1063 | template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const { |
1064 | if (isLogicalImm<T>() && !isSVECpyImm<T>()) |
1065 | return DiagnosticPredicateTy::Match; |
1066 | return DiagnosticPredicateTy::NoMatch; |
1067 | } |
1068 | |
  // True if this operand was parsed as a condition code (k_CondCode).
  bool isCondCode() const { return Kind == k_CondCode; }
1070 | |
1071 | bool isSIMDImmType10() const { |
1072 | if (!isImm()) |
1073 | return false; |
1074 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm()); |
1075 | if (!MCE) |
1076 | return false; |
1077 | return AArch64_AM::isAdvSIMDModImmType10(Imm: MCE->getValue()); |
1078 | } |
1079 | |
  /// Returns true if this operand is a valid branch target with an N-bit
  /// signed, 4-byte-aligned displacement. Non-constant expressions are
  /// accepted and left to fixup/relocation handling.
  template<int N>
  bool isBranchTarget() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    // Branch offsets must be word (4-byte) aligned.
    if (Val & 0x3)
      return false;
    assert(N > 0 && "Branch target immediate cannot be 0 bits!" );
    // Range is the N-bit signed field scaled by 4.
    return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
  }
1093 | |
  /// Returns true if this operand is a symbolic expression whose ELF
  /// relocation specifier is one of \p AllowedModifiers. Darwin-style
  /// modifiers are never accepted here.
  bool
  isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
    if (!isImm())
      return false;

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr: getImm(), ELFRefKind,
                                             DarwinRefKind, Addend)) {
      return false;
    }
    if (DarwinRefKind != MCSymbolRefExpr::VK_None)
      return false;

    return llvm::is_contained(Range&: AllowedModifiers, Element: ELFRefKind);
  }

  // Symbol with a modifier selecting bits [63:48] (MOVZ/MOVK g3 slice).
  bool isMovWSymbolG3() const {
    return isMovWSymbol(AllowedModifiers: {AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
  }

  // Symbol with a modifier selecting bits [47:32] (g2 slice).
  bool isMovWSymbolG2() const {
    return isMovWSymbol(
        AllowedModifiers: {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
         AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
         AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
         AArch64MCExpr::VK_DTPREL_G2});
  }

  // Symbol with a modifier selecting bits [31:16] (g1 slice).
  bool isMovWSymbolG1() const {
    return isMovWSymbol(
        AllowedModifiers: {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
         AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
         AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
         AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
         AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
  }

  // Symbol with a modifier selecting bits [15:0] (g0 slice).
  bool isMovWSymbolG0() const {
    return isMovWSymbol(
        AllowedModifiers: {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
         AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
         AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
         AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
         AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
  }
1141 | |
  /// Returns true if this immediate can be materialized by a single MOVZ
  /// with the given logical shift, so a MOV alias may be used.
  template<int RegWidth, int Shift>
  bool isMOVZMovAlias() const {
    if (!isImm()) return false;

    const MCExpr *E = getImm();
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: E)) {
      uint64_t Value = CE->getValue();

      return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
    }
    // Only supports the case of Shift being 0 if an expression is used as an
    // operand
    return !Shift && E;
  }

  /// Returns true if this immediate can be materialized by a single MOVN
  /// with the given logical shift, so a MOV alias may be used. Unlike the
  /// MOVZ case, symbolic expressions are not accepted.
  template<int RegWidth, int Shift>
  bool isMOVNMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
  }
1167 | |
  // True if this operand is an FP immediate encodable in the 8-bit FMOV
  // immediate field (getFP64Imm returns -1 for unencodable values).
  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(Imm: getFPImm().bitcastToAPInt()) != -1;
  }

  // Barrier operand without the nXS modifier.
  bool isBarrier() const {
    return Kind == k_Barrier && !getBarriernXSModifier();
  }
  // Barrier operand carrying the nXS modifier.
  bool isBarriernXS() const {
    return Kind == k_Barrier && getBarriernXSModifier();
  }
  // True if this operand was parsed as a system register (k_SysReg).
  bool isSysReg() const { return Kind == k_SysReg; }
1180 | |
  // System register readable via MRS (-1U marks "no MRS encoding").
  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }

  // System register writable via MSR (-1U marks "no MSR encoding").
  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }

  // PState field whose MSR-immediate form takes a 0/1 operand.
  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return AArch64PState::lookupPStateImm0_1ByEncoding(Encoding: SysReg.PStateField);
  }

  // PState field whose MSR-immediate form takes a 0..15 operand.
  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg())
      return false;
    return AArch64PState::lookupPStateImm0_15ByEncoding(Encoding: SysReg.PStateField);
  }

  // SME streaming-mode control register operand (-1U marks invalid).
  bool isSVCR() const {
    if (Kind != k_SVCR)
      return false;
    return SVCR.PStateField != -1U;
  }
1208 | |
  // Generic register predicate required by MCParsedAsmOperand.
  bool isReg() const override {
    return Kind == k_Register;
  }

  // True if this operand is a register list (e.g. for LD1/ST1).
  bool isVectorList() const { return Kind == k_VectorList; }

  // Scalar (integer/FP) register.
  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }

  // NEON vector register (Vn).
  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }

  // NEON vector register restricted to the lower half of the register file
  // (V0-V15), as required by some indexed-element instructions.
  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
                Reg: Reg.RegNum) ||
            AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
                Reg: Reg.RegNum));
  }

  // NEON vector register restricted to V0-V7.
  bool isNeonVectorReg0to7() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_0to7RegClassID].contains(
               Reg: Reg.RegNum));
  }

  // SME matrix register / matrix tile list operands.
  bool isMatrix() const { return Kind == k_MatrixRegister; }
  bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1239 | |
  /// Returns true if this operand is a predicate-as-counter register (PNn)
  /// belonging to the given register class.
  template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
    case AArch64::PPR_p8to15RegClassID:
    case AArch64::PNRRegClassID:
    case AArch64::PNR_p8to15RegClassID:
    case AArch64::PPRorPNRRegClassID:
      RK = RegKind::SVEPredicateAsCounter;
      break;
    default:
      llvm_unreachable("Unsupport register class" );
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(Reg: getReg());
  }

  /// Returns true if this operand is an SVE data (Zn) or predicate (Pn)
  /// register belonging to the given register class; the expected RegKind is
  /// derived from the class ID.
  template <unsigned Class> bool isSVEVectorReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::ZPRRegClassID:
    case AArch64::ZPR_3bRegClassID:
    case AArch64::ZPR_4bRegClassID:
      RK = RegKind::SVEDataVector;
      break;
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
    case AArch64::PPR_p8to15RegClassID:
    case AArch64::PNRRegClassID:
    case AArch64::PNR_p8to15RegClassID:
    case AArch64::PPRorPNRRegClassID:
      RK = RegKind::SVEPredicateVector;
      break;
    default:
      llvm_unreachable("Unsupport register class" );
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(Reg: getReg());
  }

  /// Returns true if this is a scalar FP register used where a Z register is
  /// expected (FPR-as-ZPR aliasing).
  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(Reg: getReg());
  }
1287 | |
  /// Match an SVE predicate register of the given class with the given
  /// element width; wrong width/class yields NearMatch for a better
  /// diagnostic.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  /// Match either a predicate (Pn) or predicate-as-counter (PNn) register of
  /// the given class and element width.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateOrPredicateAsCounterRegOfWidth() const {
    if (Kind != k_Register || (Reg.Kind != RegKind::SVEPredicateAsCounter &&
                               Reg.Kind != RegKind::SVEPredicateVector))
      return DiagnosticPredicateTy::NoMatch;

    if ((isSVEPredicateAsCounterReg<Class>() ||
         isSVEPredicateVectorRegOfWidth<ElementWidth, Class>()) &&
        Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  /// Match a predicate-as-counter (PNn) register of the given class and
  /// element width.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEPredicateAsCounterReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  /// Match an SVE data (Zn) register of the given class and element width.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1334 | |
  /// Match an SVE data register together with an expected shift/extend
  /// (e.g. '[x0, z0.d, lsl #3]'); produces NoMatch/NearMatch to steer the
  /// matcher's diagnostics.
  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicateTy::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(Value: ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicateTy::NoMatch;

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1357 | |
  // A 64-bit register written where a 32-bit one is expected (Wn alias
  // resolved from an Xn spelling during matching).
  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg: Reg.RegNum);
  }

  // A 32-bit register written where a 64-bit one is expected.
  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg: Reg.RegNum);
  }

  // Member of the GPR64x8 (consecutive-8-register) class.
  bool isGPR64x8() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
               Reg: Reg.RegNum);
  }

  // Even/odd W register pair (e.g. for CASP).
  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg: Reg.RegNum);
  }

  // Even/odd X register pair.
  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg: Reg.RegNum);
  }

  // XZR used for the SYSP register-pair operand.
  bool isSyspXzrPair() const {
    return isGPR64<AArch64::GPR64RegClassID>() && Reg.RegNum == AArch64::XZR;
  }
1389 | |
1390 | template<int64_t Angle, int64_t Remainder> |
1391 | DiagnosticPredicate isComplexRotation() const { |
1392 | if (!isImm()) return DiagnosticPredicateTy::NoMatch; |
1393 | |
1394 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm()); |
1395 | if (!CE) return DiagnosticPredicateTy::NoMatch; |
1396 | uint64_t Value = CE->getValue(); |
1397 | |
1398 | if (Value % Angle == Remainder && Value <= 270) |
1399 | return DiagnosticPredicateTy::Match; |
1400 | return DiagnosticPredicateTy::NearMatch; |
1401 | } |
1402 | |
  // Scalar register belonging to the given 64-bit GPR register class.
  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(Reg: getReg());
  }

  /// Match a 64-bit GPR with an 'lsl #log2(ExtWidth/8)' shift, as used for
  /// scaled register offsets.
  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
      return DiagnosticPredicateTy::NoMatch;

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(Value: ExtWidth / 8))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
1418 | |
  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <RegKind VectorKind, unsigned NumRegs>
  bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           VectorList.NumElements == 0 &&
           VectorList.RegisterKind == VectorKind;
  }

  /// Fully-typed vector list check: register kind, count, element count,
  /// element width, and (for strided multi-vector forms) register stride
  /// must all match.
  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth, unsigned Stride = 1>
  bool isTypedVectorList() const {
    if (Kind != k_VectorList)
      return false;
    if (VectorList.Count != NumRegs)
      return false;
    if (VectorList.RegisterKind != VectorKind)
      return false;
    if (VectorList.ElementWidth != ElementWidth)
      return false;
    if (VectorList.Stride != Stride)
      return false;
    return VectorList.NumElements == NumElements;
  }

  /// As isTypedVectorList, but additionally requires the first register to be
  /// a multiple of the list length (consecutive-multiple constraint).
  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth>
  DiagnosticPredicate isTypedVectorListMultiple() const {
    bool Res =
        isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
    if (!Res)
      return DiagnosticPredicateTy::NoMatch;
    if (((VectorList.RegNum - AArch64::Z0) % NumRegs) != 0)
      return DiagnosticPredicateTy::NearMatch;
    return DiagnosticPredicateTy::Match;
  }

  /// Strided vector list check: the first register must lie in the low
  /// Stride registers of either the Z0- or Z16-based half of the file.
  template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
            unsigned ElementWidth>
  DiagnosticPredicate isTypedVectorListStrided() const {
    bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
                                 ElementWidth, Stride>();
    if (!Res)
      return DiagnosticPredicateTy::NoMatch;
    if ((VectorList.RegNum < (AArch64::Z0 + Stride)) ||
        ((VectorList.RegNum >= AArch64::Z16) &&
         (VectorList.RegNum < (AArch64::Z16 + Stride))))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }
1469 | |
1470 | template <int Min, int Max> |
1471 | DiagnosticPredicate isVectorIndex() const { |
1472 | if (Kind != k_VectorIndex) |
1473 | return DiagnosticPredicateTy::NoMatch; |
1474 | if (VectorIndex.Val >= Min && VectorIndex.Val <= Max) |
1475 | return DiagnosticPredicateTy::Match; |
1476 | return DiagnosticPredicateTy::NearMatch; |
1477 | } |
1478 | |
  // Generic token predicate required by MCParsedAsmOperand.
  bool isToken() const override { return Kind == k_Token; }

  // True if this operand is a token with exactly the given spelling.
  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  // Operand-kind predicates for system CRn/CRm, prefetch, PSB and BTI hints,
  // and shift/extend operands.
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isBTIHint() const { return Kind == k_BTIHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
1489 | bool isShifter() const { |
1490 | if (!isShiftExtend()) |
1491 | return false; |
1492 | |
1493 | AArch64_AM::ShiftExtendType ST = getShiftExtendType(); |
1494 | return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR || |
1495 | ST == AArch64_AM::ASR || ST == AArch64_AM::ROR || |
1496 | ST == AArch64_AM::MSL); |
1497 | } |
1498 | |
  /// Match an FP immediate against the table entry for \p ImmEnum by
  /// reconstructing the entry's exact value and comparing bit patterns.
  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicateTy::NoMatch;

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(Enum: ImmEnum);
      assert(Desc && "Unknown enum value" );

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      auto StatusOrErr =
          RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
      if (errorToBool(Err: StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
        llvm_unreachable("FP immediate is not exact" );

      // Bitwise comparison avoids FP-equality pitfalls (e.g. -0.0 == 0.0).
      if (getFPImm().bitwiseIsEqual(RHS: RealVal))
        return DiagnosticPredicateTy::Match;
    }

    return DiagnosticPredicateTy::NearMatch;
  }

  /// Match either of two exact-FP-immediate table entries.
  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
    if ((Res = isExactFPImm<ImmA>()))
      return DiagnosticPredicateTy::Match;
    if ((Res = isExactFPImm<ImmB>()))
      return DiagnosticPredicateTy::Match;
    return Res;
  }
1531 | |
  /// True for any register-extend operand (UXTB..SXTX or LSL) with an
  /// extend amount of at most 4.
  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  bool isExtend64() const {
    if (!isExtend())
      return false;
    // Make sure the extend expects a 32-bit source register.
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
           ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
           ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
  }

  /// True for the extends valid on a 64-bit source register: UXTX, SXTX
  /// or plain LSL, with amount at most 4.
  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }
1563 | |
1564 | bool isLSLImm3Shift() const { |
1565 | if (!isShiftExtend()) |
1566 | return false; |
1567 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); |
1568 | return ET == AArch64_AM::LSL && getShiftExtendAmount() <= 7; |
1569 | } |
1570 | |
  /// X-register memory extend (LSL/SXTX) whose amount is either 0 or the
  /// log2 of the access size in bytes (\p Width is in bits).
  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Value: Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  /// W-register memory extend (UXTW/SXTW) whose amount is either 0 or the
  /// log2 of the access size in bytes (\p Width is in bits).
  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Value: Width / 8) ||
            getShiftExtendAmount() == 0);
  }
1588 | |
  /// Shifter valid for arithmetic instructions: LSL/LSR/ASR with amount
  /// below the register width.
  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  /// Shifter valid for logical instructions: LSL/LSR/ASR/ROR with amount
  /// below the register width.
  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }
1611 | |
  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    // (The previous comment here listed 32 and 48 as well; those belong to
    // the 64-bit variant below.)
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }

  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    // (The previous comment here said "0 or 16", which describes the 32-bit
    // variant above, not this code.)
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }
1635 | |
  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // The halfword variant only permits a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }

  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL (shift-ones) by 8 or 16 — note the
    // type checked below is MSL, not LSL as the old comment suggested.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }
1665 | |
  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  /// \p Width is the access size in bits.
  template<int Width>
  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }
1675 | |
1676 | bool isAdrpLabel() const { |
1677 | // Validation was handled during parsing, so we just verify that |
1678 | // something didn't go haywire. |
1679 | if (!isImm()) |
1680 | return false; |
1681 | |
1682 | if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Imm.Val)) { |
1683 | int64_t Val = CE->getValue(); |
1684 | int64_t Min = - (4096 * (1LL << (21 - 1))); |
1685 | int64_t Max = 4096 * ((1LL << (21 - 1)) - 1); |
1686 | return (Val % 4096) == 0 && Val >= Min && Val <= Max; |
1687 | } |
1688 | |
1689 | return true; |
1690 | } |
1691 | |
1692 | bool isAdrLabel() const { |
1693 | // Validation was handled during parsing, so we just verify that |
1694 | // something didn't go haywire. |
1695 | if (!isImm()) |
1696 | return false; |
1697 | |
1698 | if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Imm.Val)) { |
1699 | int64_t Val = CE->getValue(); |
1700 | int64_t Min = - (1LL << (21 - 1)); |
1701 | int64_t Max = ((1LL << (21 - 1)) - 1); |
1702 | return Val >= Min && Val <= Max; |
1703 | } |
1704 | |
1705 | return true; |
1706 | } |
1707 | |
  /// Match an SME matrix register operand of the given kind, element size
  /// and register class.
  template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
  DiagnosticPredicate isMatrixRegOperand() const {
    if (!isMatrix())
      return DiagnosticPredicateTy::NoMatch;
    if (getMatrixKind() != Kind ||
        !AArch64MCRegisterClasses[RegClass].contains(Reg: getMatrixReg()) ||
        EltSize != getMatrixElementWidth())
      return DiagnosticPredicateTy::NearMatch;
    return DiagnosticPredicateTy::Match;
  }
1718 | |
  bool isPAuthPCRelLabel16Operand() const {
    // PAuth PCRel16 operands are similar to regular branch targets, but only
    // negative values are allowed for concrete immediates as signing instr
    // should be in a lower address.
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
    if (!MCE)
      return true;  // Symbolic target: left to fixup/relocation handling.
    int64_t Val = MCE->getValue();
    // Must be 4-byte aligned.
    if (Val & 0b11)
      return false;
    // Non-positive, within the signed 16-bit field scaled by 4.
    return (Val <= 0) && (Val > -(1 << 18));
  }
1733 | |
1734 | void addExpr(MCInst &Inst, const MCExpr *Expr) const { |
1735 | // Add as immediates when possible. Null MCExpr = 0. |
1736 | if (!Expr) |
1737 | Inst.addOperand(Op: MCOperand::createImm(Val: 0)); |
1738 | else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Expr)) |
1739 | Inst.addOperand(Op: MCOperand::createImm(Val: CE->getValue())); |
1740 | else |
1741 | Inst.addOperand(Op: MCOperand::createExpr(Val: Expr)); |
1742 | } |
1743 | |
1744 | void addRegOperands(MCInst &Inst, unsigned N) const { |
1745 | assert(N == 1 && "Invalid number of operands!" ); |
1746 | Inst.addOperand(Op: MCOperand::createReg(Reg: getReg())); |
1747 | } |
1748 | |
// Emit the ZA matrix register operand unchanged.
void addMatrixOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  Inst.addOperand(Op: MCOperand::createReg(Reg: getMatrixReg()));
}
1753 | |
// Operand was written as a 64-bit X register but the instruction encodes
// the 32-bit W register: map via the shared hardware encoding index.
void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  assert(
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

  const MCRegisterInfo *RI = Ctx.getRegisterInfo();
  uint32_t Reg = RI->getRegClass(i: AArch64::GPR32RegClassID).getRegister(
      i: RI->getEncodingValue(RegNo: getReg()));

  Inst.addOperand(Op: MCOperand::createReg(Reg));
}
1765 | |
// Operand was written as a 32-bit W register but the instruction encodes
// the 64-bit X register: map via the shared hardware encoding index.
void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  assert(
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));

  const MCRegisterInfo *RI = Ctx.getRegisterInfo();
  uint32_t Reg = RI->getRegClass(i: AArch64::GPR64RegClassID).getRegister(
      i: RI->getEncodingValue(RegNo: getReg()));

  Inst.addOperand(Op: MCOperand::createReg(Reg));
}
1777 | |
// An FPR (B/H/S/D/Q) register written where a Z register is encoded:
// re-base the register index onto the Z register bank.
template <int Width>
void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
  unsigned Base;
  switch (Width) {
  case 8: Base = AArch64::B0; break;
  case 16: Base = AArch64::H0; break;
  case 32: Base = AArch64::S0; break;
  case 64: Base = AArch64::D0; break;
  case 128: Base = AArch64::Q0; break;
  default:
    llvm_unreachable("Unsupported width" );
  }
  Inst.addOperand(Op: MCOperand::createReg(Reg: AArch64::Z0 + getReg() - Base));
}
1792 | |
// Accepts either a P or a PN predicate register; PN registers are
// re-based onto the plain P bank before being emitted.
void addPPRorPNRRegOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  unsigned Reg = getReg();
  // Normalise to PPR
  if (Reg >= AArch64::PN0 && Reg <= AArch64::PN15)
    Reg = Reg - AArch64::PN0 + AArch64::P0;
  Inst.addOperand(Op: MCOperand::createReg(Reg));
}
1801 | |
// Emit a predicate-as-counter (PN) register as the matching P register.
void addPNRasPPRRegOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  Inst.addOperand(
      Op: MCOperand::createReg(Reg: (getReg() - AArch64::PN0) + AArch64::P0));
}
1807 | |
// The operand was parsed as a Q register; the instruction encodes the
// corresponding 64-bit D register with the same index.
void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  assert(
      AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
  Inst.addOperand(Op: MCOperand::createReg(Reg: AArch64::D0 + getReg() - AArch64::Q0));
}
1814 | |
// Emit a 128-bit Q vector register unchanged.
void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  assert(
      AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
  Inst.addOperand(Op: MCOperand::createReg(Reg: getReg()));
}
1821 | |
// Emit a vector register unchanged (restriction to the "low" register
// range is enforced by the matcher's predicate, not here).
void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  Inst.addOperand(Op: MCOperand::createReg(Reg: getReg()));
}
1826 | |
// Emit a vector register unchanged (restriction to registers 0-7 is
// enforced by the matcher's predicate, not here).
void addVectorReg0to7Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  Inst.addOperand(Op: MCOperand::createReg(Reg: getReg()));
}
1831 | |
// Selects the register bank of a vector list; used as the row index of
// the FirstRegs table in addVectorListOperands.
enum VecListIndexType {
  VecListIdx_DReg = 0,
  VecListIdx_QReg = 1,
  VecListIdx_ZReg = 2,
  VecListIdx_PReg = 3,
};
1838 | |
// Emit a vector-list operand as the tuple register that starts at the
// list's first register. Column 0 of FirstRegs is the base the parsed
// list start is numbered against; column NumRegs is the first register
// of the NumRegs-wide tuple class.
template <VecListIndexType RegTy, unsigned NumRegs>
void addVectorListOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  static const unsigned FirstRegs[][5] = {
    /* DReg */ { AArch64::Q0,
                 AArch64::D0,       AArch64::D0_D1,
                 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
    /* QReg */ { AArch64::Q0,
                 AArch64::Q0,       AArch64::Q0_Q1,
                 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
    /* ZReg */ { AArch64::Z0,
                 AArch64::Z0,       AArch64::Z0_Z1,
                 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
    /* PReg */ { AArch64::P0,
                 AArch64::P0,       AArch64::P0_P1 }
  };

  assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
         " NumRegs must be <= 4 for ZRegs" );

  assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
         " NumRegs must be <= 2 for PRegs" );

  unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
  Inst.addOperand(Op: MCOperand::createReg(Reg: FirstReg + getVectorListStart() -
                                         FirstRegs[(unsigned)RegTy][0]));
}
1866 | |
// Emit an SME strided (multi-vector) Z-register list. The list start must
// lie in the low quarter/half of each 16-register half-bank; the start is
// re-based onto the corresponding strided tuple register.
template <unsigned NumRegs>
void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4" );

  switch (NumRegs) {
  case 2:
    // Two-register lists stride by 8: {Z0-Z7} or {Z16-Z23} starts.
    if (getVectorListStart() < AArch64::Z16) {
      assert((getVectorListStart() < AArch64::Z8) &&
             (getVectorListStart() >= AArch64::Z0) && "Invalid Register" );
      Inst.addOperand(Op: MCOperand::createReg(
          Reg: AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
    } else {
      assert((getVectorListStart() < AArch64::Z24) &&
             (getVectorListStart() >= AArch64::Z16) && "Invalid Register" );
      Inst.addOperand(Op: MCOperand::createReg(
          Reg: AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
    }
    break;
  case 4:
    // Four-register lists stride by 4: {Z0-Z3} or {Z16-Z19} starts.
    if (getVectorListStart() < AArch64::Z16) {
      assert((getVectorListStart() < AArch64::Z4) &&
             (getVectorListStart() >= AArch64::Z0) && "Invalid Register" );
      Inst.addOperand(Op: MCOperand::createReg(
          Reg: AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
    } else {
      assert((getVectorListStart() < AArch64::Z20) &&
             (getVectorListStart() >= AArch64::Z16) && "Invalid Register" );
      Inst.addOperand(Op: MCOperand::createReg(
          Reg: AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
    }
    break;
  default:
    llvm_unreachable("Unsupported number of registers for strided vec list" );
  }
}
1903 | |
// Emit a ZA tile list as its 8-bit register mask immediate.
void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  unsigned RegMask = getMatrixTileListRegMask();
  assert(RegMask <= 0xFF && "Invalid mask!" );
  Inst.addOperand(Op: MCOperand::createImm(Val: RegMask));
}
1910 | |
// Emit a vector element index (the [n] suffix) as an immediate.
void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  Inst.addOperand(Op: MCOperand::createImm(Val: getVectorIndex()));
}
1915 | |
// The instruction accepts exactly two FP immediates; emit 0 or 1
// depending on which of the two candidates this operand matched.
template <unsigned ImmIs0, unsigned ImmIs1>
void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand" );
  Inst.addOperand(Op: MCOperand::createImm(Val: bool(isExactFPImm<ImmIs1>())));
}
1922 | |
// Emit the immediate, folding constant expressions via addExpr.
void addImmOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  // If this is a pageoff symrefexpr with an addend, adjust the addend
  // to be only the page-offset portion. Otherwise, just add the expr
  // as-is.
  addExpr(Inst, Expr: getImm());
}
1930 | |
// Emit an immediate plus an optional left-shift as two operands.
// Three cases: a constant expressible as (value << Shift), an explicit
// "imm, lsl #n" shifted immediate, or a plain/symbolic immediate
// (shift defaults to 0).
template <int Shift>
void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
  assert(N == 2 && "Invalid number of operands!" );
  if (auto ShiftedVal = getShiftedVal<Shift>()) {
    Inst.addOperand(Op: MCOperand::createImm(Val: ShiftedVal->first));
    Inst.addOperand(Op: MCOperand::createImm(Val: ShiftedVal->second));
  } else if (isShiftedImm()) {
    addExpr(Inst, Expr: getShiftedImmVal());
    Inst.addOperand(Op: MCOperand::createImm(Val: getShiftedImmShift()));
  } else {
    addExpr(Inst, Expr: getImm());
    Inst.addOperand(Op: MCOperand::createImm(Val: 0));
  }
}
1945 | |
// Like addImmWithOptionalShiftOperands but emits the negated value; used
// for aliases such as SUB-as-ADD. Only constant shifted values reach
// here, hence the unreachable fallthrough.
template <int Shift>
void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
  assert(N == 2 && "Invalid number of operands!" );
  if (auto ShiftedVal = getShiftedVal<Shift>()) {
    Inst.addOperand(Op: MCOperand::createImm(Val: -ShiftedVal->first));
    Inst.addOperand(Op: MCOperand::createImm(Val: ShiftedVal->second));
  } else
    llvm_unreachable("Not a shifted negative immediate" );
}
1955 | |
// Emit the condition code as an immediate.
void addCondCodeOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  Inst.addOperand(Op: MCOperand::createImm(Val: getCondCode()));
}
1960 | |
// ADRP encodes a page index, so constant immediates are scaled down by
// 4096 (>> 12); symbolic labels are emitted as-is for the fixup.
void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
  if (!MCE)
    addExpr(Inst, Expr: getImm());
  else
    Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 12));
}
1969 | |
// ADR encodes a byte offset, so no scaling is needed.
void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
  addImmOperands(Inst, N);
}
1973 | |
// Scaled unsigned 12-bit offset: constants are divided by the access
// size (Scale); symbolic expressions are left for the fixup to resolve.
template<int Scale>
void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());

  if (!MCE) {
    Inst.addOperand(Op: MCOperand::createExpr(Val: getImm()));
    return;
  }
  Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() / Scale));
}
1985 | |
// Emit a 6-bit unsigned immediate; must already be a constant here.
void addUImm6Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
  Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue()));
}
1991 | |
// Emit a constant immediate divided by Scale.
template <int Scale>
void addImmScaledOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
  Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() / Scale));
}
1998 | |
// Emit the first value of an immediate range, divided by Scale.
template <int Scale>
void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  Inst.addOperand(Op: MCOperand::createImm(Val: getFirstImmVal() / Scale));
}
2004 | |
// Emit a logical (bitmask) immediate in its N:immr:imms encoded form;
// T selects the 32- vs 64-bit register size.
template <typename T>
void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
  std::make_unsigned_t<T> Val = MCE->getValue();
  uint64_t encoding = AArch64_AM::encodeLogicalImmediate(imm: Val, regSize: sizeof(T) * 8);
  Inst.addOperand(Op: MCOperand::createImm(Val: encoding));
}
2013 | |
// Same as addLogicalImmOperands but encodes the bitwise complement of
// the written value (for NOT-form aliases such as BIC).
template <typename T>
void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
  std::make_unsigned_t<T> Val = ~MCE->getValue();
  uint64_t encoding = AArch64_AM::encodeLogicalImmediate(imm: Val, regSize: sizeof(T) * 8);
  Inst.addOperand(Op: MCOperand::createImm(Val: encoding));
}
2022 | |
// Emit an AdvSIMD modified-immediate (type 10) in its encoded form.
void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
  uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(Imm: MCE->getValue());
  Inst.addOperand(Op: MCOperand::createImm(Val: encoding));
}
2029 | |
2030 | void addBranchTarget26Operands(MCInst &Inst, unsigned N) const { |
2031 | // Branch operands don't encode the low bits, so shift them off |
2032 | // here. If it's a label, however, just put it on directly as there's |
2033 | // not enough information now to do anything. |
2034 | assert(N == 1 && "Invalid number of operands!" ); |
2035 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm()); |
2036 | if (!MCE) { |
2037 | addExpr(Inst, Expr: getImm()); |
2038 | return; |
2039 | } |
2040 | assert(MCE && "Invalid constant immediate operand!" ); |
2041 | Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 2)); |
2042 | } |
2043 | |
void addPAuthPCRelLabel16Operands(MCInst &Inst, unsigned N) const {
  // PC-relative operands don't encode the low bits, so shift them off
  // here. If it's a label, however, just put it on directly as there's
  // not enough information now to do anything.
  assert(N == 1 && "Invalid number of operands!" );
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm());
  if (!MCE) {
    addExpr(Inst, Expr: getImm());
    return;
  }
  // Constant: drop the two always-zero low bits (4-byte alignment).
  Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 2));
}
2056 | |
2057 | void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const { |
2058 | // Branch operands don't encode the low bits, so shift them off |
2059 | // here. If it's a label, however, just put it on directly as there's |
2060 | // not enough information now to do anything. |
2061 | assert(N == 1 && "Invalid number of operands!" ); |
2062 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm()); |
2063 | if (!MCE) { |
2064 | addExpr(Inst, Expr: getImm()); |
2065 | return; |
2066 | } |
2067 | assert(MCE && "Invalid constant immediate operand!" ); |
2068 | Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 2)); |
2069 | } |
2070 | |
2071 | void addBranchTarget14Operands(MCInst &Inst, unsigned N) const { |
2072 | // Branch operands don't encode the low bits, so shift them off |
2073 | // here. If it's a label, however, just put it on directly as there's |
2074 | // not enough information now to do anything. |
2075 | assert(N == 1 && "Invalid number of operands!" ); |
2076 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: getImm()); |
2077 | if (!MCE) { |
2078 | addExpr(Inst, Expr: getImm()); |
2079 | return; |
2080 | } |
2081 | assert(MCE && "Invalid constant immediate operand!" ); |
2082 | Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() >> 2)); |
2083 | } |
2084 | |
// Emit an FP immediate in its 8-bit FMOV-encoded form.
void addFPImmOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  Inst.addOperand(Op: MCOperand::createImm(
      Val: AArch64_AM::getFP64Imm(Imm: getFPImm().bitcastToAPInt())));
}
2090 | |
// Emit a DSB/DMB/ISB barrier option as its immediate value.
void addBarrierOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  Inst.addOperand(Op: MCOperand::createImm(Val: getBarrier()));
}
2095 | |
// Emit a DSB-nXS barrier option as its immediate value.
void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  Inst.addOperand(Op: MCOperand::createImm(Val: getBarrier()));
}
2100 | |
// Emit the MRS (read) encoding of the system register.
void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );

  Inst.addOperand(Op: MCOperand::createImm(Val: SysReg.MRSReg));
}
2106 | |
// Emit the MSR (write) encoding of the system register.
void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );

  Inst.addOperand(Op: MCOperand::createImm(Val: SysReg.MSRReg));
}
2112 | |
// Emit a PSTATE field (1-bit immediate form) as its encoding.
void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );

  Inst.addOperand(Op: MCOperand::createImm(Val: SysReg.PStateField));
}
2118 | |
// Emit an SME SVCR (streaming-mode control) field as its encoding.
void addSVCROperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );

  Inst.addOperand(Op: MCOperand::createImm(Val: SVCR.PStateField));
}
2124 | |
// Emit a PSTATE field (4-bit immediate form) as its encoding.
void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );

  Inst.addOperand(Op: MCOperand::createImm(Val: SysReg.PStateField));
}
2130 | |
// Emit a system instruction Cn/Cm field as an immediate.
void addSysCROperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  Inst.addOperand(Op: MCOperand::createImm(Val: getSysCR()));
}
2135 | |
// Emit a PRFM prefetch operation as its immediate encoding.
void addPrefetchOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  Inst.addOperand(Op: MCOperand::createImm(Val: getPrefetch()));
}
2140 | |
// Emit a PSB hint as its immediate encoding.
void addPSBHintOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  Inst.addOperand(Op: MCOperand::createImm(Val: getPSBHint()));
}
2145 | |
// Emit a BTI hint as its immediate encoding.
void addBTIHintOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  Inst.addOperand(Op: MCOperand::createImm(Val: getBTIHint()));
}
2150 | |
// Pack shift type + amount into a single shifter immediate.
void addShifterOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  unsigned Imm =
      AArch64_AM::getShifterImm(ST: getShiftExtendType(), Imm: getShiftExtendAmount());
  Inst.addOperand(Op: MCOperand::createImm(Val: Imm));
}
2157 | |
// Emit just the LSL amount (the type is implied by the instruction).
void addLSLImm3ShifterOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  unsigned Imm = getShiftExtendAmount();
  Inst.addOperand(Op: MCOperand::createImm(Val: Imm));
}
2163 | |
// SYSP takes an Xt pair that must be XZR here: verify via the encoding
// index and emit a single XZR operand.
void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );

  if (!isScalarReg())
    return;

  const MCRegisterInfo *RI = Ctx.getRegisterInfo();
  uint32_t Reg = RI->getRegClass(i: AArch64::GPR64RegClassID)
                     .getRegister(i: RI->getEncodingValue(RegNo: getReg()));
  if (Reg != AArch64::XZR)
    llvm_unreachable("wrong register" );

  Inst.addOperand(Op: MCOperand::createReg(Reg: AArch64::XZR));
}
2178 | |
// Emit an arithmetic extend; for 32-bit forms a plain LSL is the UXTW
// alias, so canonicalize before encoding.
void addExtendOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
  if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
  unsigned Imm = AArch64_AM::getArithExtendImm(ET, Imm: getShiftExtendAmount());
  Inst.addOperand(Op: MCOperand::createImm(Val: Imm));
}
2186 | |
// Emit an arithmetic extend; for 64-bit forms a plain LSL is the UXTX
// alias, so canonicalize before encoding.
void addExtend64Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
  if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
  unsigned Imm = AArch64_AM::getArithExtendImm(ET, Imm: getShiftExtendAmount());
  Inst.addOperand(Op: MCOperand::createImm(Val: Imm));
}
2194 | |
// Register-offset memory extend: emits two flags, "sign-extended?" and
// "shift amount non-zero?".
void addMemExtendOperands(MCInst &Inst, unsigned N) const {
  assert(N == 2 && "Invalid number of operands!" );
  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
  bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
  Inst.addOperand(Op: MCOperand::createImm(Val: IsSigned));
  Inst.addOperand(Op: MCOperand::createImm(Val: getShiftExtendAmount() != 0));
}
2202 | |
// For 8-bit load/store instructions with a register offset, both the
// "DoShift" and "NoShift" variants have a shift of 0. Because of this,
// they're disambiguated by whether the shift was explicit or implicit rather
// than its size.
void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
  assert(N == 2 && "Invalid number of operands!" );
  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
  bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
  Inst.addOperand(Op: MCOperand::createImm(Val: IsSigned));
  // Second flag: was an explicit "#0" written? (not the amount itself).
  Inst.addOperand(Op: MCOperand::createImm(Val: hasShiftExtendAmount()));
}
2214 | |
// MOV-as-MOVZ alias: extract the 16-bit chunk at the given shift from a
// constant; symbolic immediates are passed through for fixups.
template<int Shift>
void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );

  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
  if (CE) {
    uint64_t Value = CE->getValue();
    Inst.addOperand(Op: MCOperand::createImm(Val: (Value >> Shift) & 0xffff));
  } else {
    addExpr(Inst, Expr: getImm());
  }
}
2227 | |
// MOV-as-MOVN alias: extract the 16-bit chunk at the given shift from the
// complemented constant (MOVN inverts at execution time).
template<int Shift>
void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );

  const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
  uint64_t Value = CE->getValue();
  Inst.addOperand(Op: MCOperand::createImm(Val: (~Value >> Shift) & 0xffff));
}
2236 | |
// Complex rotation in {0,90,180,270}: encode as rotation / 90.
void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
  Inst.addOperand(Op: MCOperand::createImm(Val: MCE->getValue() / 90));
}
2242 | |
// Complex rotation in {90,270}: encode as (rotation - 90) / 180.
void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!" );
  const MCConstantExpr *MCE = cast<MCConstantExpr>(Val: getImm());
  Inst.addOperand(Op: MCOperand::createImm(Val: (MCE->getValue() - 90) / 180));
}
2248 | |
2249 | void print(raw_ostream &OS) const override; |
2250 | |
// Factory: build a k_Token operand referencing (not copying) Str's
// storage, so Str must outlive the operand.
static std::unique_ptr<AArch64Operand>
CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
  auto Op = std::make_unique<AArch64Operand>(args: k_Token, args&: Ctx);
  Op->Tok.Data = Str.data();
  Op->Tok.Length = Str.size();
  Op->Tok.IsSuffix = IsSuffix;
  Op->StartLoc = S;
  Op->EndLoc = S;
  return Op;
}
2261 | |
// Factory: build a k_Register operand with optional shift/extend info
// and a register-equality constraint for tied-operand checking.
static std::unique_ptr<AArch64Operand>
CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
          RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
          AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
          unsigned ShiftAmount = 0,
          unsigned HasExplicitAmount = false) {
  auto Op = std::make_unique<AArch64Operand>(args: k_Register, args&: Ctx);
  Op->Reg.RegNum = RegNum;
  Op->Reg.Kind = Kind;
  Op->Reg.ElementWidth = 0;
  Op->Reg.EqualityTy = EqTy;
  Op->Reg.ShiftExtend.Type = ExtTy;
  Op->Reg.ShiftExtend.Amount = ShiftAmount;
  Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
  Op->StartLoc = S;
  Op->EndLoc = E;
  return Op;
}
2280 | |
// Factory: like CreateReg but for vector register kinds, additionally
// recording the element width of the written suffix (e.g. ".4s").
static std::unique_ptr<AArch64Operand>
CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
                SMLoc S, SMLoc E, MCContext &Ctx,
                AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
                unsigned ShiftAmount = 0,
                unsigned HasExplicitAmount = false) {
  assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
          Kind == RegKind::SVEPredicateVector ||
          Kind == RegKind::SVEPredicateAsCounter) &&
         "Invalid vector kind" );
  auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqTy: EqualsReg, ExtTy, ShiftAmount,
                      HasExplicitAmount);
  Op->Reg.ElementWidth = ElementWidth;
  return Op;
}
2296 | |
// Factory: build a k_VectorList operand (register list such as
// "{ v0.4s, v1.4s }"), recording start register, count, stride and
// element geometry.
static std::unique_ptr<AArch64Operand>
CreateVectorList(unsigned RegNum, unsigned Count, unsigned Stride,
                 unsigned NumElements, unsigned ElementWidth,
                 RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
  auto Op = std::make_unique<AArch64Operand>(args: k_VectorList, args&: Ctx);
  Op->VectorList.RegNum = RegNum;
  Op->VectorList.Count = Count;
  Op->VectorList.Stride = Stride;
  Op->VectorList.NumElements = NumElements;
  Op->VectorList.ElementWidth = ElementWidth;
  Op->VectorList.RegisterKind = RegisterKind;
  Op->StartLoc = S;
  Op->EndLoc = E;
  return Op;
}
2312 | |
// Factory: build a k_VectorIndex operand (the [n] element selector).
static std::unique_ptr<AArch64Operand>
CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
  auto Op = std::make_unique<AArch64Operand>(args: k_VectorIndex, args&: Ctx);
  Op->VectorIndex.Val = Idx;
  Op->StartLoc = S;
  Op->EndLoc = E;
  return Op;
}
2321 | |
// Factory: build a k_MatrixTileList operand from a ZA tile bitmask.
static std::unique_ptr<AArch64Operand>
CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
  auto Op = std::make_unique<AArch64Operand>(args: k_MatrixTileList, args&: Ctx);
  Op->MatrixTileList.RegMask = RegMask;
  Op->StartLoc = S;
  Op->EndLoc = E;
  return Op;
}
2330 | |
// Expand a ZA tile of the given element width into the set of 64-bit
// (ZAD) tiles it overlaps; 64-bit tiles map to themselves. Used to
// detect aliasing between tiles in a tile list.
static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
                                const unsigned ElementWidth) {
  // Keyed by (element width, tile register); width 0 and 8 both mean the
  // whole-ZA byte tile, which overlaps all eight ZAD tiles.
  static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
      RegMap = {
          {{0, AArch64::ZAB0},
           {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
            AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
          {{8, AArch64::ZAB0},
           {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
            AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
          {{16, AArch64::ZAH0},
           {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
          {{16, AArch64::ZAH1},
           {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
          {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
          {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
          {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
          {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
      };

  if (ElementWidth == 64)
    OutRegs.insert(V: Reg);
  else {
    std::vector<unsigned> Regs = RegMap[std::make_pair(x: ElementWidth, y&: Reg)];
    assert(!Regs.empty() && "Invalid tile or element width!" );
    for (auto OutReg : Regs)
      OutRegs.insert(V: OutReg);
  }
}
2360 | |
// Factory: build a k_Immediate operand wrapping an MCExpr.
static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
                                                 SMLoc E, MCContext &Ctx) {
  auto Op = std::make_unique<AArch64Operand>(args: k_Immediate, args&: Ctx);
  Op->Imm.Val = Val;
  Op->StartLoc = S;
  Op->EndLoc = E;
  return Op;
}
2369 | |
2370 | static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val, |
2371 | unsigned ShiftAmount, |
2372 | SMLoc S, SMLoc E, |
2373 | MCContext &Ctx) { |
2374 | auto Op = std::make_unique<AArch64Operand>(args: k_ShiftedImm, args&: Ctx); |
2375 | Op->ShiftedImm .Val = Val; |
2376 | Op->ShiftedImm.ShiftAmount = ShiftAmount; |
2377 | Op->StartLoc = S; |
2378 | Op->EndLoc = E; |
2379 | return Op; |
2380 | } |
2381 | |
2382 | static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First, |
2383 | unsigned Last, SMLoc S, |
2384 | SMLoc E, |
2385 | MCContext &Ctx) { |
2386 | auto Op = std::make_unique<AArch64Operand>(args: k_ImmRange, args&: Ctx); |
2387 | Op->ImmRange.First = First; |
2388 | Op->ImmRange.Last = Last; |
2389 | Op->EndLoc = E; |
2390 | return Op; |
2391 | } |
2392 | |
// Factory: build a k_CondCode operand (EQ, NE, ...).
static std::unique_ptr<AArch64Operand>
CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
  auto Op = std::make_unique<AArch64Operand>(args: k_CondCode, args&: Ctx);
  Op->CondCode.Code = Code;
  Op->StartLoc = S;
  Op->EndLoc = E;
  return Op;
}
2401 | |
// Factory: build a k_FPImm operand, storing the raw bit pattern of the
// float; IsExact records whether the written literal was representable
// exactly.
static std::unique_ptr<AArch64Operand>
CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
  auto Op = std::make_unique<AArch64Operand>(args: k_FPImm, args&: Ctx);
  Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
  Op->FPImm.IsExact = IsExact;
  Op->StartLoc = S;
  Op->EndLoc = S;
  return Op;
}
2411 | |
// Factory: build a k_Barrier operand, keeping the spelled name (Str's
// storage is referenced, not copied) and whether the nXS variant was
// written.
static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
                                                     StringRef Str,
                                                     SMLoc S,
                                                     MCContext &Ctx,
                                                     bool HasnXSModifier) {
  auto Op = std::make_unique<AArch64Operand>(args: k_Barrier, args&: Ctx);
  Op->Barrier.Val = Val;
  Op->Barrier.Data = Str.data();
  Op->Barrier.Length = Str.size();
  Op->Barrier.HasnXSModifier = HasnXSModifier;
  Op->StartLoc = S;
  Op->EndLoc = S;
  return Op;
}
2426 | |
// Factory: build a k_SysReg operand carrying the MRS, MSR and PSTATE
// encodings of the named system register.
static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
                                                    uint32_t MRSReg,
                                                    uint32_t MSRReg,
                                                    uint32_t PStateField,
                                                    MCContext &Ctx) {
  auto Op = std::make_unique<AArch64Operand>(args: k_SysReg, args&: Ctx);
  Op->SysReg.Data = Str.data();
  Op->SysReg.Length = Str.size();
  Op->SysReg.MRSReg = MRSReg;
  Op->SysReg.MSRReg = MSRReg;
  Op->SysReg.PStateField = PStateField;
  Op->StartLoc = S;
  Op->EndLoc = S;
  return Op;
}
2442 | |
// Factory: build a k_SysCR operand (Cn/Cm field of system instructions).
static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
                                                   SMLoc E, MCContext &Ctx) {
  auto Op = std::make_unique<AArch64Operand>(args: k_SysCR, args&: Ctx);
  Op->SysCRImm.Val = Val;
  Op->StartLoc = S;
  Op->EndLoc = E;
  return Op;
}
2451 | |
2452 | static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val, |
2453 | StringRef Str, |
2454 | SMLoc S, |
2455 | MCContext &Ctx) { |
2456 | auto Op = std::make_unique<AArch64Operand>(args: k_Prefetch, args&: Ctx); |
2457 | Op->Prefetch.Val = Val; |
2458 | Op->Barrier.Data = Str.data(); |
2459 | Op->Barrier.Length = Str.size(); |
2460 | Op->StartLoc = S; |
2461 | Op->EndLoc = S; |
2462 | return Op; |
2463 | } |
2464 | |
2465 | static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val, |
2466 | StringRef Str, |
2467 | SMLoc S, |
2468 | MCContext &Ctx) { |
2469 | auto Op = std::make_unique<AArch64Operand>(args: k_PSBHint, args&: Ctx); |
2470 | Op->PSBHint.Val = Val; |
2471 | Op->PSBHint.Data = Str.data(); |
2472 | Op->PSBHint.Length = Str.size(); |
2473 | Op->StartLoc = S; |
2474 | Op->EndLoc = S; |
2475 | return Op; |
2476 | } |
2477 | |
2478 | static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val, |
2479 | StringRef Str, |
2480 | SMLoc S, |
2481 | MCContext &Ctx) { |
2482 | auto Op = std::make_unique<AArch64Operand>(args: k_BTIHint, args&: Ctx); |
2483 | Op->BTIHint.Val = Val | 32; |
2484 | Op->BTIHint.Data = Str.data(); |
2485 | Op->BTIHint.Length = Str.size(); |
2486 | Op->StartLoc = S; |
2487 | Op->EndLoc = S; |
2488 | return Op; |
2489 | } |
2490 | |
2491 | static std::unique_ptr<AArch64Operand> |
2492 | CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind, |
2493 | SMLoc S, SMLoc E, MCContext &Ctx) { |
2494 | auto Op = std::make_unique<AArch64Operand>(args: k_MatrixRegister, args&: Ctx); |
2495 | Op->MatrixReg.RegNum = RegNum; |
2496 | Op->MatrixReg.ElementWidth = ElementWidth; |
2497 | Op->MatrixReg.Kind = Kind; |
2498 | Op->StartLoc = S; |
2499 | Op->EndLoc = E; |
2500 | return Op; |
2501 | } |
2502 | |
2503 | static std::unique_ptr<AArch64Operand> |
2504 | CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) { |
2505 | auto Op = std::make_unique<AArch64Operand>(args: k_SVCR, args&: Ctx); |
2506 | Op->SVCR.PStateField = PStateField; |
2507 | Op->SVCR.Data = Str.data(); |
2508 | Op->SVCR.Length = Str.size(); |
2509 | Op->StartLoc = S; |
2510 | Op->EndLoc = S; |
2511 | return Op; |
2512 | } |
2513 | |
2514 | static std::unique_ptr<AArch64Operand> |
2515 | CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val, |
2516 | bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) { |
2517 | auto Op = std::make_unique<AArch64Operand>(args: k_ShiftExtend, args&: Ctx); |
2518 | Op->ShiftExtend.Type = ShOp; |
2519 | Op->ShiftExtend.Amount = Val; |
2520 | Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount; |
2521 | Op->StartLoc = S; |
2522 | Op->EndLoc = E; |
2523 | return Op; |
2524 | } |
2525 | }; |
2526 | |
2527 | } // end anonymous namespace. |
2528 | |
/// Debug printer: renders the operand as "<kind value...>" on OS.  The
/// output is for dumps/diagnostics, not assembly syntax.
void AArch64Operand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImm:
    // Print the raw bit pattern of the FP immediate, flagging values that
    // were not exactly representable as written.
    OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
    if (!getFPImmIsExact())
      OS << " (inexact)";
    OS << ">";
    break;
  case k_Barrier: {
    StringRef Name = getBarrierName();
    if (!Name.empty())
      OS << "<barrier " << Name << ">";
    else
      OS << "<barrier invalid #" << getBarrier() << ">";
    break;
  }
  case k_Immediate:
    OS << *getImm();
    break;
  case k_ShiftedImm: {
    unsigned Shift = getShiftedImmShift();
    OS << "<shiftedimm ";
    OS << *getShiftedImmVal();
    OS << ", lsl #" << AArch64_AM::getShiftValue(Imm: Shift) << ">";
    break;
  }
  case k_ImmRange: {
    // Printed as "first:last".
    OS << "<immrange ";
    OS << getFirstImmVal();
    OS << ":" << getLastImmVal() << ">";
    break;
  }
  case k_CondCode:
    OS << "<condcode " << getCondCode() << ">";
    break;
  case k_VectorList: {
    OS << "<vectorlist ";
    unsigned Reg = getVectorListStart();
    // Registers in the list may be strided, hence the stride multiplier.
    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
      OS << Reg + i * getVectorListStride() << " ";
    OS << ">";
    break;
  }
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  case k_SysReg:
    OS << "<sysreg: " << getSysReg() << '>';
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_SysCR:
    OS << "c" << getSysCR();
    break;
  case k_Prefetch: {
    StringRef Name = getPrefetchName();
    if (!Name.empty())
      OS << "<prfop " << Name << ">";
    else
      OS << "<prfop invalid #" << getPrefetch() << ">";
    break;
  }
  case k_PSBHint:
    OS << getPSBHintName();
    break;
  case k_BTIHint:
    OS << getBTIHintName();
    break;
  case k_MatrixRegister:
    OS << "<matrix " << getMatrixReg() << ">";
    break;
  case k_MatrixTileList: {
    OS << "<matrixlist ";
    unsigned RegMask = getMatrixTileListRegMask();
    unsigned MaxBits = 8;
    // Print the 8-bit tile mask most-significant-bit first as 0s and 1s.
    for (unsigned I = MaxBits; I > 0; --I)
      OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
    OS << '>';
    break;
  }
  case k_SVCR: {
    OS << getSVCR();
    break;
  }
  case k_Register:
    OS << "<register " << getReg() << ">";
    // If the register carries a shift/extend (explicit or with an implicit
    // amount), fall through to print it in the k_ShiftExtend format.
    if (!getShiftExtendAmount() && !hasShiftExtendAmount())
      break;
    [[fallthrough]];
  case k_ShiftExtend:
    OS << "<" << AArch64_AM::getShiftExtendName(ST: getShiftExtendType()) << " #"
       << getShiftExtendAmount();
    if (!hasShiftExtendAmount())
      OS << "<imp>"; // Amount was implicit rather than written in the source.
    OS << '>';
    break;
  }
}
2628 | |
2629 | /// @name Auto-generated Match Functions |
2630 | /// { |
2631 | |
2632 | static MCRegister MatchRegisterName(StringRef Name); |
2633 | |
2634 | /// } |
2635 | |
2636 | static unsigned MatchNeonVectorRegName(StringRef Name) { |
2637 | return StringSwitch<unsigned>(Name.lower()) |
2638 | .Case(S: "v0" , Value: AArch64::Q0) |
2639 | .Case(S: "v1" , Value: AArch64::Q1) |
2640 | .Case(S: "v2" , Value: AArch64::Q2) |
2641 | .Case(S: "v3" , Value: AArch64::Q3) |
2642 | .Case(S: "v4" , Value: AArch64::Q4) |
2643 | .Case(S: "v5" , Value: AArch64::Q5) |
2644 | .Case(S: "v6" , Value: AArch64::Q6) |
2645 | .Case(S: "v7" , Value: AArch64::Q7) |
2646 | .Case(S: "v8" , Value: AArch64::Q8) |
2647 | .Case(S: "v9" , Value: AArch64::Q9) |
2648 | .Case(S: "v10" , Value: AArch64::Q10) |
2649 | .Case(S: "v11" , Value: AArch64::Q11) |
2650 | .Case(S: "v12" , Value: AArch64::Q12) |
2651 | .Case(S: "v13" , Value: AArch64::Q13) |
2652 | .Case(S: "v14" , Value: AArch64::Q14) |
2653 | .Case(S: "v15" , Value: AArch64::Q15) |
2654 | .Case(S: "v16" , Value: AArch64::Q16) |
2655 | .Case(S: "v17" , Value: AArch64::Q17) |
2656 | .Case(S: "v18" , Value: AArch64::Q18) |
2657 | .Case(S: "v19" , Value: AArch64::Q19) |
2658 | .Case(S: "v20" , Value: AArch64::Q20) |
2659 | .Case(S: "v21" , Value: AArch64::Q21) |
2660 | .Case(S: "v22" , Value: AArch64::Q22) |
2661 | .Case(S: "v23" , Value: AArch64::Q23) |
2662 | .Case(S: "v24" , Value: AArch64::Q24) |
2663 | .Case(S: "v25" , Value: AArch64::Q25) |
2664 | .Case(S: "v26" , Value: AArch64::Q26) |
2665 | .Case(S: "v27" , Value: AArch64::Q27) |
2666 | .Case(S: "v28" , Value: AArch64::Q28) |
2667 | .Case(S: "v29" , Value: AArch64::Q29) |
2668 | .Case(S: "v30" , Value: AArch64::Q30) |
2669 | .Case(S: "v31" , Value: AArch64::Q31) |
2670 | .Default(Value: 0); |
2671 | } |
2672 | |
2673 | /// Returns an optional pair of (#elements, element-width) if Suffix |
2674 | /// is a valid vector kind. Where the number of elements in a vector |
2675 | /// or the vector width is implicit or explicitly unknown (but still a |
2676 | /// valid suffix kind), 0 is used. |
2677 | static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix, |
2678 | RegKind VectorKind) { |
2679 | std::pair<int, int> Res = {-1, -1}; |
2680 | |
2681 | switch (VectorKind) { |
2682 | case RegKind::NeonVector: |
2683 | Res = StringSwitch<std::pair<int, int>>(Suffix.lower()) |
2684 | .Case(S: "" , Value: {0, 0}) |
2685 | .Case(S: ".1d" , Value: {1, 64}) |
2686 | .Case(S: ".1q" , Value: {1, 128}) |
2687 | // '.2h' needed for fp16 scalar pairwise reductions |
2688 | .Case(S: ".2h" , Value: {2, 16}) |
2689 | .Case(S: ".2b" , Value: {2, 8}) |
2690 | .Case(S: ".2s" , Value: {2, 32}) |
2691 | .Case(S: ".2d" , Value: {2, 64}) |
2692 | // '.4b' is another special case for the ARMv8.2a dot product |
2693 | // operand |
2694 | .Case(S: ".4b" , Value: {4, 8}) |
2695 | .Case(S: ".4h" , Value: {4, 16}) |
2696 | .Case(S: ".4s" , Value: {4, 32}) |
2697 | .Case(S: ".8b" , Value: {8, 8}) |
2698 | .Case(S: ".8h" , Value: {8, 16}) |
2699 | .Case(S: ".16b" , Value: {16, 8}) |
2700 | // Accept the width neutral ones, too, for verbose syntax. If |
2701 | // those aren't used in the right places, the token operand won't |
2702 | // match so all will work out. |
2703 | .Case(S: ".b" , Value: {0, 8}) |
2704 | .Case(S: ".h" , Value: {0, 16}) |
2705 | .Case(S: ".s" , Value: {0, 32}) |
2706 | .Case(S: ".d" , Value: {0, 64}) |
2707 | .Default(Value: {-1, -1}); |
2708 | break; |
2709 | case RegKind::SVEPredicateAsCounter: |
2710 | case RegKind::SVEPredicateVector: |
2711 | case RegKind::SVEDataVector: |
2712 | case RegKind::Matrix: |
2713 | Res = StringSwitch<std::pair<int, int>>(Suffix.lower()) |
2714 | .Case(S: "" , Value: {0, 0}) |
2715 | .Case(S: ".b" , Value: {0, 8}) |
2716 | .Case(S: ".h" , Value: {0, 16}) |
2717 | .Case(S: ".s" , Value: {0, 32}) |
2718 | .Case(S: ".d" , Value: {0, 64}) |
2719 | .Case(S: ".q" , Value: {0, 128}) |
2720 | .Default(Value: {-1, -1}); |
2721 | break; |
2722 | default: |
2723 | llvm_unreachable("Unsupported RegKind" ); |
2724 | } |
2725 | |
2726 | if (Res == std::make_pair(x: -1, y: -1)) |
2727 | return std::nullopt; |
2728 | |
2729 | return std::optional<std::pair<int, int>>(Res); |
2730 | } |
2731 | |
2732 | static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) { |
2733 | return parseVectorKind(Suffix, VectorKind).has_value(); |
2734 | } |
2735 | |
2736 | static unsigned matchSVEDataVectorRegName(StringRef Name) { |
2737 | return StringSwitch<unsigned>(Name.lower()) |
2738 | .Case(S: "z0" , Value: AArch64::Z0) |
2739 | .Case(S: "z1" , Value: AArch64::Z1) |
2740 | .Case(S: "z2" , Value: AArch64::Z2) |
2741 | .Case(S: "z3" , Value: AArch64::Z3) |
2742 | .Case(S: "z4" , Value: AArch64::Z4) |
2743 | .Case(S: "z5" , Value: AArch64::Z5) |
2744 | .Case(S: "z6" , Value: AArch64::Z6) |
2745 | .Case(S: "z7" , Value: AArch64::Z7) |
2746 | .Case(S: "z8" , Value: AArch64::Z8) |
2747 | .Case(S: "z9" , Value: AArch64::Z9) |
2748 | .Case(S: "z10" , Value: AArch64::Z10) |
2749 | .Case(S: "z11" , Value: AArch64::Z11) |
2750 | .Case(S: "z12" , Value: AArch64::Z12) |
2751 | .Case(S: "z13" , Value: AArch64::Z13) |
2752 | .Case(S: "z14" , Value: AArch64::Z14) |
2753 | .Case(S: "z15" , Value: AArch64::Z15) |
2754 | .Case(S: "z16" , Value: AArch64::Z16) |
2755 | .Case(S: "z17" , Value: AArch64::Z17) |
2756 | .Case(S: "z18" , Value: AArch64::Z18) |
2757 | .Case(S: "z19" , Value: AArch64::Z19) |
2758 | .Case(S: "z20" , Value: AArch64::Z20) |
2759 | .Case(S: "z21" , Value: AArch64::Z21) |
2760 | .Case(S: "z22" , Value: AArch64::Z22) |
2761 | .Case(S: "z23" , Value: AArch64::Z23) |
2762 | .Case(S: "z24" , Value: AArch64::Z24) |
2763 | .Case(S: "z25" , Value: AArch64::Z25) |
2764 | .Case(S: "z26" , Value: AArch64::Z26) |
2765 | .Case(S: "z27" , Value: AArch64::Z27) |
2766 | .Case(S: "z28" , Value: AArch64::Z28) |
2767 | .Case(S: "z29" , Value: AArch64::Z29) |
2768 | .Case(S: "z30" , Value: AArch64::Z30) |
2769 | .Case(S: "z31" , Value: AArch64::Z31) |
2770 | .Default(Value: 0); |
2771 | } |
2772 | |
2773 | static unsigned matchSVEPredicateVectorRegName(StringRef Name) { |
2774 | return StringSwitch<unsigned>(Name.lower()) |
2775 | .Case(S: "p0" , Value: AArch64::P0) |
2776 | .Case(S: "p1" , Value: AArch64::P1) |
2777 | .Case(S: "p2" , Value: AArch64::P2) |
2778 | .Case(S: "p3" , Value: AArch64::P3) |
2779 | .Case(S: "p4" , Value: AArch64::P4) |
2780 | .Case(S: "p5" , Value: AArch64::P5) |
2781 | .Case(S: "p6" , Value: AArch64::P6) |
2782 | .Case(S: "p7" , Value: AArch64::P7) |
2783 | .Case(S: "p8" , Value: AArch64::P8) |
2784 | .Case(S: "p9" , Value: AArch64::P9) |
2785 | .Case(S: "p10" , Value: AArch64::P10) |
2786 | .Case(S: "p11" , Value: AArch64::P11) |
2787 | .Case(S: "p12" , Value: AArch64::P12) |
2788 | .Case(S: "p13" , Value: AArch64::P13) |
2789 | .Case(S: "p14" , Value: AArch64::P14) |
2790 | .Case(S: "p15" , Value: AArch64::P15) |
2791 | .Default(Value: 0); |
2792 | } |
2793 | |
2794 | static unsigned matchSVEPredicateAsCounterRegName(StringRef Name) { |
2795 | return StringSwitch<unsigned>(Name.lower()) |
2796 | .Case(S: "pn0" , Value: AArch64::PN0) |
2797 | .Case(S: "pn1" , Value: AArch64::PN1) |
2798 | .Case(S: "pn2" , Value: AArch64::PN2) |
2799 | .Case(S: "pn3" , Value: AArch64::PN3) |
2800 | .Case(S: "pn4" , Value: AArch64::PN4) |
2801 | .Case(S: "pn5" , Value: AArch64::PN5) |
2802 | .Case(S: "pn6" , Value: AArch64::PN6) |
2803 | .Case(S: "pn7" , Value: AArch64::PN7) |
2804 | .Case(S: "pn8" , Value: AArch64::PN8) |
2805 | .Case(S: "pn9" , Value: AArch64::PN9) |
2806 | .Case(S: "pn10" , Value: AArch64::PN10) |
2807 | .Case(S: "pn11" , Value: AArch64::PN11) |
2808 | .Case(S: "pn12" , Value: AArch64::PN12) |
2809 | .Case(S: "pn13" , Value: AArch64::PN13) |
2810 | .Case(S: "pn14" , Value: AArch64::PN14) |
2811 | .Case(S: "pn15" , Value: AArch64::PN15) |
2812 | .Default(Value: 0); |
2813 | } |
2814 | |
2815 | static unsigned matchMatrixTileListRegName(StringRef Name) { |
2816 | return StringSwitch<unsigned>(Name.lower()) |
2817 | .Case(S: "za0.d" , Value: AArch64::ZAD0) |
2818 | .Case(S: "za1.d" , Value: AArch64::ZAD1) |
2819 | .Case(S: "za2.d" , Value: AArch64::ZAD2) |
2820 | .Case(S: "za3.d" , Value: AArch64::ZAD3) |
2821 | .Case(S: "za4.d" , Value: AArch64::ZAD4) |
2822 | .Case(S: "za5.d" , Value: AArch64::ZAD5) |
2823 | .Case(S: "za6.d" , Value: AArch64::ZAD6) |
2824 | .Case(S: "za7.d" , Value: AArch64::ZAD7) |
2825 | .Case(S: "za0.s" , Value: AArch64::ZAS0) |
2826 | .Case(S: "za1.s" , Value: AArch64::ZAS1) |
2827 | .Case(S: "za2.s" , Value: AArch64::ZAS2) |
2828 | .Case(S: "za3.s" , Value: AArch64::ZAS3) |
2829 | .Case(S: "za0.h" , Value: AArch64::ZAH0) |
2830 | .Case(S: "za1.h" , Value: AArch64::ZAH1) |
2831 | .Case(S: "za0.b" , Value: AArch64::ZAB0) |
2832 | .Default(Value: 0); |
2833 | } |
2834 | |
2835 | static unsigned matchMatrixRegName(StringRef Name) { |
2836 | return StringSwitch<unsigned>(Name.lower()) |
2837 | .Case(S: "za" , Value: AArch64::ZA) |
2838 | .Case(S: "za0.q" , Value: AArch64::ZAQ0) |
2839 | .Case(S: "za1.q" , Value: AArch64::ZAQ1) |
2840 | .Case(S: "za2.q" , Value: AArch64::ZAQ2) |
2841 | .Case(S: "za3.q" , Value: AArch64::ZAQ3) |
2842 | .Case(S: "za4.q" , Value: AArch64::ZAQ4) |
2843 | .Case(S: "za5.q" , Value: AArch64::ZAQ5) |
2844 | .Case(S: "za6.q" , Value: AArch64::ZAQ6) |
2845 | .Case(S: "za7.q" , Value: AArch64::ZAQ7) |
2846 | .Case(S: "za8.q" , Value: AArch64::ZAQ8) |
2847 | .Case(S: "za9.q" , Value: AArch64::ZAQ9) |
2848 | .Case(S: "za10.q" , Value: AArch64::ZAQ10) |
2849 | .Case(S: "za11.q" , Value: AArch64::ZAQ11) |
2850 | .Case(S: "za12.q" , Value: AArch64::ZAQ12) |
2851 | .Case(S: "za13.q" , Value: AArch64::ZAQ13) |
2852 | .Case(S: "za14.q" , Value: AArch64::ZAQ14) |
2853 | .Case(S: "za15.q" , Value: AArch64::ZAQ15) |
2854 | .Case(S: "za0.d" , Value: AArch64::ZAD0) |
2855 | .Case(S: "za1.d" , Value: AArch64::ZAD1) |
2856 | .Case(S: "za2.d" , Value: AArch64::ZAD2) |
2857 | .Case(S: "za3.d" , Value: AArch64::ZAD3) |
2858 | .Case(S: "za4.d" , Value: AArch64::ZAD4) |
2859 | .Case(S: "za5.d" , Value: AArch64::ZAD5) |
2860 | .Case(S: "za6.d" , Value: AArch64::ZAD6) |
2861 | .Case(S: "za7.d" , Value: AArch64::ZAD7) |
2862 | .Case(S: "za0.s" , Value: AArch64::ZAS0) |
2863 | .Case(S: "za1.s" , Value: AArch64::ZAS1) |
2864 | .Case(S: "za2.s" , Value: AArch64::ZAS2) |
2865 | .Case(S: "za3.s" , Value: AArch64::ZAS3) |
2866 | .Case(S: "za0.h" , Value: AArch64::ZAH0) |
2867 | .Case(S: "za1.h" , Value: AArch64::ZAH1) |
2868 | .Case(S: "za0.b" , Value: AArch64::ZAB0) |
2869 | .Case(S: "za0h.q" , Value: AArch64::ZAQ0) |
2870 | .Case(S: "za1h.q" , Value: AArch64::ZAQ1) |
2871 | .Case(S: "za2h.q" , Value: AArch64::ZAQ2) |
2872 | .Case(S: "za3h.q" , Value: AArch64::ZAQ3) |
2873 | .Case(S: "za4h.q" , Value: AArch64::ZAQ4) |
2874 | .Case(S: "za5h.q" , Value: AArch64::ZAQ5) |
2875 | .Case(S: "za6h.q" , Value: AArch64::ZAQ6) |
2876 | .Case(S: "za7h.q" , Value: AArch64::ZAQ7) |
2877 | .Case(S: "za8h.q" , Value: AArch64::ZAQ8) |
2878 | .Case(S: "za9h.q" , Value: AArch64::ZAQ9) |
2879 | .Case(S: "za10h.q" , Value: AArch64::ZAQ10) |
2880 | .Case(S: "za11h.q" , Value: AArch64::ZAQ11) |
2881 | .Case(S: "za12h.q" , Value: AArch64::ZAQ12) |
2882 | .Case(S: "za13h.q" , Value: AArch64::ZAQ13) |
2883 | .Case(S: "za14h.q" , Value: AArch64::ZAQ14) |
2884 | .Case(S: "za15h.q" , Value: AArch64::ZAQ15) |
2885 | .Case(S: "za0h.d" , Value: AArch64::ZAD0) |
2886 | .Case(S: "za1h.d" , Value: AArch64::ZAD1) |
2887 | .Case(S: "za2h.d" , Value: AArch64::ZAD2) |
2888 | .Case(S: "za3h.d" , Value: AArch64::ZAD3) |
2889 | .Case(S: "za4h.d" , Value: AArch64::ZAD4) |
2890 | .Case(S: "za5h.d" , Value: AArch64::ZAD5) |
2891 | .Case(S: "za6h.d" , Value: AArch64::ZAD6) |
2892 | .Case(S: "za7h.d" , Value: AArch64::ZAD7) |
2893 | .Case(S: "za0h.s" , Value: AArch64::ZAS0) |
2894 | .Case(S: "za1h.s" , Value: AArch64::ZAS1) |
2895 | .Case(S: "za2h.s" , Value: AArch64::ZAS2) |
2896 | .Case(S: "za3h.s" , Value: AArch64::ZAS3) |
2897 | .Case(S: "za0h.h" , Value: AArch64::ZAH0) |
2898 | .Case(S: "za1h.h" , Value: AArch64::ZAH1) |
2899 | .Case(S: "za0h.b" , Value: AArch64::ZAB0) |
2900 | .Case(S: "za0v.q" , Value: AArch64::ZAQ0) |
2901 | .Case(S: "za1v.q" , Value: AArch64::ZAQ1) |
2902 | .Case(S: "za2v.q" , Value: AArch64::ZAQ2) |
2903 | .Case(S: "za3v.q" , Value: AArch64::ZAQ3) |
2904 | .Case(S: "za4v.q" , Value: AArch64::ZAQ4) |
2905 | .Case(S: "za5v.q" , Value: AArch64::ZAQ5) |
2906 | .Case(S: "za6v.q" , Value: AArch64::ZAQ6) |
2907 | .Case(S: "za7v.q" , Value: AArch64::ZAQ7) |
2908 | .Case(S: "za8v.q" , Value: AArch64::ZAQ8) |
2909 | .Case(S: "za9v.q" , Value: AArch64::ZAQ9) |
2910 | .Case(S: "za10v.q" , Value: AArch64::ZAQ10) |
2911 | .Case(S: "za11v.q" , Value: AArch64::ZAQ11) |
2912 | .Case(S: "za12v.q" , Value: AArch64::ZAQ12) |
2913 | .Case(S: "za13v.q" , Value: AArch64::ZAQ13) |
2914 | .Case(S: "za14v.q" , Value: AArch64::ZAQ14) |
2915 | .Case(S: "za15v.q" , Value: AArch64::ZAQ15) |
2916 | .Case(S: "za0v.d" , Value: AArch64::ZAD0) |
2917 | .Case(S: "za1v.d" , Value: AArch64::ZAD1) |
2918 | .Case(S: "za2v.d" , Value: AArch64::ZAD2) |
2919 | .Case(S: "za3v.d" , Value: AArch64::ZAD3) |
2920 | .Case(S: "za4v.d" , Value: AArch64::ZAD4) |
2921 | .Case(S: "za5v.d" , Value: AArch64::ZAD5) |
2922 | .Case(S: "za6v.d" , Value: AArch64::ZAD6) |
2923 | .Case(S: "za7v.d" , Value: AArch64::ZAD7) |
2924 | .Case(S: "za0v.s" , Value: AArch64::ZAS0) |
2925 | .Case(S: "za1v.s" , Value: AArch64::ZAS1) |
2926 | .Case(S: "za2v.s" , Value: AArch64::ZAS2) |
2927 | .Case(S: "za3v.s" , Value: AArch64::ZAS3) |
2928 | .Case(S: "za0v.h" , Value: AArch64::ZAH0) |
2929 | .Case(S: "za1v.h" , Value: AArch64::ZAH1) |
2930 | .Case(S: "za0v.b" , Value: AArch64::ZAB0) |
2931 | .Default(Value: 0); |
2932 | } |
2933 | |
2934 | bool AArch64AsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc, |
2935 | SMLoc &EndLoc) { |
2936 | return !tryParseRegister(Reg, StartLoc, EndLoc).isSuccess(); |
2937 | } |
2938 | |
2939 | ParseStatus AArch64AsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc, |
2940 | SMLoc &EndLoc) { |
2941 | StartLoc = getLoc(); |
2942 | ParseStatus Res = tryParseScalarRegister(Reg); |
2943 | EndLoc = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1); |
2944 | return Res; |
2945 | } |
2946 | |
2947 | // Matches a register name or register alias previously defined by '.req' |
2948 | unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name, |
2949 | RegKind Kind) { |
2950 | unsigned RegNum = 0; |
2951 | if ((RegNum = matchSVEDataVectorRegName(Name))) |
2952 | return Kind == RegKind::SVEDataVector ? RegNum : 0; |
2953 | |
2954 | if ((RegNum = matchSVEPredicateVectorRegName(Name))) |
2955 | return Kind == RegKind::SVEPredicateVector ? RegNum : 0; |
2956 | |
2957 | if ((RegNum = matchSVEPredicateAsCounterRegName(Name))) |
2958 | return Kind == RegKind::SVEPredicateAsCounter ? RegNum : 0; |
2959 | |
2960 | if ((RegNum = MatchNeonVectorRegName(Name))) |
2961 | return Kind == RegKind::NeonVector ? RegNum : 0; |
2962 | |
2963 | if ((RegNum = matchMatrixRegName(Name))) |
2964 | return Kind == RegKind::Matrix ? RegNum : 0; |
2965 | |
2966 | if (Name.equals_insensitive(RHS: "zt0" )) |
2967 | return Kind == RegKind::LookupTable ? AArch64::ZT0 : 0; |
2968 | |
2969 | // The parsed register must be of RegKind Scalar |
2970 | if ((RegNum = MatchRegisterName(Name))) |
2971 | return (Kind == RegKind::Scalar) ? RegNum : 0; |
2972 | |
2973 | if (!RegNum) { |
2974 | // Handle a few common aliases of registers. |
2975 | if (auto RegNum = StringSwitch<unsigned>(Name.lower()) |
2976 | .Case(S: "fp" , Value: AArch64::FP) |
2977 | .Case(S: "lr" , Value: AArch64::LR) |
2978 | .Case(S: "x31" , Value: AArch64::XZR) |
2979 | .Case(S: "w31" , Value: AArch64::WZR) |
2980 | .Default(Value: 0)) |
2981 | return Kind == RegKind::Scalar ? RegNum : 0; |
2982 | |
2983 | // Check for aliases registered via .req. Canonicalize to lower case. |
2984 | // That's more consistent since register names are case insensitive, and |
2985 | // it's how the original entry was passed in from MC/MCParser/AsmParser. |
2986 | auto Entry = RegisterReqs.find(Key: Name.lower()); |
2987 | if (Entry == RegisterReqs.end()) |
2988 | return 0; |
2989 | |
2990 | // set RegNum if the match is the right kind of register |
2991 | if (Kind == Entry->getValue().first) |
2992 | RegNum = Entry->getValue().second; |
2993 | } |
2994 | return RegNum; |
2995 | } |
2996 | |
2997 | unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) { |
2998 | switch (K) { |
2999 | case RegKind::Scalar: |
3000 | case RegKind::NeonVector: |
3001 | case RegKind::SVEDataVector: |
3002 | return 32; |
3003 | case RegKind::Matrix: |
3004 | case RegKind::SVEPredicateVector: |
3005 | case RegKind::SVEPredicateAsCounter: |
3006 | return 16; |
3007 | case RegKind::LookupTable: |
3008 | return 1; |
3009 | } |
3010 | llvm_unreachable("Unsupported RegKind" ); |
3011 | } |
3012 | |
3013 | /// tryParseScalarRegister - Try to parse a register name. The token must be an |
3014 | /// Identifier when called, and if it is a register name the token is eaten and |
3015 | /// the register is added to the operand list. |
3016 | ParseStatus AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) { |
3017 | const AsmToken &Tok = getTok(); |
3018 | if (Tok.isNot(K: AsmToken::Identifier)) |
3019 | return ParseStatus::NoMatch; |
3020 | |
3021 | std::string lowerCase = Tok.getString().lower(); |
3022 | unsigned Reg = matchRegisterNameAlias(Name: lowerCase, Kind: RegKind::Scalar); |
3023 | if (Reg == 0) |
3024 | return ParseStatus::NoMatch; |
3025 | |
3026 | RegNum = Reg; |
3027 | Lex(); // Eat identifier token. |
3028 | return ParseStatus::Success; |
3029 | } |
3030 | |
3031 | /// tryParseSysCROperand - Try to parse a system instruction CR operand name. |
3032 | ParseStatus AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) { |
3033 | SMLoc S = getLoc(); |
3034 | |
3035 | if (getTok().isNot(K: AsmToken::Identifier)) |
3036 | return Error(L: S, Msg: "Expected cN operand where 0 <= N <= 15" ); |
3037 | |
3038 | StringRef Tok = getTok().getIdentifier(); |
3039 | if (Tok[0] != 'c' && Tok[0] != 'C') |
3040 | return Error(L: S, Msg: "Expected cN operand where 0 <= N <= 15" ); |
3041 | |
3042 | uint32_t CRNum; |
3043 | bool BadNum = Tok.drop_front().getAsInteger(Radix: 10, Result&: CRNum); |
3044 | if (BadNum || CRNum > 15) |
3045 | return Error(L: S, Msg: "Expected cN operand where 0 <= N <= 15" ); |
3046 | |
3047 | Lex(); // Eat identifier token. |
3048 | Operands.push_back( |
3049 | Elt: AArch64Operand::CreateSysCR(Val: CRNum, S, E: getLoc(), Ctx&: getContext())); |
3050 | return ParseStatus::Success; |
3051 | } |
3052 | |
// Either an identifier for named values or a 6-bit immediate.
ParseStatus AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
  SMLoc S = getLoc();
  const AsmToken &Tok = getTok();

  // RPRFM operands encode in 6 bits, hence the [0, 63] range check below.
  unsigned MaxVal = 63;

  // Immediate case, with optional leading hash:
  if (parseOptionalToken(T: AsmToken::Hash) ||
      Tok.is(K: AsmToken::Integer)) {
    const MCExpr *ImmVal;
    if (getParser().parseExpression(Res&: ImmVal))
      return ParseStatus::Failure;

    // Only a constant expression is meaningful as a prefetch encoding.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
    if (!MCE)
      return TokError(Msg: "immediate value expected for prefetch operand");
    unsigned prfop = MCE->getValue();
    if (prfop > MaxVal)
      return TokError(Msg: "prefetch operand out of range, [0," + utostr(X: MaxVal) +
                      "] expected");

    // Attach the symbolic name when this encoding has one, so the operand
    // can be printed symbolically; otherwise store an empty name.
    auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(Encoding: MCE->getValue());
    Operands.push_back(Elt: AArch64Operand::CreatePrefetch(
        Val: prfop, Str: RPRFM ? RPRFM->Name : "", S, Ctx&: getContext()));
    return ParseStatus::Success;
  }

  // Symbolic case: the token must name a known RPRFM hint.
  if (Tok.isNot(K: AsmToken::Identifier))
    return TokError(Msg: "prefetch hint expected");

  auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Name: Tok.getString());
  if (!RPRFM)
    return TokError(Msg: "prefetch hint expected");

  Operands.push_back(Elt: AArch64Operand::CreatePrefetch(
      Val: RPRFM->Encoding, Str: Tok.getString(), S, Ctx&: getContext()));
  Lex(); // Eat identifier token.
  return ParseStatus::Success;
}
3093 | |
/// tryParsePrefetch - Try to parse a prefetch operand.
/// The template parameter selects between the SVE prefetch table and the
/// base PRFM table; both numeric and symbolic spellings are accepted.
template <bool IsSVEPrefetch>
ParseStatus AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
  SMLoc S = getLoc();
  const AsmToken &Tok = getTok();

  // Resolve a symbolic prefetch name to its encoding, using whichever
  // lookup table matches the template parameter.
  auto LookupByName = [](StringRef N) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(Name: N))
        return std::optional<unsigned>(Res->Encoding);
    } else if (auto Res = AArch64PRFM::lookupPRFMByName(Name: N))
      return std::optional<unsigned>(Res->Encoding);
    return std::optional<unsigned>();
  };

  // Inverse mapping: recover the symbolic name (if any) for a numeric
  // encoding, so a numerically-written operand can still print as a name.
  auto LookupByEncoding = [](unsigned E) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(Encoding: E))
        return std::optional<StringRef>(Res->Name);
    } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(Encoding: E))
      return std::optional<StringRef>(Res->Name);
    return std::optional<StringRef>();
  };
  // SVE prefetch immediates are 4-bit; base AArch64 prefetches are 5-bit.
  unsigned MaxVal = IsSVEPrefetch ? 15 : 31;

  // Either an identifier for named values or a 5-bit immediate.
  // Eat optional hash.
  if (parseOptionalToken(T: AsmToken::Hash) ||
      Tok.is(K: AsmToken::Integer)) {
    const MCExpr *ImmVal;
    if (getParser().parseExpression(Res&: ImmVal))
      return ParseStatus::Failure;

    // Only a constant expression is meaningful as a prefetch encoding.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
    if (!MCE)
      return TokError(Msg: "immediate value expected for prefetch operand");
    unsigned prfop = MCE->getValue();
    if (prfop > MaxVal)
      return TokError(Msg: "prefetch operand out of range, [0," + utostr(X: MaxVal) +
                      "] expected");

    auto PRFM = LookupByEncoding(MCE->getValue());
    Operands.push_back(AArch64Operand::CreatePrefetch(Val: prfop, Str: PRFM.value_or("" ),
                                                      S, Ctx&: getContext()));
    return ParseStatus::Success;
  }

  // Symbolic case: the token must name a known prefetch hint.
  if (Tok.isNot(K: AsmToken::Identifier))
    return TokError(Msg: "prefetch hint expected");

  auto PRFM = LookupByName(Tok.getString());
  if (!PRFM)
    return TokError(Msg: "prefetch hint expected");

  Operands.push_back(AArch64Operand::CreatePrefetch(
      Val: *PRFM, Str: Tok.getString(), S, Ctx&: getContext()));
  Lex(); // Eat identifier token.
  return ParseStatus::Success;
}
3153 | |
3154 | /// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command |
3155 | ParseStatus AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) { |
3156 | SMLoc S = getLoc(); |
3157 | const AsmToken &Tok = getTok(); |
3158 | if (Tok.isNot(K: AsmToken::Identifier)) |
3159 | return TokError(Msg: "invalid operand for instruction" ); |
3160 | |
3161 | auto PSB = AArch64PSBHint::lookupPSBByName(Name: Tok.getString()); |
3162 | if (!PSB) |
3163 | return TokError(Msg: "invalid operand for instruction" ); |
3164 | |
3165 | Operands.push_back(Elt: AArch64Operand::CreatePSBHint( |
3166 | Val: PSB->Encoding, Str: Tok.getString(), S, Ctx&: getContext())); |
3167 | Lex(); // Eat identifier token. |
3168 | return ParseStatus::Success; |
3169 | } |
3170 | |
/// Try to parse the explicit "xzr, xzr" register pair of a SYSP
/// instruction.  Returns NoMatch (with the lexer rewound) when the first
/// register is not xzr, so other operand matchers can run.
ParseStatus AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  MCRegister RegNum;

  // The case where xzr, xzr is not present is handled by an InstAlias.

  auto RegTok = getTok(); // in case we need to backtrack
  if (!tryParseScalarRegister(RegNum).isSuccess())
    return ParseStatus::NoMatch;

  if (RegNum != AArch64::XZR) {
    // A scalar register other than xzr: un-lex it so the normal register
    // parsing path can consume it instead.
    getLexer().UnLex(Token: RegTok);
    return ParseStatus::NoMatch;
  }

  // From here on we are committed to the xzr, xzr form, so errors are hard
  // failures rather than NoMatch.
  if (parseComma())
    return ParseStatus::Failure;

  if (!tryParseScalarRegister(RegNum).isSuccess())
    return TokError(Msg: "expected register operand");

  if (RegNum != AArch64::XZR)
    return TokError(Msg: "xzr must be followed by xzr");

  // We need to push something, since we claim this is an operand in .td.
  // See also AArch64AsmParser::parseKeywordOperand.
  Operands.push_back(Elt: AArch64Operand::CreateReg(
      RegNum, Kind: RegKind::Scalar, S: StartLoc, E: getLoc(), Ctx&: getContext()));

  return ParseStatus::Success;
}
3203 | |
3204 | /// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command |
3205 | ParseStatus AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) { |
3206 | SMLoc S = getLoc(); |
3207 | const AsmToken &Tok = getTok(); |
3208 | if (Tok.isNot(K: AsmToken::Identifier)) |
3209 | return TokError(Msg: "invalid operand for instruction" ); |
3210 | |
3211 | auto BTI = AArch64BTIHint::lookupBTIByName(Name: Tok.getString()); |
3212 | if (!BTI) |
3213 | return TokError(Msg: "invalid operand for instruction" ); |
3214 | |
3215 | Operands.push_back(Elt: AArch64Operand::CreateBTIHint( |
3216 | Val: BTI->Encoding, Str: Tok.getString(), S, Ctx&: getContext())); |
3217 | Lex(); // Eat identifier token. |
3218 | return ParseStatus::Success; |
3219 | } |
3220 | |
/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
/// instruction.  Accepts an optional leading '#', then a symbolic
/// immediate; any relocation modifier on the symbol must be one of the
/// page-granular kinds ADRP can materialize.
ParseStatus AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
  SMLoc S = getLoc();
  const MCExpr *Expr = nullptr;

  // Optional immediate marker before the label/expression.
  if (getTok().is(K: AsmToken::Hash)) {
    Lex(); // Eat hash token.
  }

  if (parseSymbolicImmVal(ImmVal&: Expr))
    return ParseStatus::Failure;

  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADRP relocation (unfortunately).
      Expr =
          AArch64MCExpr::create(Expr, Kind: AArch64MCExpr::VK_ABS_PAGE, Ctx&: getContext());
    } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
               Addend != 0) {
      // Darwin GOT/TLVP page references cannot carry an addend.
      return Error(L: S, Msg: "gotpage label reference not allowed an addend");
    } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
               ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
               ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
      // The operand must be an @page or @gotpage qualified symbolref.
      return Error(L: S, Msg: "page or gotpage label reference expected");
    }
  }

  // We have either a label reference possibly with addend or an immediate. The
  // addend is a raw value here. The linker will adjust it to only reference the
  // page.
  SMLoc E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
  Operands.push_back(Elt: AArch64Operand::CreateImm(Val: Expr, S, E, Ctx&: getContext()));

  return ParseStatus::Success;
}
3269 | |
3270 | /// tryParseAdrLabel - Parse and validate a source label for the ADR |
3271 | /// instruction. |
3272 | ParseStatus AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) { |
3273 | SMLoc S = getLoc(); |
3274 | const MCExpr *Expr = nullptr; |
3275 | |
3276 | // Leave anything with a bracket to the default for SVE |
3277 | if (getTok().is(K: AsmToken::LBrac)) |
3278 | return ParseStatus::NoMatch; |
3279 | |
3280 | if (getTok().is(K: AsmToken::Hash)) |
3281 | Lex(); // Eat hash token. |
3282 | |
3283 | if (parseSymbolicImmVal(ImmVal&: Expr)) |
3284 | return ParseStatus::Failure; |
3285 | |
3286 | AArch64MCExpr::VariantKind ELFRefKind; |
3287 | MCSymbolRefExpr::VariantKind DarwinRefKind; |
3288 | int64_t Addend; |
3289 | if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) { |
3290 | if (DarwinRefKind == MCSymbolRefExpr::VK_None && |
3291 | ELFRefKind == AArch64MCExpr::VK_INVALID) { |
3292 | // No modifier was specified at all; this is the syntax for an ELF basic |
3293 | // ADR relocation (unfortunately). |
3294 | Expr = AArch64MCExpr::create(Expr, Kind: AArch64MCExpr::VK_ABS, Ctx&: getContext()); |
3295 | } else { |
3296 | return Error(L: S, Msg: "unexpected adr label" ); |
3297 | } |
3298 | } |
3299 | |
3300 | SMLoc E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1); |
3301 | Operands.push_back(Elt: AArch64Operand::CreateImm(Val: Expr, S, E, Ctx&: getContext())); |
3302 | return ParseStatus::Success; |
3303 | } |
3304 | |
/// tryParseFPImm - A floating point immediate expression operand.
template <bool AddFPZeroAsLiteral>
ParseStatus AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  SMLoc S = getLoc();

  bool Hash = parseOptionalToken(T: AsmToken::Hash);

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = parseOptionalToken(T: AsmToken::Minus);

  const AsmToken &Tok = getTok();
  if (!Tok.is(K: AsmToken::Real) && !Tok.is(K: AsmToken::Integer)) {
    // Without a leading '#' this may simply be some other kind of operand;
    // let another parser have a go. With '#' it must be an FP immediate.
    if (!Hash)
      return ParseStatus::NoMatch;
    return TokError(Msg: "invalid floating point immediate" );
  }

  // Parse hexadecimal representation.
  // An explicit 0x literal is the raw 8-bit encoded FP immediate: it must fit
  // in a byte and cannot be combined with a '-' sign.
  if (Tok.is(K: AsmToken::Integer) && Tok.getString().starts_with(Prefix: "0x" )) {
    if (Tok.getIntVal() > 255 || isNegative)
      return TokError(Msg: "encoded floating point value out of range" );

    APFloat F((double)AArch64_AM::getFPImmFloat(Imm: Tok.getIntVal()));
    Operands.push_back(
        Elt: AArch64Operand::CreateFPImm(Val: F, IsExact: true, S, Ctx&: getContext()));
  } else {
    // Parse FP representation.
    APFloat RealVal(APFloat::IEEEdouble());
    auto StatusOrErr =
        RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
    if (errorToBool(Err: StatusOrErr.takeError()))
      return TokError(Msg: "invalid floating point representation" );

    if (isNegative)
      RealVal.changeSign();

    if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
      // Emit +0.0 as the literal tokens "#0" ".0" instead of an FPImm so the
      // instruction matcher can recognize it specially.
      Operands.push_back(Elt: AArch64Operand::CreateToken(Str: "#0" , S, Ctx&: getContext()));
      Operands.push_back(Elt: AArch64Operand::CreateToken(Str: ".0" , S, Ctx&: getContext()));
    } else
      // IsExact records whether the string converted without rounding.
      Operands.push_back(Elt: AArch64Operand::CreateFPImm(
          Val: RealVal, IsExact: *StatusOrErr == APFloat::opOK, S, Ctx&: getContext()));
  }

  Lex(); // Eat the token.

  return ParseStatus::Success;
}
3353 | |
/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
/// a shift suffix, for example '#1, lsl #12'.
ParseStatus
AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
  SMLoc S = getLoc();

  if (getTok().is(K: AsmToken::Hash))
    Lex(); // Eat '#'
  else if (getTok().isNot(K: AsmToken::Integer))
    // Operand should start from # or should be integer, emit error otherwise.
    return ParseStatus::NoMatch;

  // "<int>:<int>" spells an immediate range; hand off to the range parser.
  if (getTok().is(K: AsmToken::Integer) &&
      getLexer().peekTok().is(K: AsmToken::Colon))
    return tryParseImmRange(Operands);

  const MCExpr *Imm = nullptr;
  if (parseSymbolicImmVal(ImmVal&: Imm))
    return ParseStatus::Failure;
  else if (getTok().isNot(K: AsmToken::Comma)) {
    // No suffix follows: emit the plain immediate.
    Operands.push_back(
        Elt: AArch64Operand::CreateImm(Val: Imm, S, E: getLoc(), Ctx&: getContext()));
    return ParseStatus::Success;
  }

  // Eat ','
  Lex();
  StringRef VecGroup;
  // "#imm, vgxN" form: emit the immediate followed by the vector-group token.
  if (!parseOptionalVGOperand(Operands, VecGroup)) {
    Operands.push_back(
        Elt: AArch64Operand::CreateImm(Val: Imm, S, E: getLoc(), Ctx&: getContext()));
    Operands.push_back(
        Elt: AArch64Operand::CreateToken(Str: VecGroup, S: getLoc(), Ctx&: getContext()));
    return ParseStatus::Success;
  }

  // The optional operand must be "lsl #N" where N is non-negative.
  if (!getTok().is(K: AsmToken::Identifier) ||
      !getTok().getIdentifier().equals_insensitive(RHS: "lsl" ))
    return Error(L: getLoc(), Msg: "only 'lsl #+N' valid after immediate" );

  // Eat 'lsl'
  Lex();

  // The '#' before the shift amount is optional.
  parseOptionalToken(T: AsmToken::Hash);

  if (getTok().isNot(K: AsmToken::Integer))
    return Error(L: getLoc(), Msg: "only 'lsl #+N' valid after immediate" );

  int64_t ShiftAmount = getTok().getIntVal();

  if (ShiftAmount < 0)
    return Error(L: getLoc(), Msg: "positive shift amount required" );
  Lex(); // Eat the number

  // Just in case the optional lsl #0 is used for immediates other than zero.
  if (ShiftAmount == 0 && Imm != nullptr) {
    Operands.push_back(
        Elt: AArch64Operand::CreateImm(Val: Imm, S, E: getLoc(), Ctx&: getContext()));
    return ParseStatus::Success;
  }

  Operands.push_back(Elt: AArch64Operand::CreateShiftedImm(Val: Imm, ShiftAmount, S,
                                                      E: getLoc(), Ctx&: getContext()));
  return ParseStatus::Success;
}
3420 | |
3421 | /// parseCondCodeString - Parse a Condition Code string, optionally returning a |
3422 | /// suggestion to help common typos. |
3423 | AArch64CC::CondCode |
3424 | AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) { |
3425 | AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower()) |
3426 | .Case(S: "eq" , Value: AArch64CC::EQ) |
3427 | .Case(S: "ne" , Value: AArch64CC::NE) |
3428 | .Case(S: "cs" , Value: AArch64CC::HS) |
3429 | .Case(S: "hs" , Value: AArch64CC::HS) |
3430 | .Case(S: "cc" , Value: AArch64CC::LO) |
3431 | .Case(S: "lo" , Value: AArch64CC::LO) |
3432 | .Case(S: "mi" , Value: AArch64CC::MI) |
3433 | .Case(S: "pl" , Value: AArch64CC::PL) |
3434 | .Case(S: "vs" , Value: AArch64CC::VS) |
3435 | .Case(S: "vc" , Value: AArch64CC::VC) |
3436 | .Case(S: "hi" , Value: AArch64CC::HI) |
3437 | .Case(S: "ls" , Value: AArch64CC::LS) |
3438 | .Case(S: "ge" , Value: AArch64CC::GE) |
3439 | .Case(S: "lt" , Value: AArch64CC::LT) |
3440 | .Case(S: "gt" , Value: AArch64CC::GT) |
3441 | .Case(S: "le" , Value: AArch64CC::LE) |
3442 | .Case(S: "al" , Value: AArch64CC::AL) |
3443 | .Case(S: "nv" , Value: AArch64CC::NV) |
3444 | .Default(Value: AArch64CC::Invalid); |
3445 | |
3446 | if (CC == AArch64CC::Invalid && getSTI().hasFeature(Feature: AArch64::FeatureSVE)) { |
3447 | CC = StringSwitch<AArch64CC::CondCode>(Cond.lower()) |
3448 | .Case(S: "none" , Value: AArch64CC::EQ) |
3449 | .Case(S: "any" , Value: AArch64CC::NE) |
3450 | .Case(S: "nlast" , Value: AArch64CC::HS) |
3451 | .Case(S: "last" , Value: AArch64CC::LO) |
3452 | .Case(S: "first" , Value: AArch64CC::MI) |
3453 | .Case(S: "nfrst" , Value: AArch64CC::PL) |
3454 | .Case(S: "pmore" , Value: AArch64CC::HI) |
3455 | .Case(S: "plast" , Value: AArch64CC::LS) |
3456 | .Case(S: "tcont" , Value: AArch64CC::GE) |
3457 | .Case(S: "tstop" , Value: AArch64CC::LT) |
3458 | .Default(Value: AArch64CC::Invalid); |
3459 | |
3460 | if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst" ) |
3461 | Suggestion = "nfrst" ; |
3462 | } |
3463 | return CC; |
3464 | } |
3465 | |
3466 | /// parseCondCode - Parse a Condition Code operand. |
3467 | bool AArch64AsmParser::parseCondCode(OperandVector &Operands, |
3468 | bool invertCondCode) { |
3469 | SMLoc S = getLoc(); |
3470 | const AsmToken &Tok = getTok(); |
3471 | assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier" ); |
3472 | |
3473 | StringRef Cond = Tok.getString(); |
3474 | std::string Suggestion; |
3475 | AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion); |
3476 | if (CC == AArch64CC::Invalid) { |
3477 | std::string Msg = "invalid condition code" ; |
3478 | if (!Suggestion.empty()) |
3479 | Msg += ", did you mean " + Suggestion + "?" ; |
3480 | return TokError(Msg); |
3481 | } |
3482 | Lex(); // Eat identifier token. |
3483 | |
3484 | if (invertCondCode) { |
3485 | if (CC == AArch64CC::AL || CC == AArch64CC::NV) |
3486 | return TokError(Msg: "condition codes AL and NV are invalid for this instruction" ); |
3487 | CC = AArch64CC::getInvertedCondCode(Code: AArch64CC::CondCode(CC)); |
3488 | } |
3489 | |
3490 | Operands.push_back( |
3491 | Elt: AArch64Operand::CreateCondCode(Code: CC, S, E: getLoc(), Ctx&: getContext())); |
3492 | return false; |
3493 | } |
3494 | |
3495 | ParseStatus AArch64AsmParser::tryParseSVCR(OperandVector &Operands) { |
3496 | const AsmToken &Tok = getTok(); |
3497 | SMLoc S = getLoc(); |
3498 | |
3499 | if (Tok.isNot(K: AsmToken::Identifier)) |
3500 | return TokError(Msg: "invalid operand for instruction" ); |
3501 | |
3502 | unsigned PStateImm = -1; |
3503 | const auto *SVCR = AArch64SVCR::lookupSVCRByName(Name: Tok.getString()); |
3504 | if (!SVCR) |
3505 | return ParseStatus::NoMatch; |
3506 | if (SVCR->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) |
3507 | PStateImm = SVCR->Encoding; |
3508 | |
3509 | Operands.push_back( |
3510 | Elt: AArch64Operand::CreateSVCR(PStateField: PStateImm, Str: Tok.getString(), S, Ctx&: getContext())); |
3511 | Lex(); // Eat identifier token. |
3512 | return ParseStatus::Success; |
3513 | } |
3514 | |
ParseStatus AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  SMLoc S = getLoc();

  StringRef Name = Tok.getString();

  // First, the whole-ZA array form: "za" with an optional ".b/.h/.s/.d"
  // element-width suffix.
  if (Name.equals_insensitive(RHS: "za" ) || Name.starts_with_insensitive(Prefix: "za." )) {
    Lex(); // eat "za[.(b|h|s|d)]"
    unsigned ElementWidth = 0;
    auto DotPosition = Name.find(C: '.');
    if (DotPosition != StringRef::npos) {
      const auto &KindRes =
          parseVectorKind(Suffix: Name.drop_front(N: DotPosition), VectorKind: RegKind::Matrix);
      if (!KindRes)
        return TokError(
            Msg: "Expected the register to be followed by element width suffix" );
      ElementWidth = KindRes->second;
    }
    Operands.push_back(Elt: AArch64Operand::CreateMatrixRegister(
        RegNum: AArch64::ZA, ElementWidth, Kind: MatrixKind::Array, S, E: getLoc(),
        Ctx&: getContext()));
    if (getLexer().is(K: AsmToken::LBrac)) {
      // There's no comma after matrix operand, so we can parse the next operand
      // immediately.
      // NOTE(review): this returns NoMatch even though tokens have already
      // been consumed and an operand pushed — confirm callers treat this
      // as a hard error rather than retrying another parser.
      if (parseOperand(Operands, isCondCode: false, invertCondCode: false))
        return ParseStatus::NoMatch;
    }
    return ParseStatus::Success;
  }

  // Try to parse matrix register.
  unsigned Reg = matchRegisterNameAlias(Name, Kind: RegKind::Matrix);
  if (!Reg)
    return ParseStatus::NoMatch;

  size_t DotPosition = Name.find(C: '.');
  assert(DotPosition != StringRef::npos && "Unexpected register" );

  StringRef Head = Name.take_front(N: DotPosition);
  StringRef Tail = Name.drop_front(N: DotPosition);
  // The character just before the '.' selects row ("h") vs column ("v")
  // access; anything else means a whole tile.
  StringRef RowOrColumn = Head.take_back();

  MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
                        .Case(S: "h" , Value: MatrixKind::Row)
                        .Case(S: "v" , Value: MatrixKind::Col)
                        .Default(Value: MatrixKind::Tile);

  // Next up, parsing the suffix
  const auto &KindRes = parseVectorKind(Suffix: Tail, VectorKind: RegKind::Matrix);
  if (!KindRes)
    return TokError(
        Msg: "Expected the register to be followed by element width suffix" );
  unsigned ElementWidth = KindRes->second;

  Lex();

  Operands.push_back(Elt: AArch64Operand::CreateMatrixRegister(
      RegNum: Reg, ElementWidth, Kind, S, E: getLoc(), Ctx&: getContext()));

  if (getLexer().is(K: AsmToken::LBrac)) {
    // There's no comma after matrix operand, so we can parse the next operand
    // immediately.
    if (parseOperand(Operands, isCondCode: false, invertCondCode: false))
      return ParseStatus::NoMatch;
  }
  return ParseStatus::Success;
}
3582 | |
/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
/// them if present.
ParseStatus
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  std::string LowerID = Tok.getString().lower();
  // Map the mnemonic to a shift (lsl/lsr/asr/ror/msl) or extend (uxt*/sxt*)
  // operator; anything else is not a shift/extend suffix at all.
  AArch64_AM::ShiftExtendType ShOp =
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case(S: "lsl" , Value: AArch64_AM::LSL)
          .Case(S: "lsr" , Value: AArch64_AM::LSR)
          .Case(S: "asr" , Value: AArch64_AM::ASR)
          .Case(S: "ror" , Value: AArch64_AM::ROR)
          .Case(S: "msl" , Value: AArch64_AM::MSL)
          .Case(S: "uxtb" , Value: AArch64_AM::UXTB)
          .Case(S: "uxth" , Value: AArch64_AM::UXTH)
          .Case(S: "uxtw" , Value: AArch64_AM::UXTW)
          .Case(S: "uxtx" , Value: AArch64_AM::UXTX)
          .Case(S: "sxtb" , Value: AArch64_AM::SXTB)
          .Case(S: "sxth" , Value: AArch64_AM::SXTH)
          .Case(S: "sxtw" , Value: AArch64_AM::SXTW)
          .Case(S: "sxtx" , Value: AArch64_AM::SXTX)
          .Default(Value: AArch64_AM::InvalidShiftExtend);

  if (ShOp == AArch64_AM::InvalidShiftExtend)
    return ParseStatus::NoMatch;

  SMLoc S = Tok.getLoc();
  Lex();

  bool Hash = parseOptionalToken(T: AsmToken::Hash);

  if (!Hash && getLexer().isNot(K: AsmToken::Integer)) {
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      return TokError(Msg: "expected #imm after shift specifier" );
    }

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
    Operands.push_back(
        Elt: AArch64Operand::CreateShiftExtend(ShOp, Val: 0, HasExplicitAmount: false, S, E, Ctx&: getContext()));
    return ParseStatus::Success;
  }

  // Make sure we do actually have a number, identifier or a parenthesized
  // expression.
  SMLoc E = getLoc();
  if (!getTok().is(K: AsmToken::Integer) && !getTok().is(K: AsmToken::LParen) &&
      !getTok().is(K: AsmToken::Identifier))
    return Error(L: E, Msg: "expected integer shift amount" );

  const MCExpr *ImmVal;
  if (getParser().parseExpression(Res&: ImmVal))
    return ParseStatus::Failure;

  // The amount must fold to a constant at parse time; symbolic shift amounts
  // are rejected here.
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
  if (!MCE)
    return Error(L: E, Msg: "expected constant '#imm' after shift specifier" );

  E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
  Operands.push_back(Elt: AArch64Operand::CreateShiftExtend(
      ShOp, Val: MCE->getValue(), HasExplicitAmount: true, S, E, Ctx&: getContext()));
  return ParseStatus::Success;
}
3649 | |
// Architecture-extension names mapped to the subtarget feature bits each one
// implies. Used below by setRequiredFeatureString to name the extensions a
// rejected operand requires; presumably also consumed by directive handling
// elsewhere in this file — confirm against callers outside this chunk.
static const struct Extension {
  const char *Name;
  const FeatureBitset Features;
} ExtensionMap[] = {
    {.Name: "crc" , .Features: {AArch64::FeatureCRC}},
    {.Name: "sm4" , .Features: {AArch64::FeatureSM4}},
    {.Name: "sha3" , .Features: {AArch64::FeatureSHA3}},
    {.Name: "sha2" , .Features: {AArch64::FeatureSHA2}},
    {.Name: "aes" , .Features: {AArch64::FeatureAES}},
    {.Name: "crypto" , .Features: {AArch64::FeatureCrypto}},
    {.Name: "fp" , .Features: {AArch64::FeatureFPARMv8}},
    {.Name: "simd" , .Features: {AArch64::FeatureNEON}},
    {.Name: "ras" , .Features: {AArch64::FeatureRAS}},
    {.Name: "rasv2" , .Features: {AArch64::FeatureRASv2}},
    {.Name: "lse" , .Features: {AArch64::FeatureLSE}},
    {.Name: "predres" , .Features: {AArch64::FeaturePredRes}},
    {.Name: "predres2" , .Features: {AArch64::FeatureSPECRES2}},
    {.Name: "ccdp" , .Features: {AArch64::FeatureCacheDeepPersist}},
    {.Name: "mte" , .Features: {AArch64::FeatureMTE}},
    {.Name: "memtag" , .Features: {AArch64::FeatureMTE}},
    {.Name: "tlb-rmi" , .Features: {AArch64::FeatureTLB_RMI}},
    {.Name: "pan" , .Features: {AArch64::FeaturePAN}},
    {.Name: "pan-rwv" , .Features: {AArch64::FeaturePAN_RWV}},
    {.Name: "ccpp" , .Features: {AArch64::FeatureCCPP}},
    {.Name: "rcpc" , .Features: {AArch64::FeatureRCPC}},
    {.Name: "rng" , .Features: {AArch64::FeatureRandGen}},
    {.Name: "sve" , .Features: {AArch64::FeatureSVE}},
    {.Name: "sve2" , .Features: {AArch64::FeatureSVE2}},
    {.Name: "sve2-aes" , .Features: {AArch64::FeatureSVE2AES}},
    {.Name: "sve2-sm4" , .Features: {AArch64::FeatureSVE2SM4}},
    {.Name: "sve2-sha3" , .Features: {AArch64::FeatureSVE2SHA3}},
    {.Name: "sve2-bitperm" , .Features: {AArch64::FeatureSVE2BitPerm}},
    {.Name: "sve2p1" , .Features: {AArch64::FeatureSVE2p1}},
    {.Name: "b16b16" , .Features: {AArch64::FeatureB16B16}},
    {.Name: "ls64" , .Features: {AArch64::FeatureLS64}},
    {.Name: "xs" , .Features: {AArch64::FeatureXS}},
    {.Name: "pauth" , .Features: {AArch64::FeaturePAuth}},
    {.Name: "flagm" , .Features: {AArch64::FeatureFlagM}},
    {.Name: "rme" , .Features: {AArch64::FeatureRME}},
    {.Name: "sme" , .Features: {AArch64::FeatureSME}},
    {.Name: "sme-f64f64" , .Features: {AArch64::FeatureSMEF64F64}},
    {.Name: "sme-f16f16" , .Features: {AArch64::FeatureSMEF16F16}},
    {.Name: "sme-i16i64" , .Features: {AArch64::FeatureSMEI16I64}},
    {.Name: "sme2" , .Features: {AArch64::FeatureSME2}},
    {.Name: "sme2p1" , .Features: {AArch64::FeatureSME2p1}},
    {.Name: "hbc" , .Features: {AArch64::FeatureHBC}},
    {.Name: "mops" , .Features: {AArch64::FeatureMOPS}},
    {.Name: "mec" , .Features: {AArch64::FeatureMEC}},
    {.Name: "the" , .Features: {AArch64::FeatureTHE}},
    {.Name: "d128" , .Features: {AArch64::FeatureD128}},
    {.Name: "lse128" , .Features: {AArch64::FeatureLSE128}},
    {.Name: "ite" , .Features: {AArch64::FeatureITE}},
    {.Name: "cssc" , .Features: {AArch64::FeatureCSSC}},
    {.Name: "rcpc3" , .Features: {AArch64::FeatureRCPC3}},
    {.Name: "gcs" , .Features: {AArch64::FeatureGCS}},
    {.Name: "bf16" , .Features: {AArch64::FeatureBF16}},
    {.Name: "compnum" , .Features: {AArch64::FeatureComplxNum}},
    {.Name: "dotprod" , .Features: {AArch64::FeatureDotProd}},
    {.Name: "f32mm" , .Features: {AArch64::FeatureMatMulFP32}},
    {.Name: "f64mm" , .Features: {AArch64::FeatureMatMulFP64}},
    {.Name: "fp16" , .Features: {AArch64::FeatureFullFP16}},
    {.Name: "fp16fml" , .Features: {AArch64::FeatureFP16FML}},
    {.Name: "i8mm" , .Features: {AArch64::FeatureMatMulInt8}},
    {.Name: "lor" , .Features: {AArch64::FeatureLOR}},
    {.Name: "profile" , .Features: {AArch64::FeatureSPE}},
    // "rdma" is the name documented by binutils for the feature, but
    // binutils also accepts incomplete prefixes of features, so "rdm"
    // works too. Support both spellings here.
    {.Name: "rdm" , .Features: {AArch64::FeatureRDM}},
    {.Name: "rdma" , .Features: {AArch64::FeatureRDM}},
    {.Name: "sb" , .Features: {AArch64::FeatureSB}},
    {.Name: "ssbs" , .Features: {AArch64::FeatureSSBS}},
    {.Name: "tme" , .Features: {AArch64::FeatureTME}},
    {.Name: "fp8" , .Features: {AArch64::FeatureFP8}},
    {.Name: "faminmax" , .Features: {AArch64::FeatureFAMINMAX}},
    {.Name: "fp8fma" , .Features: {AArch64::FeatureFP8FMA}},
    {.Name: "ssve-fp8fma" , .Features: {AArch64::FeatureSSVE_FP8FMA}},
    {.Name: "fp8dot2" , .Features: {AArch64::FeatureFP8DOT2}},
    {.Name: "ssve-fp8dot2" , .Features: {AArch64::FeatureSSVE_FP8DOT2}},
    {.Name: "fp8dot4" , .Features: {AArch64::FeatureFP8DOT4}},
    {.Name: "ssve-fp8dot4" , .Features: {AArch64::FeatureSSVE_FP8DOT4}},
    {.Name: "lut" , .Features: {AArch64::FeatureLUT}},
    {.Name: "sme-lutv2" , .Features: {AArch64::FeatureSME_LUTv2}},
    {.Name: "sme-f8f16" , .Features: {AArch64::FeatureSMEF8F16}},
    {.Name: "sme-f8f32" , .Features: {AArch64::FeatureSMEF8F32}},
    {.Name: "sme-fa64" , .Features: {AArch64::FeatureSMEFA64}},
    {.Name: "cpa" , .Features: {AArch64::FeatureCPA}},
    {.Name: "tlbiw" , .Features: {AArch64::FeatureTLBIW}},
};
3739 | |
3740 | static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) { |
3741 | if (FBS[AArch64::HasV8_0aOps]) |
3742 | Str += "ARMv8a" ; |
3743 | if (FBS[AArch64::HasV8_1aOps]) |
3744 | Str += "ARMv8.1a" ; |
3745 | else if (FBS[AArch64::HasV8_2aOps]) |
3746 | Str += "ARMv8.2a" ; |
3747 | else if (FBS[AArch64::HasV8_3aOps]) |
3748 | Str += "ARMv8.3a" ; |
3749 | else if (FBS[AArch64::HasV8_4aOps]) |
3750 | Str += "ARMv8.4a" ; |
3751 | else if (FBS[AArch64::HasV8_5aOps]) |
3752 | Str += "ARMv8.5a" ; |
3753 | else if (FBS[AArch64::HasV8_6aOps]) |
3754 | Str += "ARMv8.6a" ; |
3755 | else if (FBS[AArch64::HasV8_7aOps]) |
3756 | Str += "ARMv8.7a" ; |
3757 | else if (FBS[AArch64::HasV8_8aOps]) |
3758 | Str += "ARMv8.8a" ; |
3759 | else if (FBS[AArch64::HasV8_9aOps]) |
3760 | Str += "ARMv8.9a" ; |
3761 | else if (FBS[AArch64::HasV9_0aOps]) |
3762 | Str += "ARMv9-a" ; |
3763 | else if (FBS[AArch64::HasV9_1aOps]) |
3764 | Str += "ARMv9.1a" ; |
3765 | else if (FBS[AArch64::HasV9_2aOps]) |
3766 | Str += "ARMv9.2a" ; |
3767 | else if (FBS[AArch64::HasV9_3aOps]) |
3768 | Str += "ARMv9.3a" ; |
3769 | else if (FBS[AArch64::HasV9_4aOps]) |
3770 | Str += "ARMv9.4a" ; |
3771 | else if (FBS[AArch64::HasV9_5aOps]) |
3772 | Str += "ARMv9.5a" ; |
3773 | else if (FBS[AArch64::HasV8_0rOps]) |
3774 | Str += "ARMv8r" ; |
3775 | else { |
3776 | SmallVector<std::string, 2> ExtMatches; |
3777 | for (const auto& Ext : ExtensionMap) { |
3778 | // Use & in case multiple features are enabled |
3779 | if ((FBS & Ext.Features) != FeatureBitset()) |
3780 | ExtMatches.push_back(Elt: Ext.Name); |
3781 | } |
3782 | Str += !ExtMatches.empty() ? llvm::join(R&: ExtMatches, Separator: ", " ) : "(unknown)" ; |
3783 | } |
3784 | } |
3785 | |
3786 | void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands, |
3787 | SMLoc S) { |
3788 | const uint16_t Op2 = Encoding & 7; |
3789 | const uint16_t Cm = (Encoding & 0x78) >> 3; |
3790 | const uint16_t Cn = (Encoding & 0x780) >> 7; |
3791 | const uint16_t Op1 = (Encoding & 0x3800) >> 11; |
3792 | |
3793 | const MCExpr *Expr = MCConstantExpr::create(Value: Op1, Ctx&: getContext()); |
3794 | |
3795 | Operands.push_back( |
3796 | Elt: AArch64Operand::CreateImm(Val: Expr, S, E: getLoc(), Ctx&: getContext())); |
3797 | Operands.push_back( |
3798 | Elt: AArch64Operand::CreateSysCR(Val: Cn, S, E: getLoc(), Ctx&: getContext())); |
3799 | Operands.push_back( |
3800 | Elt: AArch64Operand::CreateSysCR(Val: Cm, S, E: getLoc(), Ctx&: getContext())); |
3801 | Expr = MCConstantExpr::create(Value: Op2, Ctx&: getContext()); |
3802 | Operands.push_back( |
3803 | Elt: AArch64Operand::CreateImm(Val: Expr, S, E: getLoc(), Ctx&: getContext())); |
3804 | } |
3805 | |
/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                     OperandVector &Operands) {
  if (Name.contains(C: '.'))
    return TokError(Msg: "invalid operand" );

  Mnemonic = Name;
  // All of these aliases are emitted as a SYS instruction.
  Operands.push_back(Elt: AArch64Operand::CreateToken(Str: "sys" , S: NameLoc, Ctx&: getContext()));

  const AsmToken &Tok = getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  if (Mnemonic == "ic" ) {
    const AArch64IC::IC *IC = AArch64IC::lookupICByName(Name: Op);
    if (!IC)
      return TokError(Msg: "invalid operand for IC instruction" );
    else if (!IC->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
      // Known operand but unavailable: diagnose with the feature list.
      std::string Str("IC " + std::string(IC->Name) + " requires: " );
      setRequiredFeatureString(FBS: IC->getRequiredFeatures(), Str);
      return TokError(Msg: Str);
    }
    createSysAlias(Encoding: IC->Encoding, Operands, S);
  } else if (Mnemonic == "dc" ) {
    const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Name: Op);
    if (!DC)
      return TokError(Msg: "invalid operand for DC instruction" );
    else if (!DC->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
      std::string Str("DC " + std::string(DC->Name) + " requires: " );
      setRequiredFeatureString(FBS: DC->getRequiredFeatures(), Str);
      return TokError(Msg: Str);
    }
    createSysAlias(Encoding: DC->Encoding, Operands, S);
  } else if (Mnemonic == "at" ) {
    const AArch64AT::AT *AT = AArch64AT::lookupATByName(Name: Op);
    if (!AT)
      return TokError(Msg: "invalid operand for AT instruction" );
    else if (!AT->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
      std::string Str("AT " + std::string(AT->Name) + " requires: " );
      setRequiredFeatureString(FBS: AT->getRequiredFeatures(), Str);
      return TokError(Msg: Str);
    }
    createSysAlias(Encoding: AT->Encoding, Operands, S);
  } else if (Mnemonic == "tlbi" ) {
    const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Name: Op);
    if (!TLBI)
      return TokError(Msg: "invalid operand for TLBI instruction" );
    else if (!TLBI->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
      std::string Str("TLBI " + std::string(TLBI->Name) + " requires: " );
      setRequiredFeatureString(FBS: TLBI->getRequiredFeatures(), Str);
      return TokError(Msg: Str);
    }
    createSysAlias(Encoding: TLBI->Encoding, Operands, S);
  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" || Mnemonic == "cosp" ) {
    // Prediction restriction (PredRes/SPECRES2) aliases: only operand is RCTX.
    if (Op.lower() != "rctx" )
      return TokError(Msg: "invalid operand for prediction restriction instruction" );

    bool hasAll = getSTI().hasFeature(Feature: AArch64::FeatureAll);
    bool hasPredres = hasAll || getSTI().hasFeature(Feature: AArch64::FeaturePredRes);
    bool hasSpecres2 = hasAll || getSTI().hasFeature(Feature: AArch64::FeatureSPECRES2);

    if (Mnemonic == "cosp" && !hasSpecres2)
      return TokError(Msg: "COSP requires: predres2" );
    if (!hasPredres)
      return TokError(Msg: Mnemonic.upper() + "RCTX requires: predres" );

    // op2 distinguishes the particular restriction operation.
    uint16_t PRCTX_Op2 = Mnemonic == "cfp" ? 0b100
                         : Mnemonic == "dvp" ? 0b101
                         : Mnemonic == "cosp" ? 0b110
                         : Mnemonic == "cpp" ? 0b111
                         : 0;
    assert(PRCTX_Op2 &&
           "Invalid mnemonic for prediction restriction instruction" );
    const auto SYS_3_7_3 = 0b01101110011; // op=3, CRn=7, CRm=3
    const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;

    createSysAlias(Encoding, Operands, S);
  }

  Lex(); // Eat operand.

  // Operations whose name contains "all" act on everything and take no
  // register; all others require one.
  bool ExpectRegister = !Op.contains_insensitive(Other: "all" );
  bool HasRegister = false;

  // Check for the optional register operand.
  if (parseOptionalToken(T: AsmToken::Comma)) {
    if (Tok.isNot(K: AsmToken::Identifier) || parseRegister(Operands))
      return TokError(Msg: "expected register operand" );
    HasRegister = true;
  }

  if (ExpectRegister && !HasRegister)
    return TokError(Msg: "specified " + Mnemonic + " op requires a register" );
  else if (!ExpectRegister && HasRegister)
    return TokError(Msg: "specified " + Mnemonic + " op does not use a register" );

  if (parseToken(T: AsmToken::EndOfStatement, Msg: "unexpected token in argument list" ))
    return true;

  return false;
}
3909 | |
/// parseSyspAlias - The TLBIP instructions are simple aliases for
/// the SYSP instruction. Parse them specially so that we create a SYSP MCInst.
bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
                                      OperandVector &Operands) {
  if (Name.contains(C: '.'))
    return TokError(Msg: "invalid operand" );

  Mnemonic = Name;
  // The alias is emitted as a SYSP instruction.
  Operands.push_back(
      Elt: AArch64Operand::CreateToken(Str: "sysp" , S: NameLoc, Ctx&: getContext()));

  const AsmToken &Tok = getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  if (Mnemonic == "tlbip" ) {
    // An "nXS" suffix selects the non-XS variant: strip it for the table
    // lookup, then fold it back into the encoding (bit 7) and add the XS
    // feature requirement.
    bool HasnXSQualifier = Op.ends_with_insensitive(Suffix: "nXS" );
    if (HasnXSQualifier) {
      Op = Op.drop_back(N: 3);
    }
    const AArch64TLBI::TLBI *TLBIorig = AArch64TLBI::lookupTLBIByName(Name: Op);
    if (!TLBIorig)
      return TokError(Msg: "invalid operand for TLBIP instruction" );
    const AArch64TLBI::TLBI TLBI(
        TLBIorig->Name, TLBIorig->Encoding | (HasnXSQualifier ? (1 << 7) : 0),
        TLBIorig->NeedsReg,
        HasnXSQualifier
            ? TLBIorig->FeaturesRequired | FeatureBitset({AArch64::FeatureXS})
            : TLBIorig->FeaturesRequired);
    if (!TLBI.haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) {
      // Re-append the stripped qualifier so the diagnostic names the operand
      // as the user wrote it.
      std::string Name =
          std::string(TLBI.Name) + (HasnXSQualifier ? "nXS" : "" );
      std::string Str("TLBIP " + Name + " requires: " );
      setRequiredFeatureString(FBS: TLBI.getRequiredFeatures(), Str);
      return TokError(Msg: Str);
    }
    createSysAlias(Encoding: TLBI.Encoding, Operands, S);
  }

  Lex(); // Eat operand.

  if (parseComma())
    return true;

  // SYSP always takes a register pair: either XZR,XZR or a sequential GPR
  // pair.
  if (Tok.isNot(K: AsmToken::Identifier))
    return TokError(Msg: "expected register identifier" );
  auto Result = tryParseSyspXzrPair(Operands);
  if (Result.isNoMatch())
    Result = tryParseGPRSeqPair(Operands);
  if (!Result.isSuccess())
    return TokError(Msg: "specified " + Mnemonic +
                    " op requires a pair of registers" );

  if (parseToken(T: AsmToken::EndOfStatement, Msg: "unexpected token in argument list" ))
    return true;

  return false;
}
3968 | |
/// tryParseBarrierOperand - Parse the operand of a barrier instruction
/// (DSB/DMB/ISB/TSB): either an immediate in [0, 15] or a named option.
/// For DSB, immediates > 15 and unknown names return NoMatch so the nXS
/// variant parser can have a go at the same tokens.
ParseStatus AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = getTok();

  // TSB only ever takes the named option 'csync', never an immediate.
  if (Mnemonic == "tsb" && Tok.isNot(K: AsmToken::Identifier))
    return TokError(Msg: "'csync' operand expected" );
  if (parseOptionalToken(T: AsmToken::Hash) || Tok.is(K: AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    // Keep the integer token so it can be pushed back for the nXS fallback.
    AsmToken IntTok = Tok;
    if (getParser().parseExpression(Res&: ImmVal))
      return ParseStatus::Failure;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
    if (!MCE)
      return Error(L: ExprLoc, Msg: "immediate value expected for barrier operand" );
    int64_t Value = MCE->getValue();
    if (Mnemonic == "dsb" && Value > 15) {
      // This case is a no match here, but it might be matched by the nXS
      // variant. Deliberately not unlex the optional '#' as it is not necessary
      // to characterize an integer immediate.
      Parser.getLexer().UnLex(Token: IntTok);
      return ParseStatus::NoMatch;
    }
    if (Value < 0 || Value > 15)
      return Error(L: ExprLoc, Msg: "barrier operand out of range" );
    // Attach the symbolic name for this encoding if one exists (e.g. "sy").
    auto DB = AArch64DB::lookupDBByEncoding(Encoding: Value);
    Operands.push_back(Elt: AArch64Operand::CreateBarrier(Val: Value, Str: DB ? DB->Name : "" ,
                                                     S: ExprLoc, Ctx&: getContext(),
                                                     HasnXSModifier: false /*hasnXSModifier*/));
    return ParseStatus::Success;
  }

  if (Tok.isNot(K: AsmToken::Identifier))
    return TokError(Msg: "invalid operand for instruction" );

  // Named option: look it up in both the TSB and DB (barrier) tables.
  StringRef Operand = Tok.getString();
  auto TSB = AArch64TSB::lookupTSBByName(Name: Operand);
  auto DB = AArch64DB::lookupDBByName(Name: Operand);
  // The only valid named option for ISB is 'sy'
  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy))
    return TokError(Msg: "'sy' or #imm operand expected" );
  // The only valid named option for TSB is 'csync'
  if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync))
    return TokError(Msg: "'csync' operand expected" );
  if (!DB && !TSB) {
    if (Mnemonic == "dsb" ) {
      // This case is a no match here, but it might be matched by the nXS
      // variant.
      return ParseStatus::NoMatch;
    }
    return TokError(Msg: "invalid barrier option name" );
  }

  Operands.push_back(Elt: AArch64Operand::CreateBarrier(
      Val: DB ? DB->Encoding : TSB->Encoding, Str: Tok.getString(), S: getLoc(),
      Ctx&: getContext(), HasnXSModifier: false /*hasnXSModifier*/));
  Lex(); // Consume the option

  return ParseStatus::Success;
}
4030 | |
4031 | ParseStatus |
4032 | AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) { |
4033 | const AsmToken &Tok = getTok(); |
4034 | |
4035 | assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands" ); |
4036 | if (Mnemonic != "dsb" ) |
4037 | return ParseStatus::Failure; |
4038 | |
4039 | if (parseOptionalToken(T: AsmToken::Hash) || Tok.is(K: AsmToken::Integer)) { |
4040 | // Immediate operand. |
4041 | const MCExpr *ImmVal; |
4042 | SMLoc ExprLoc = getLoc(); |
4043 | if (getParser().parseExpression(Res&: ImmVal)) |
4044 | return ParseStatus::Failure; |
4045 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal); |
4046 | if (!MCE) |
4047 | return Error(L: ExprLoc, Msg: "immediate value expected for barrier operand" ); |
4048 | int64_t Value = MCE->getValue(); |
4049 | // v8.7-A DSB in the nXS variant accepts only the following immediate |
4050 | // values: 16, 20, 24, 28. |
4051 | if (Value != 16 && Value != 20 && Value != 24 && Value != 28) |
4052 | return Error(L: ExprLoc, Msg: "barrier operand out of range" ); |
4053 | auto DB = AArch64DBnXS::lookupDBnXSByImmValue(ImmValue: Value); |
4054 | Operands.push_back(Elt: AArch64Operand::CreateBarrier(Val: DB->Encoding, Str: DB->Name, |
4055 | S: ExprLoc, Ctx&: getContext(), |
4056 | HasnXSModifier: true /*hasnXSModifier*/)); |
4057 | return ParseStatus::Success; |
4058 | } |
4059 | |
4060 | if (Tok.isNot(K: AsmToken::Identifier)) |
4061 | return TokError(Msg: "invalid operand for instruction" ); |
4062 | |
4063 | StringRef Operand = Tok.getString(); |
4064 | auto DB = AArch64DBnXS::lookupDBnXSByName(Name: Operand); |
4065 | |
4066 | if (!DB) |
4067 | return TokError(Msg: "invalid barrier option name" ); |
4068 | |
4069 | Operands.push_back( |
4070 | Elt: AArch64Operand::CreateBarrier(Val: DB->Encoding, Str: Tok.getString(), S: getLoc(), |
4071 | Ctx&: getContext(), HasnXSModifier: true /*hasnXSModifier*/)); |
4072 | Lex(); // Consume the option |
4073 | |
4074 | return ParseStatus::Success; |
4075 | } |
4076 | |
4077 | ParseStatus AArch64AsmParser::tryParseSysReg(OperandVector &Operands) { |
4078 | const AsmToken &Tok = getTok(); |
4079 | |
4080 | if (Tok.isNot(K: AsmToken::Identifier)) |
4081 | return ParseStatus::NoMatch; |
4082 | |
4083 | if (AArch64SVCR::lookupSVCRByName(Name: Tok.getString())) |
4084 | return ParseStatus::NoMatch; |
4085 | |
4086 | int MRSReg, MSRReg; |
4087 | auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString()); |
4088 | if (SysReg && SysReg->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) { |
4089 | MRSReg = SysReg->Readable ? SysReg->Encoding : -1; |
4090 | MSRReg = SysReg->Writeable ? SysReg->Encoding : -1; |
4091 | } else |
4092 | MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Name: Tok.getString()); |
4093 | |
4094 | unsigned PStateImm = -1; |
4095 | auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Name: Tok.getString()); |
4096 | if (PState15 && PState15->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) |
4097 | PStateImm = PState15->Encoding; |
4098 | if (!PState15) { |
4099 | auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Name: Tok.getString()); |
4100 | if (PState1 && PState1->haveFeatures(ActiveFeatures: getSTI().getFeatureBits())) |
4101 | PStateImm = PState1->Encoding; |
4102 | } |
4103 | |
4104 | Operands.push_back( |
4105 | Elt: AArch64Operand::CreateSysReg(Str: Tok.getString(), S: getLoc(), MRSReg, MSRReg, |
4106 | PStateField: PStateImm, Ctx&: getContext())); |
4107 | Lex(); // Eat identifier |
4108 | |
4109 | return ParseStatus::Success; |
4110 | } |
4111 | |
4112 | /// tryParseNeonVectorRegister - Parse a vector register operand. |
4113 | bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) { |
4114 | if (getTok().isNot(K: AsmToken::Identifier)) |
4115 | return true; |
4116 | |
4117 | SMLoc S = getLoc(); |
4118 | // Check for a vector register specifier first. |
4119 | StringRef Kind; |
4120 | MCRegister Reg; |
4121 | ParseStatus Res = tryParseVectorRegister(Reg, Kind, MatchKind: RegKind::NeonVector); |
4122 | if (!Res.isSuccess()) |
4123 | return true; |
4124 | |
4125 | const auto &KindRes = parseVectorKind(Suffix: Kind, VectorKind: RegKind::NeonVector); |
4126 | if (!KindRes) |
4127 | return true; |
4128 | |
4129 | unsigned ElementWidth = KindRes->second; |
4130 | Operands.push_back( |
4131 | Elt: AArch64Operand::CreateVectorReg(RegNum: Reg, Kind: RegKind::NeonVector, ElementWidth, |
4132 | S, E: getLoc(), Ctx&: getContext())); |
4133 | |
4134 | // If there was an explicit qualifier, that goes on as a literal text |
4135 | // operand. |
4136 | if (!Kind.empty()) |
4137 | Operands.push_back(Elt: AArch64Operand::CreateToken(Str: Kind, S, Ctx&: getContext())); |
4138 | |
4139 | return tryParseVectorIndex(Operands).isFailure(); |
4140 | } |
4141 | |
4142 | ParseStatus AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) { |
4143 | SMLoc SIdx = getLoc(); |
4144 | if (parseOptionalToken(T: AsmToken::LBrac)) { |
4145 | const MCExpr *ImmVal; |
4146 | if (getParser().parseExpression(Res&: ImmVal)) |
4147 | return ParseStatus::NoMatch; |
4148 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal); |
4149 | if (!MCE) |
4150 | return TokError(Msg: "immediate value expected for vector index" ); |
4151 | |
4152 | SMLoc E = getLoc(); |
4153 | |
4154 | if (parseToken(T: AsmToken::RBrac, Msg: "']' expected" )) |
4155 | return ParseStatus::Failure; |
4156 | |
4157 | Operands.push_back(Elt: AArch64Operand::CreateVectorIndex(Idx: MCE->getValue(), S: SIdx, |
4158 | E, Ctx&: getContext())); |
4159 | return ParseStatus::Success; |
4160 | } |
4161 | |
4162 | return ParseStatus::NoMatch; |
4163 | } |
4164 | |
4165 | // tryParseVectorRegister - Try to parse a vector register name with |
4166 | // optional kind specifier. If it is a register specifier, eat the token |
4167 | // and return it. |
4168 | ParseStatus AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg, |
4169 | StringRef &Kind, |
4170 | RegKind MatchKind) { |
4171 | const AsmToken &Tok = getTok(); |
4172 | |
4173 | if (Tok.isNot(K: AsmToken::Identifier)) |
4174 | return ParseStatus::NoMatch; |
4175 | |
4176 | StringRef Name = Tok.getString(); |
4177 | // If there is a kind specifier, it's separated from the register name by |
4178 | // a '.'. |
4179 | size_t Start = 0, Next = Name.find(C: '.'); |
4180 | StringRef Head = Name.slice(Start, End: Next); |
4181 | unsigned RegNum = matchRegisterNameAlias(Name: Head, Kind: MatchKind); |
4182 | |
4183 | if (RegNum) { |
4184 | if (Next != StringRef::npos) { |
4185 | Kind = Name.slice(Start: Next, End: StringRef::npos); |
4186 | if (!isValidVectorKind(Suffix: Kind, VectorKind: MatchKind)) |
4187 | return TokError(Msg: "invalid vector kind qualifier" ); |
4188 | } |
4189 | Lex(); // Eat the register token. |
4190 | |
4191 | Reg = RegNum; |
4192 | return ParseStatus::Success; |
4193 | } |
4194 | |
4195 | return ParseStatus::NoMatch; |
4196 | } |
4197 | |
4198 | ParseStatus AArch64AsmParser::tryParseSVEPredicateOrPredicateAsCounterVector( |
4199 | OperandVector &Operands) { |
4200 | ParseStatus Status = |
4201 | tryParseSVEPredicateVector<RegKind::SVEPredicateAsCounter>(Operands); |
4202 | if (!Status.isSuccess()) |
4203 | Status = tryParseSVEPredicateVector<RegKind::SVEPredicateVector>(Operands); |
4204 | return Status; |
4205 | } |
4206 | |
/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
/// Handles the register with optional size suffix, an optional index (or an
/// immediately-following operand for indexed predicates), and an optional
/// '/z' or '/m' predication qualifier.
template <RegKind RK>
ParseStatus
AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
  // Check for a SVE predicate register specifier first.
  const SMLoc S = getLoc();
  StringRef Kind;
  MCRegister RegNum;
  auto Res = tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RK);
  if (!Res.isSuccess())
    return Res;

  const auto &KindRes = parseVectorKind(Suffix: Kind, VectorKind: RK);
  if (!KindRes)
    return ParseStatus::NoMatch;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(Elt: AArch64Operand::CreateVectorReg(
      RegNum, Kind: RK, ElementWidth, S,
      E: getLoc(), Ctx&: getContext()));

  // A '[' may follow: an index for predicate-as-counter registers, otherwise
  // the start of the next (indexed-predicate) operand.
  if (getLexer().is(K: AsmToken::LBrac)) {
    if (RK == RegKind::SVEPredicateAsCounter) {
      ParseStatus ResIndex = tryParseVectorIndex(Operands);
      if (ResIndex.isSuccess())
        return ParseStatus::Success;
    } else {
      // Indexed predicate, there's no comma so try parse the next operand
      // immediately.
      if (parseOperand(Operands, isCondCode: false, invertCondCode: false))
        return ParseStatus::NoMatch;
    }
  }

  // Not all predicates are followed by a '/m' or '/z'.
  if (getTok().isNot(K: AsmToken::Slash))
    return ParseStatus::Success;

  // But when they do they shouldn't have an element type suffix.
  if (!Kind.empty())
    return Error(L: S, Msg: "not expecting size suffix" );

  // Add a literal slash as operand
  Operands.push_back(Elt: AArch64Operand::CreateToken(Str: "/" , S: getLoc(), Ctx&: getContext()));

  Lex(); // Eat the slash.

  // Zeroing or merging?
  // Predicate-as-counter registers only allow zeroing; plain predicate
  // vectors allow either 'z' or 'm'.
  auto Pred = getTok().getString().lower();
  if (RK == RegKind::SVEPredicateAsCounter && Pred != "z" )
    return Error(L: getLoc(), Msg: "expecting 'z' predication" );

  if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m" )
    return Error(L: getLoc(), Msg: "expecting 'm' or 'z' predication" );

  // Add zero/merge token.
  const char *ZM = Pred == "z" ? "z" : "m" ;
  Operands.push_back(Elt: AArch64Operand::CreateToken(Str: ZM, S: getLoc(), Ctx&: getContext()));

  Lex(); // Eat zero/merge token.
  return ParseStatus::Success;
}
4269 | |
4270 | /// parseRegister - Parse a register operand. |
4271 | bool AArch64AsmParser::parseRegister(OperandVector &Operands) { |
4272 | // Try for a Neon vector register. |
4273 | if (!tryParseNeonVectorRegister(Operands)) |
4274 | return false; |
4275 | |
4276 | if (tryParseZTOperand(Operands).isSuccess()) |
4277 | return false; |
4278 | |
4279 | // Otherwise try for a scalar register. |
4280 | if (tryParseGPROperand<false>(Operands).isSuccess()) |
4281 | return false; |
4282 | |
4283 | return true; |
4284 | } |
4285 | |
/// parseSymbolicImmVal - Parse an immediate expression, optionally prefixed
/// by an ELF relocation specifier of the form ":<specifier>:" (e.g.
/// ":lo12:sym"). On success ImmVal holds the parsed expression, wrapped in an
/// AArch64MCExpr carrying the variant kind when a specifier was present.
/// Returns true on error.
bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  bool HasELFModifier = false;
  AArch64MCExpr::VariantKind RefKind;

  if (parseOptionalToken(T: AsmToken::Colon)) {
    HasELFModifier = true;

    if (getTok().isNot(K: AsmToken::Identifier))
      return TokError(Msg: "expect relocation specifier in operand after ':'" );

    // Specifier names are matched case-insensitively.
    std::string LowerCase = getTok().getIdentifier().lower();
    RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
                  .Case(S: "lo12" , Value: AArch64MCExpr::VK_LO12)
                  .Case(S: "abs_g3" , Value: AArch64MCExpr::VK_ABS_G3)
                  .Case(S: "abs_g2" , Value: AArch64MCExpr::VK_ABS_G2)
                  .Case(S: "abs_g2_s" , Value: AArch64MCExpr::VK_ABS_G2_S)
                  .Case(S: "abs_g2_nc" , Value: AArch64MCExpr::VK_ABS_G2_NC)
                  .Case(S: "abs_g1" , Value: AArch64MCExpr::VK_ABS_G1)
                  .Case(S: "abs_g1_s" , Value: AArch64MCExpr::VK_ABS_G1_S)
                  .Case(S: "abs_g1_nc" , Value: AArch64MCExpr::VK_ABS_G1_NC)
                  .Case(S: "abs_g0" , Value: AArch64MCExpr::VK_ABS_G0)
                  .Case(S: "abs_g0_s" , Value: AArch64MCExpr::VK_ABS_G0_S)
                  .Case(S: "abs_g0_nc" , Value: AArch64MCExpr::VK_ABS_G0_NC)
                  .Case(S: "prel_g3" , Value: AArch64MCExpr::VK_PREL_G3)
                  .Case(S: "prel_g2" , Value: AArch64MCExpr::VK_PREL_G2)
                  .Case(S: "prel_g2_nc" , Value: AArch64MCExpr::VK_PREL_G2_NC)
                  .Case(S: "prel_g1" , Value: AArch64MCExpr::VK_PREL_G1)
                  .Case(S: "prel_g1_nc" , Value: AArch64MCExpr::VK_PREL_G1_NC)
                  .Case(S: "prel_g0" , Value: AArch64MCExpr::VK_PREL_G0)
                  .Case(S: "prel_g0_nc" , Value: AArch64MCExpr::VK_PREL_G0_NC)
                  .Case(S: "dtprel_g2" , Value: AArch64MCExpr::VK_DTPREL_G2)
                  .Case(S: "dtprel_g1" , Value: AArch64MCExpr::VK_DTPREL_G1)
                  .Case(S: "dtprel_g1_nc" , Value: AArch64MCExpr::VK_DTPREL_G1_NC)
                  .Case(S: "dtprel_g0" , Value: AArch64MCExpr::VK_DTPREL_G0)
                  .Case(S: "dtprel_g0_nc" , Value: AArch64MCExpr::VK_DTPREL_G0_NC)
                  .Case(S: "dtprel_hi12" , Value: AArch64MCExpr::VK_DTPREL_HI12)
                  .Case(S: "dtprel_lo12" , Value: AArch64MCExpr::VK_DTPREL_LO12)
                  .Case(S: "dtprel_lo12_nc" , Value: AArch64MCExpr::VK_DTPREL_LO12_NC)
                  .Case(S: "pg_hi21_nc" , Value: AArch64MCExpr::VK_ABS_PAGE_NC)
                  .Case(S: "tprel_g2" , Value: AArch64MCExpr::VK_TPREL_G2)
                  .Case(S: "tprel_g1" , Value: AArch64MCExpr::VK_TPREL_G1)
                  .Case(S: "tprel_g1_nc" , Value: AArch64MCExpr::VK_TPREL_G1_NC)
                  .Case(S: "tprel_g0" , Value: AArch64MCExpr::VK_TPREL_G0)
                  .Case(S: "tprel_g0_nc" , Value: AArch64MCExpr::VK_TPREL_G0_NC)
                  .Case(S: "tprel_hi12" , Value: AArch64MCExpr::VK_TPREL_HI12)
                  .Case(S: "tprel_lo12" , Value: AArch64MCExpr::VK_TPREL_LO12)
                  .Case(S: "tprel_lo12_nc" , Value: AArch64MCExpr::VK_TPREL_LO12_NC)
                  .Case(S: "tlsdesc_lo12" , Value: AArch64MCExpr::VK_TLSDESC_LO12)
                  .Case(S: "got" , Value: AArch64MCExpr::VK_GOT_PAGE)
                  .Case(S: "gotpage_lo15" , Value: AArch64MCExpr::VK_GOT_PAGE_LO15)
                  .Case(S: "got_lo12" , Value: AArch64MCExpr::VK_GOT_LO12)
                  .Case(S: "gottprel" , Value: AArch64MCExpr::VK_GOTTPREL_PAGE)
                  .Case(S: "gottprel_lo12" , Value: AArch64MCExpr::VK_GOTTPREL_LO12_NC)
                  .Case(S: "gottprel_g1" , Value: AArch64MCExpr::VK_GOTTPREL_G1)
                  .Case(S: "gottprel_g0_nc" , Value: AArch64MCExpr::VK_GOTTPREL_G0_NC)
                  .Case(S: "tlsdesc" , Value: AArch64MCExpr::VK_TLSDESC_PAGE)
                  .Case(S: "secrel_lo12" , Value: AArch64MCExpr::VK_SECREL_LO12)
                  .Case(S: "secrel_hi12" , Value: AArch64MCExpr::VK_SECREL_HI12)
                  .Default(Value: AArch64MCExpr::VK_INVALID);

    if (RefKind == AArch64MCExpr::VK_INVALID)
      return TokError(Msg: "expect relocation specifier in operand after ':'" );

    Lex(); // Eat identifier

    if (parseToken(T: AsmToken::Colon, Msg: "expect ':' after relocation specifier" ))
      return true;
  }

  if (getParser().parseExpression(Res&: ImmVal))
    return true;

  // Wrap the expression so the relocation kind travels with it.
  if (HasELFModifier)
    ImmVal = AArch64MCExpr::create(Expr: ImmVal, Kind: RefKind, Ctx&: getContext());

  return false;
}
4363 | |
/// tryParseMatrixTileList - Parse an SME matrix tile list operand such as
/// "{}", "{za}" or "{za0.d, za1.d}". All tiles in a list must share one
/// element width; the list is folded into a ZAD-register bitmask.
ParseStatus AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
  if (getTok().isNot(K: AsmToken::LCurly))
    return ParseStatus::NoMatch;

  // Parse one "<tile>.<width>" entry; sets Reg and ElementWidth on success.
  auto ParseMatrixTile = [this](unsigned &Reg,
                                unsigned &ElementWidth) -> ParseStatus {
    StringRef Name = getTok().getString();
    size_t DotPosition = Name.find(C: '.');
    if (DotPosition == StringRef::npos)
      return ParseStatus::NoMatch;

    unsigned RegNum = matchMatrixTileListRegName(Name);
    if (!RegNum)
      return ParseStatus::NoMatch;

    // Tail keeps the leading '.' expected by parseVectorKind.
    StringRef Tail = Name.drop_front(N: DotPosition);
    const std::optional<std::pair<int, int>> &KindRes =
        parseVectorKind(Suffix: Tail, VectorKind: RegKind::Matrix);
    if (!KindRes)
      return TokError(
          Msg: "Expected the register to be followed by element width suffix" );
    ElementWidth = KindRes->second;
    Reg = RegNum;
    Lex(); // Eat the register.
    return ParseStatus::Success;
  };

  SMLoc S = getLoc();
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  // Empty matrix list
  if (parseOptionalToken(T: AsmToken::RCurly)) {
    Operands.push_back(Elt: AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0, S, E: getLoc(), Ctx&: getContext()));
    return ParseStatus::Success;
  }

  // Try parse {za} alias early
  if (getTok().getString().equals_insensitive(RHS: "za" )) {
    Lex(); // Eat 'za'

    if (parseToken(T: AsmToken::RCurly, Msg: "'}' expected" ))
      return ParseStatus::Failure;

    // {za} means the whole array: all eight ZAD tiles.
    Operands.push_back(Elt: AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0xFF, S, E: getLoc(), Ctx&: getContext()));
    return ParseStatus::Success;
  }

  SMLoc TileLoc = getLoc();

  unsigned FirstReg, ElementWidth;
  auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
  if (!ParseRes.isSuccess()) {
    // Push back the '{' so other list-operand parsers can retry.
    getLexer().UnLex(Token: LCurly);
    return ParseRes;
  }

  const MCRegisterInfo *RI = getContext().getRegisterInfo();

  unsigned PrevReg = FirstReg;

  // DRegs accumulates the ZAD registers aliased by every tile seen so far.
  SmallSet<unsigned, 8> DRegs;
  AArch64Operand::ComputeRegsForAlias(Reg: FirstReg, OutRegs&: DRegs, ElementWidth);

  SmallSet<unsigned, 8> SeenRegs;
  SeenRegs.insert(V: FirstReg);

  while (parseOptionalToken(T: AsmToken::Comma)) {
    TileLoc = getLoc();
    unsigned Reg, NextElementWidth;
    ParseRes = ParseMatrixTile(Reg, NextElementWidth);
    if (!ParseRes.isSuccess())
      return ParseRes;

    // Element size must match on all regs in the list.
    if (ElementWidth != NextElementWidth)
      return Error(L: TileLoc, Msg: "mismatched register size suffix" );

    // Out-of-order and duplicate tiles are only warnings, not errors.
    if (RI->getEncodingValue(RegNo: Reg) <= (RI->getEncodingValue(RegNo: PrevReg)))
      Warning(L: TileLoc, Msg: "tile list not in ascending order" );

    if (SeenRegs.contains(V: Reg))
      Warning(L: TileLoc, Msg: "duplicate tile in list" );
    else {
      SeenRegs.insert(V: Reg);
      AArch64Operand::ComputeRegsForAlias(Reg, OutRegs&: DRegs, ElementWidth);
    }

    PrevReg = Reg;
  }

  if (parseToken(T: AsmToken::RCurly, Msg: "'}' expected" ))
    return ParseStatus::Failure;

  // Fold the aliased ZAD registers into a bitmask relative to ZAD0.
  unsigned RegMask = 0;
  for (auto Reg : DRegs)
    RegMask |= 0x1 << (RI->getEncodingValue(RegNo: Reg) -
                       RI->getEncodingValue(RegNo: AArch64::ZAD0));
  Operands.push_back(
      Elt: AArch64Operand::CreateMatrixTileList(RegMask, S, E: getLoc(), Ctx&: getContext()));

  return ParseStatus::Success;
}
4469 | |
/// tryParseVectorList - Parse a brace-enclosed vector register list, either
/// a range "{ v0 - v3 }" or a comma-separated sequence "{ v0, v1, ... }"
/// (optionally strided, with wraparound past the last register). All
/// registers must share one kind suffix and the list holds at most 4 vectors.
template <RegKind VectorKind>
ParseStatus AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
                                                 bool ExpectMatch) {
  MCAsmParser &Parser = getParser();
  if (!getTok().is(K: AsmToken::LCurly))
    return ParseStatus::NoMatch;

  // Wrapper around parse function
  auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
                            bool NoMatchIsError) -> ParseStatus {
    auto RegTok = getTok();
    auto ParseRes = tryParseVectorRegister(Reg, Kind, MatchKind: VectorKind);
    if (ParseRes.isSuccess()) {
      if (parseVectorKind(Suffix: Kind, VectorKind))
        return ParseRes;
      llvm_unreachable("Expected a valid vector kind" );
    }

    // "zt0" is a different operand class; let its parser handle it.
    if (RegTok.is(K: AsmToken::Identifier) && ParseRes.isNoMatch() &&
        RegTok.getString().equals_insensitive(RHS: "zt0" ))
      return ParseStatus::NoMatch;

    // "za..." names are matrix operands, so NoMatch stays non-fatal for them.
    if (RegTok.isNot(K: AsmToken::Identifier) || ParseRes.isFailure() ||
        (ParseRes.isNoMatch() && NoMatchIsError &&
         !RegTok.getString().starts_with_insensitive(Prefix: "za" )))
      return Error(L: Loc, Msg: "vector register expected" );

    return ParseStatus::NoMatch;
  };

  int NumRegs = getNumRegsForRegKind(K: VectorKind);
  SMLoc S = getLoc();
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  StringRef Kind;
  MCRegister FirstReg;
  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);

  // Put back the original left bracket if there was no match, so that
  // different types of list-operands can be matched (e.g. SVE, Neon).
  if (ParseRes.isNoMatch())
    Parser.getLexer().UnLex(Token: LCurly);

  if (!ParseRes.isSuccess())
    return ParseRes;

  int64_t PrevReg = FirstReg;
  unsigned Count = 1;

  int Stride = 1;
  if (parseOptionalToken(T: AsmToken::Minus)) {
    // Range form "{ vA - vB }": the count is the register-number distance,
    // computed with wraparound past the last register of this kind.
    SMLoc Loc = getLoc();
    StringRef NextKind;

    MCRegister Reg;
    ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
    if (!ParseRes.isSuccess())
      return ParseRes;

    // Any Kind suffices must match on all regs in the list.
    if (Kind != NextKind)
      return Error(L: Loc, Msg: "mismatched register size suffix" );

    unsigned Space =
        (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + NumRegs - PrevReg);

    if (Space == 0 || Space > 3)
      return Error(L: Loc, Msg: "invalid number of vectors" );

    Count += Space;
  }
  else {
    // Comma-separated form: the stride is fixed by the first pair and every
    // later register must continue it (wrapping at the last register).
    bool HasCalculatedStride = false;
    while (parseOptionalToken(T: AsmToken::Comma)) {
      SMLoc Loc = getLoc();
      StringRef NextKind;
      MCRegister Reg;
      ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
      if (!ParseRes.isSuccess())
        return ParseRes;

      // Any Kind suffices must match on all regs in the list.
      if (Kind != NextKind)
        return Error(L: Loc, Msg: "mismatched register size suffix" );

      unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(RegNo: Reg);
      unsigned PrevRegVal =
          getContext().getRegisterInfo()->getEncodingValue(RegNo: PrevReg);
      if (!HasCalculatedStride) {
        Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
                                       : (RegVal + NumRegs - PrevRegVal);
        HasCalculatedStride = true;
      }

      // Register must be incremental (with a wraparound at last register).
      if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs))
        return Error(L: Loc, Msg: "registers must have the same sequential stride" );

      PrevReg = Reg;
      ++Count;
    }
  }

  if (parseToken(T: AsmToken::RCurly, Msg: "'}' expected" ))
    return ParseStatus::Failure;

  if (Count > 4)
    return Error(L: S, Msg: "invalid number of vectors" );

  unsigned NumElements = 0;
  unsigned ElementWidth = 0;
  if (!Kind.empty()) {
    if (const auto &VK = parseVectorKind(Suffix: Kind, VectorKind))
      std::tie(args&: NumElements, args&: ElementWidth) = *VK;
  }

  Operands.push_back(Elt: AArch64Operand::CreateVectorList(
      RegNum: FirstReg, Count, Stride, NumElements, ElementWidth, RegisterKind: VectorKind, S,
      E: getLoc(), Ctx&: getContext()));

  return ParseStatus::Success;
}
4593 | |
4594 | /// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions. |
4595 | bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) { |
4596 | auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, ExpectMatch: true); |
4597 | if (!ParseRes.isSuccess()) |
4598 | return true; |
4599 | |
4600 | return tryParseVectorIndex(Operands).isFailure(); |
4601 | } |
4602 | |
4603 | ParseStatus AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) { |
4604 | SMLoc StartLoc = getLoc(); |
4605 | |
4606 | MCRegister RegNum; |
4607 | ParseStatus Res = tryParseScalarRegister(RegNum); |
4608 | if (!Res.isSuccess()) |
4609 | return Res; |
4610 | |
4611 | if (!parseOptionalToken(T: AsmToken::Comma)) { |
4612 | Operands.push_back(Elt: AArch64Operand::CreateReg( |
4613 | RegNum, Kind: RegKind::Scalar, S: StartLoc, E: getLoc(), Ctx&: getContext())); |
4614 | return ParseStatus::Success; |
4615 | } |
4616 | |
4617 | parseOptionalToken(T: AsmToken::Hash); |
4618 | |
4619 | if (getTok().isNot(K: AsmToken::Integer)) |
4620 | return Error(L: getLoc(), Msg: "index must be absent or #0" ); |
4621 | |
4622 | const MCExpr *ImmVal; |
4623 | if (getParser().parseExpression(Res&: ImmVal) || !isa<MCConstantExpr>(Val: ImmVal) || |
4624 | cast<MCConstantExpr>(Val: ImmVal)->getValue() != 0) |
4625 | return Error(L: getLoc(), Msg: "index must be absent or #0" ); |
4626 | |
4627 | Operands.push_back(Elt: AArch64Operand::CreateReg( |
4628 | RegNum, Kind: RegKind::Scalar, S: StartLoc, E: getLoc(), Ctx&: getContext())); |
4629 | return ParseStatus::Success; |
4630 | } |
4631 | |
4632 | ParseStatus AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) { |
4633 | SMLoc StartLoc = getLoc(); |
4634 | const AsmToken &Tok = getTok(); |
4635 | std::string Name = Tok.getString().lower(); |
4636 | |
4637 | unsigned RegNum = matchRegisterNameAlias(Name, Kind: RegKind::LookupTable); |
4638 | |
4639 | if (RegNum == 0) |
4640 | return ParseStatus::NoMatch; |
4641 | |
4642 | Operands.push_back(Elt: AArch64Operand::CreateReg( |
4643 | RegNum, Kind: RegKind::LookupTable, S: StartLoc, E: getLoc(), Ctx&: getContext())); |
4644 | Lex(); // Eat register. |
4645 | |
4646 | // Check if register is followed by an index |
4647 | if (parseOptionalToken(T: AsmToken::LBrac)) { |
4648 | Operands.push_back( |
4649 | Elt: AArch64Operand::CreateToken(Str: "[" , S: getLoc(), Ctx&: getContext())); |
4650 | const MCExpr *ImmVal; |
4651 | if (getParser().parseExpression(Res&: ImmVal)) |
4652 | return ParseStatus::NoMatch; |
4653 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal); |
4654 | if (!MCE) |
4655 | return TokError(Msg: "immediate value expected for vector index" ); |
4656 | Operands.push_back(Elt: AArch64Operand::CreateImm( |
4657 | Val: MCConstantExpr::create(Value: MCE->getValue(), Ctx&: getContext()), S: StartLoc, |
4658 | E: getLoc(), Ctx&: getContext())); |
4659 | if (parseOptionalToken(T: AsmToken::Comma)) |
4660 | if (parseOptionalMulOperand(Operands)) |
4661 | return ParseStatus::Failure; |
4662 | if (parseToken(T: AsmToken::RBrac, Msg: "']' expected" )) |
4663 | return ParseStatus::Failure; |
4664 | Operands.push_back( |
4665 | Elt: AArch64Operand::CreateToken(Str: "]" , S: getLoc(), Ctx&: getContext())); |
4666 | } |
4667 | return ParseStatus::Success; |
4668 | } |
4669 | |
4670 | template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy> |
4671 | ParseStatus AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) { |
4672 | SMLoc StartLoc = getLoc(); |
4673 | |
4674 | MCRegister RegNum; |
4675 | ParseStatus Res = tryParseScalarRegister(RegNum); |
4676 | if (!Res.isSuccess()) |
4677 | return Res; |
4678 | |
4679 | // No shift/extend is the default. |
4680 | if (!ParseShiftExtend || getTok().isNot(K: AsmToken::Comma)) { |
4681 | Operands.push_back(Elt: AArch64Operand::CreateReg( |
4682 | RegNum, Kind: RegKind::Scalar, S: StartLoc, E: getLoc(), Ctx&: getContext(), EqTy)); |
4683 | return ParseStatus::Success; |
4684 | } |
4685 | |
4686 | // Eat the comma |
4687 | Lex(); |
4688 | |
4689 | // Match the shift |
4690 | SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd; |
4691 | Res = tryParseOptionalShiftExtend(Operands&: ExtOpnd); |
4692 | if (!Res.isSuccess()) |
4693 | return Res; |
4694 | |
4695 | auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get()); |
4696 | Operands.push_back(Elt: AArch64Operand::CreateReg( |
4697 | RegNum, Kind: RegKind::Scalar, S: StartLoc, E: Ext->getEndLoc(), Ctx&: getContext(), EqTy, |
4698 | ExtTy: Ext->getShiftExtendType(), ShiftAmount: Ext->getShiftExtendAmount(), |
4699 | HasExplicitAmount: Ext->hasShiftExtendAmount())); |
4700 | |
4701 | return ParseStatus::Success; |
4702 | } |
4703 | |
4704 | bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) { |
4705 | MCAsmParser &Parser = getParser(); |
4706 | |
4707 | // Some SVE instructions have a decoration after the immediate, i.e. |
4708 | // "mul vl". We parse them here and add tokens, which must be present in the |
4709 | // asm string in the tablegen instruction. |
4710 | bool NextIsVL = |
4711 | Parser.getLexer().peekTok().getString().equals_insensitive(RHS: "vl" ); |
4712 | bool NextIsHash = Parser.getLexer().peekTok().is(K: AsmToken::Hash); |
4713 | if (!getTok().getString().equals_insensitive(RHS: "mul" ) || |
4714 | !(NextIsVL || NextIsHash)) |
4715 | return true; |
4716 | |
4717 | Operands.push_back( |
4718 | Elt: AArch64Operand::CreateToken(Str: "mul" , S: getLoc(), Ctx&: getContext())); |
4719 | Lex(); // Eat the "mul" |
4720 | |
4721 | if (NextIsVL) { |
4722 | Operands.push_back( |
4723 | Elt: AArch64Operand::CreateToken(Str: "vl" , S: getLoc(), Ctx&: getContext())); |
4724 | Lex(); // Eat the "vl" |
4725 | return false; |
4726 | } |
4727 | |
4728 | if (NextIsHash) { |
4729 | Lex(); // Eat the # |
4730 | SMLoc S = getLoc(); |
4731 | |
4732 | // Parse immediate operand. |
4733 | const MCExpr *ImmVal; |
4734 | if (!Parser.parseExpression(Res&: ImmVal)) |
4735 | if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal)) { |
4736 | Operands.push_back(Elt: AArch64Operand::CreateImm( |
4737 | Val: MCConstantExpr::create(Value: MCE->getValue(), Ctx&: getContext()), S, E: getLoc(), |
4738 | Ctx&: getContext())); |
4739 | return false; |
4740 | } |
4741 | } |
4742 | |
4743 | return Error(L: getLoc(), Msg: "expected 'vl' or '#<imm>'" ); |
4744 | } |
4745 | |
4746 | bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands, |
4747 | StringRef &VecGroup) { |
4748 | MCAsmParser &Parser = getParser(); |
4749 | auto Tok = Parser.getTok(); |
4750 | if (Tok.isNot(K: AsmToken::Identifier)) |
4751 | return true; |
4752 | |
4753 | StringRef VG = StringSwitch<StringRef>(Tok.getString().lower()) |
4754 | .Case(S: "vgx2" , Value: "vgx2" ) |
4755 | .Case(S: "vgx4" , Value: "vgx4" ) |
4756 | .Default(Value: "" ); |
4757 | |
4758 | if (VG.empty()) |
4759 | return true; |
4760 | |
4761 | VecGroup = VG; |
4762 | Parser.Lex(); // Eat vgx[2|4] |
4763 | return false; |
4764 | } |
4765 | |
4766 | bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) { |
4767 | auto Tok = getTok(); |
4768 | if (Tok.isNot(K: AsmToken::Identifier)) |
4769 | return true; |
4770 | |
4771 | auto Keyword = Tok.getString(); |
4772 | Keyword = StringSwitch<StringRef>(Keyword.lower()) |
4773 | .Case(S: "sm" , Value: "sm" ) |
4774 | .Case(S: "za" , Value: "za" ) |
4775 | .Default(Value: Keyword); |
4776 | Operands.push_back( |
4777 | Elt: AArch64Operand::CreateToken(Str: Keyword, S: Tok.getLoc(), Ctx&: getContext())); |
4778 | |
4779 | Lex(); |
4780 | return false; |
4781 | } |
4782 | |
/// parseOperand - Parse an AArch64 instruction operand. For now this parses
/// the operand regardless of the mnemonic.
///
/// \param Operands        vector the parsed operand(s) are appended to.
/// \param isCondCode      true when a condition-code operand is expected at
///                        this position.
/// \param invertCondCode  true when a matched condition code must be
///                        inverted (used by aliases like cset/cinc).
/// \returns true on error, false on success.
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                    bool invertCondCode) {
  MCAsmParser &Parser = getParser();

  ParseStatus ResTy =
      MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/true);

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  if (ResTy.isSuccess())
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy.isFailure())
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  // Consume an optional ", <shift|extend>" after an operand. When the text
  // following the comma is not a shift/extend, un-lex SavedTok so the comma
  // is re-seen as an ordinary operand separator.
  auto parseOptionalShiftExtend = [&](AsmToken SavedTok) {
    if (parseOptionalToken(T: AsmToken::Comma)) {
      ParseStatus Res = tryParseOptionalShiftExtend(Operands);
      if (!Res.isNoMatch())
        return Res.isFailure();
      getLexer().UnLex(Token: SavedTok);
    }
    return false;
  };
  switch (getLexer().getKind()) {
  default: {
    // Anything unrecognised is tried as a symbolic immediate expression.
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(ImmVal&: Expr))
      return Error(L: S, Msg: "invalid operand" );

    SMLoc E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
    Operands.push_back(Elt: AArch64Operand::CreateImm(Val: Expr, S, E, Ctx&: getContext()));
    return parseOptionalShiftExtend(getTok());
  }
  case AsmToken::LBrac: {
    Operands.push_back(
        Elt: AArch64Operand::CreateToken(Str: "[" , S: getLoc(), Ctx&: getContext()));
    Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, isCondCode: false, invertCondCode: false);
  }
  case AsmToken::LCurly: {
    // Try a NEON register list first; returns false on success.
    if (!parseNeonVectorList(Operands))
      return false;

    // Otherwise treat '{' as a raw token (SME ZA tile groups etc.).
    Operands.push_back(
        Elt: AArch64Operand::CreateToken(Str: "{" , S: getLoc(), Ctx&: getContext()));
    Lex(); // Eat '{'

    // There's no comma after a '{', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, isCondCode: false, invertCondCode: false);
  }
  case AsmToken::Identifier: {
    // See if this is a "VG" decoration used by SME instructions.
    StringRef VecGroup;
    if (!parseOptionalVGOperand(Operands, VecGroup)) {
      Operands.push_back(
          Elt: AArch64Operand::CreateToken(Str: VecGroup, S: getLoc(), Ctx&: getContext()));
      return false;
    }
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands)) {
      // Parse an optional shift/extend modifier.
      AsmToken SavedTok = getTok();
      if (parseOptionalToken(T: AsmToken::Comma)) {
        // The operand after the register may be a label (e.g. ADR/ADRP). Check
        // such cases and don't report an error when <label> happens to match a
        // shift/extend modifier.
        ParseStatus Res = MatchOperandParserImpl(Operands, Mnemonic,
                                                 /*ParseForAllFeatures=*/true);
        if (!Res.isNoMatch())
          return Res.isFailure();
        Res = tryParseOptionalShiftExtend(Operands);
        if (!Res.isNoMatch())
          return Res.isFailure();
        getLexer().UnLex(Token: SavedTok);
      }
      return false;
    }

    // See if this is a "mul vl" decoration or "mul #<int>" operand used
    // by SVE instructions.
    if (!parseOptionalMulOperand(Operands))
      return false;

    // If this is a two-word mnemonic, parse its special keyword
    // operand as an identifier.
    if (Mnemonic == "brb" || Mnemonic == "smstart" || Mnemonic == "smstop" ||
        Mnemonic == "gcsb" )
      return parseKeywordOperand(Operands);

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = getLoc();
    if (getParser().parseExpression(Res&: IdVal))
      return true;
    E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
    Operands.push_back(Elt: AArch64Operand::CreateImm(Val: IdVal, S, E, Ctx&: getContext()));
    return false;
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();

    parseOptionalToken(T: AsmToken::Hash);

    // Parse a negative sign
    bool isNegative = false;
    if (getTok().is(K: AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(K: AsmToken::Real))
        Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = getTok();
    if (Tok.is(K: AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt" && Mnemonic != "fcmne" )
        return TokError(Msg: "unexpected floating point literal" );
      else if (IntVal != 0 || isNegative)
        return TokError(Msg: "expected floating-point constant #0.0" );
      Lex(); // Eat the token.

      // Emit "#0" and ".0" as two raw tokens, matching the asm string of
      // the zero-comparison forms.
      Operands.push_back(Elt: AArch64Operand::CreateToken(Str: "#0" , S, Ctx&: getContext()));
      Operands.push_back(Elt: AArch64Operand::CreateToken(Str: ".0" , S, Ctx&: getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(Ptr: getLoc().getPointer() - 1);
    Operands.push_back(Elt: AArch64Operand::CreateImm(Val: ImmVal, S, E, Ctx&: getContext()));

    // Parse an optional shift/extend modifier.
    return parseOptionalShiftExtend(Tok);
  }
  case AsmToken::Equal: {
    SMLoc Loc = getLoc();
    if (Mnemonic != "ldr" ) // only parse for ldr pseudo (e.g. ldr r0, =val)
      return TokError(Msg: "unexpected token in operand" );
    Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(Res&: SubExprVal))
      return true;

    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
      return Error(L: Loc, Msg: "Only valid when first operand is register" );

    bool IsXReg =
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Reg: Operands[1]->getReg());

    MCContext& Ctx = getContext();
    E = SMLoc::getFromPointer(Ptr: Loc.getPointer() - 1);
    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
    if (isa<MCConstantExpr>(Val: SubExprVal)) {
      uint64_t Imm = (cast<MCConstantExpr>(Val: SubExprVal))->getValue();
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      // Normalise the constant into a 16-bit payload plus a 16-bit-multiple
      // LSL amount, as movz requires.
      while (Imm > 0xFFFF && llvm::countr_zero(Val: Imm) >= 16) {
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
        Operands[0] = AArch64Operand::CreateToken(Str: "movz" , S: Loc, Ctx);
        Operands.push_back(Elt: AArch64Operand::CreateImm(
            Val: MCConstantExpr::create(Value: Imm, Ctx), S, E, Ctx));
        if (ShiftAmt)
          Operands.push_back(Elt: AArch64Operand::CreateShiftExtend(ShOp: AArch64_AM::LSL,
                                                                 Val: ShiftAmt, HasExplicitAmount: true, S, E, Ctx));
        return false;
      }
      APInt Simm = APInt(64, Imm << ShiftAmt);
      // check if the immediate is an unsigned or signed 32-bit int for W regs
      if (!IsXReg && !(Simm.isIntN(N: 32) || Simm.isSignedIntN(N: 32)))
        return Error(L: Loc, Msg: "Immediate too large for register" );
    }
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc =
        getTargetStreamer().addConstantPoolEntry(SubExprVal, Size: IsXReg ? 8 : 4, Loc);
    Operands.push_back(Elt: AArch64Operand::CreateImm(Val: CPLoc, S, E, Ctx));
    return false;
  }
  }
}
4995 | |
4996 | bool AArch64AsmParser::parseImmExpr(int64_t &Out) { |
4997 | const MCExpr *Expr = nullptr; |
4998 | SMLoc L = getLoc(); |
4999 | if (check(P: getParser().parseExpression(Res&: Expr), Loc: L, Msg: "expected expression" )) |
5000 | return true; |
5001 | const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Val: Expr); |
5002 | if (check(P: !Value, Loc: L, Msg: "expected constant expression" )) |
5003 | return true; |
5004 | Out = Value->getValue(); |
5005 | return false; |
5006 | } |
5007 | |
5008 | bool AArch64AsmParser::parseComma() { |
5009 | if (check(P: getTok().isNot(K: AsmToken::Comma), Loc: getLoc(), Msg: "expected comma" )) |
5010 | return true; |
5011 | // Eat the comma |
5012 | Lex(); |
5013 | return false; |
5014 | } |
5015 | |
5016 | bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base, |
5017 | unsigned First, unsigned Last) { |
5018 | MCRegister Reg; |
5019 | SMLoc Start, End; |
5020 | if (check(P: parseRegister(Reg, StartLoc&: Start, EndLoc&: End), Loc: getLoc(), Msg: "expected register" )) |
5021 | return true; |
5022 | |
5023 | // Special handling for FP and LR; they aren't linearly after x28 in |
5024 | // the registers enum. |
5025 | unsigned RangeEnd = Last; |
5026 | if (Base == AArch64::X0) { |
5027 | if (Last == AArch64::FP) { |
5028 | RangeEnd = AArch64::X28; |
5029 | if (Reg == AArch64::FP) { |
5030 | Out = 29; |
5031 | return false; |
5032 | } |
5033 | } |
5034 | if (Last == AArch64::LR) { |
5035 | RangeEnd = AArch64::X28; |
5036 | if (Reg == AArch64::FP) { |
5037 | Out = 29; |
5038 | return false; |
5039 | } else if (Reg == AArch64::LR) { |
5040 | Out = 30; |
5041 | return false; |
5042 | } |
5043 | } |
5044 | } |
5045 | |
5046 | if (check(P: Reg < First || Reg > RangeEnd, Loc: Start, |
5047 | Msg: Twine("expected register in range " ) + |
5048 | AArch64InstPrinter::getRegisterName(Reg: First) + " to " + |
5049 | AArch64InstPrinter::getRegisterName(Reg: Last))) |
5050 | return true; |
5051 | Out = Reg - Base; |
5052 | return false; |
5053 | } |
5054 | |
5055 | bool AArch64AsmParser::areEqualRegs(const MCParsedAsmOperand &Op1, |
5056 | const MCParsedAsmOperand &Op2) const { |
5057 | auto &AOp1 = static_cast<const AArch64Operand&>(Op1); |
5058 | auto &AOp2 = static_cast<const AArch64Operand&>(Op2); |
5059 | |
5060 | if (AOp1.isVectorList() && AOp2.isVectorList()) |
5061 | return AOp1.getVectorListCount() == AOp2.getVectorListCount() && |
5062 | AOp1.getVectorListStart() == AOp2.getVectorListStart() && |
5063 | AOp1.getVectorListStride() == AOp2.getVectorListStride(); |
5064 | |
5065 | if (!AOp1.isReg() || !AOp2.isReg()) |
5066 | return false; |
5067 | |
5068 | if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg && |
5069 | AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg) |
5070 | return MCTargetAsmParser::areEqualRegs(Op1, Op2); |
5071 | |
5072 | assert(AOp1.isScalarReg() && AOp2.isScalarReg() && |
5073 | "Testing equality of non-scalar registers not supported" ); |
5074 | |
5075 | // Check if a registers match their sub/super register classes. |
5076 | if (AOp1.getRegEqualityTy() == EqualsSuperReg) |
5077 | return getXRegFromWReg(Reg: Op1.getReg()) == Op2.getReg(); |
5078 | if (AOp1.getRegEqualityTy() == EqualsSubReg) |
5079 | return getWRegFromXReg(Reg: Op1.getReg()) == Op2.getReg(); |
5080 | if (AOp2.getRegEqualityTy() == EqualsSuperReg) |
5081 | return getXRegFromWReg(Reg: Op2.getReg()) == Op1.getReg(); |
5082 | if (AOp2.getRegEqualityTy() == EqualsSubReg) |
5083 | return getWRegFromXReg(Reg: Op2.getReg()) == Op1.getReg(); |
5084 | |
5085 | return false; |
5086 | } |
5087 | |
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands.
///
/// Splits the mnemonic on '.' into a head token plus suffix tokens, routes
/// SYS/SYSP aliases to their dedicated parsers, and then parses the
/// comma-separated operand list. \returns true on error (this includes the
/// .req directive, which is fully handled here), false on success.
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  // Canonicalise legacy one-word conditional branch spellings ("beq") into
  // the dotted form ("b.eq") the matcher expects.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case(S: "beq" , Value: "b.eq" )
             .Case(S: "bne" , Value: "b.ne" )
             .Case(S: "bhs" , Value: "b.hs" )
             .Case(S: "bcs" , Value: "b.cs" )
             .Case(S: "blo" , Value: "b.lo" )
             .Case(S: "bcc" , Value: "b.cc" )
             .Case(S: "bmi" , Value: "b.mi" )
             .Case(S: "bpl" , Value: "b.pl" )
             .Case(S: "bvs" , Value: "b.vs" )
             .Case(S: "bvc" , Value: "b.vc" )
             .Case(S: "bhi" , Value: "b.hi" )
             .Case(S: "bls" , Value: "b.ls" )
             .Case(S: "bge" , Value: "b.ge" )
             .Case(S: "blt" , Value: "b.lt" )
             .Case(S: "bgt" , Value: "b.gt" )
             .Case(S: "ble" , Value: "b.le" )
             .Case(S: "bal" , Value: "b.al" )
             .Case(S: "bnv" , Value: "b.nv" )
             .Default(Value: Name);

  // First check for the AArch64-specific .req directive.
  if (getTok().is(K: AsmToken::Identifier) &&
      getTok().getIdentifier().lower() == ".req" ) {
    parseDirectiveReq(Name, L: NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction'.
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find(C: '.');
  StringRef Head = Name.slice(Start, End: Next);

  // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
  // the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
      Head == "cfp" || Head == "dvp" || Head == "cpp" || Head == "cosp" )
    return parseSysAlias(Name: Head, NameLoc, Operands);

  // TLBIP instructions are aliases for the SYSP instruction.
  if (Head == "tlbip" )
    return parseSyspAlias(Name: Head, NameLoc, Operands);

  Operands.push_back(Elt: AArch64Operand::CreateToken(Str: Head, S: NameLoc, Ctx&: getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
  if ((Head == "b" || Head == "bc" ) && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find(C: '.', From: Start + 1);
    Head = Name.slice(Start: Start + 1, End: Next);

    SMLoc SuffixLoc = SMLoc::getFromPointer(Ptr: NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    std::string Suggestion;
    AArch64CC::CondCode CC = parseCondCodeString(Cond: Head, Suggestion);
    if (CC == AArch64CC::Invalid) {
      std::string Msg = "invalid condition code" ;
      if (!Suggestion.empty())
        Msg += ", did you mean " + Suggestion + "?" ;
      return Error(L: SuffixLoc, Msg);
    }
    Operands.push_back(Elt: AArch64Operand::CreateToken(Str: "." , S: SuffixLoc, Ctx&: getContext(),
                                                    /*IsSuffix=*/true));
    Operands.push_back(
        Elt: AArch64Operand::CreateCondCode(Code: CC, S: NameLoc, E: NameLoc, Ctx&: getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find(C: '.', From: Start + 1);
    Head = Name.slice(Start, End: Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(Ptr: NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(Elt: AArch64Operand::CreateToken(
        Str: Head, S: SuffixLoc, Ctx&: getContext(), /*IsSuffix=*/true));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg" );

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  // generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm" );
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg" );

  // Read the remaining operands.
  if (getLexer().isNot(K: AsmToken::EndOfStatement)) {

    // N is the 1-based operand position, used to locate the condition-code
    // operand of the conditional compare/select families above.
    unsigned N = 1;
    do {
      // Parse and remember the operand.
      if (parseOperand(Operands, isCondCode: (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       invertCondCode: condCodeSecondOperand || condCodeThirdOperand)) {
        return true;
      }

      // After successfully parsing some operands there are three special cases
      // to consider (i.e. notional operands not separated by commas). Two are
      // due to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // And a further case is '}', which ends a group of tokens specifying the
      // SME accumulator array 'ZA' or tile vector, i.e.
      //
      //   '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!

      if (parseOptionalToken(T: AsmToken::RBrac))
        Operands.push_back(
            Elt: AArch64Operand::CreateToken(Str: "]" , S: getLoc(), Ctx&: getContext()));
      if (parseOptionalToken(T: AsmToken::Exclaim))
        Operands.push_back(
            Elt: AArch64Operand::CreateToken(Str: "!" , S: getLoc(), Ctx&: getContext()));
      if (parseOptionalToken(T: AsmToken::RCurly))
        Operands.push_back(
            Elt: AArch64Operand::CreateToken(Str: "}" , S: getLoc(), Ctx&: getContext()));

      ++N;
    } while (parseOptionalToken(T: AsmToken::Comma));
  }

  if (parseToken(T: AsmToken::EndOfStatement, Msg: "unexpected token in argument list" ))
    return true;

  return false;
}
5236 | |
5237 | static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) { |
5238 | assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31)); |
5239 | return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) || |
5240 | (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) || |
5241 | (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) || |
5242 | (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) || |
5243 | (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) || |
5244 | (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0)); |
5245 | } |
5246 | |
5247 | // FIXME: This entire function is a giant hack to provide us with decent |
5248 | // operand range validation/diagnostics until TableGen/MC can be extended |
5249 | // to support autogeneration of this kind of validation. |
5250 | bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc, |
5251 | SmallVectorImpl<SMLoc> &Loc) { |
5252 | const MCRegisterInfo *RI = getContext().getRegisterInfo(); |
5253 | const MCInstrDesc &MCID = MII.get(Opcode: Inst.getOpcode()); |
5254 | |
5255 | // A prefix only applies to the instruction following it. Here we extract |
5256 | // prefix information for the next instruction before validating the current |
5257 | // one so that in the case of failure we don't erronously continue using the |
5258 | // current prefix. |
5259 | PrefixInfo Prefix = NextPrefix; |
5260 | NextPrefix = PrefixInfo::CreateFromInst(Inst, TSFlags: MCID.TSFlags); |
5261 | |
5262 | // Before validating the instruction in isolation we run through the rules |
5263 | // applicable when it follows a prefix instruction. |
5264 | // NOTE: brk & hlt can be prefixed but require no additional validation. |
5265 | if (Prefix.isActive() && |
5266 | (Inst.getOpcode() != AArch64::BRK) && |
5267 | (Inst.getOpcode() != AArch64::HLT)) { |
5268 | |
5269 | // Prefixed intructions must have a destructive operand. |
5270 | if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) == |
5271 | AArch64::NotDestructive) |
5272 | return Error(L: IDLoc, Msg: "instruction is unpredictable when following a" |
5273 | " movprfx, suggest replacing movprfx with mov" ); |
5274 | |
5275 | // Destination operands must match. |
5276 | if (Inst.getOperand(i: 0).getReg() != Prefix.getDstReg()) |
5277 | return Error(L: Loc[0], Msg: "instruction is unpredictable when following a" |
5278 | " movprfx writing to a different destination" ); |
5279 | |
5280 | // Destination operand must not be used in any other location. |
5281 | for (unsigned i = 1; i < Inst.getNumOperands(); ++i) { |
5282 | if (Inst.getOperand(i).isReg() && |
5283 | (MCID.getOperandConstraint(OpNum: i, Constraint: MCOI::TIED_TO) == -1) && |
5284 | isMatchingOrAlias(ZReg: Prefix.getDstReg(), Reg: Inst.getOperand(i).getReg())) |
5285 | return Error(L: Loc[0], Msg: "instruction is unpredictable when following a" |
5286 | " movprfx and destination also used as non-destructive" |
5287 | " source" ); |
5288 | } |
5289 | |
5290 | auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID]; |
5291 | if (Prefix.isPredicated()) { |
5292 | int PgIdx = -1; |
5293 | |
5294 | // Find the instructions general predicate. |
5295 | for (unsigned i = 1; i < Inst.getNumOperands(); ++i) |
5296 | if (Inst.getOperand(i).isReg() && |
5297 | PPRRegClass.contains(Reg: Inst.getOperand(i).getReg())) { |
5298 | PgIdx = i; |
5299 | break; |
5300 | } |
5301 | |
5302 | // Instruction must be predicated if the movprfx is predicated. |
5303 | if (PgIdx == -1 || |
5304 | (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone) |
5305 | return Error(L: IDLoc, Msg: "instruction is unpredictable when following a" |
5306 | " predicated movprfx, suggest using unpredicated movprfx" ); |
5307 | |
5308 | // Instruction must use same general predicate as the movprfx. |
5309 | if (Inst.getOperand(i: PgIdx).getReg() != Prefix.getPgReg()) |
5310 | return Error(L: IDLoc, Msg: "instruction is unpredictable when following a" |
5311 | " predicated movprfx using a different general predicate" ); |
5312 | |
5313 | // Instruction element type must match the movprfx. |
5314 | if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize()) |
5315 | return Error(L: IDLoc, Msg: "instruction is unpredictable when following a" |
5316 | " predicated movprfx with a different element size" ); |
5317 | } |
5318 | } |
5319 | |
5320 | // On ARM64EC, only valid registers may be used. Warn against using |
5321 | // explicitly disallowed registers. |
5322 | if (IsWindowsArm64EC) { |
5323 | for (unsigned i = 0; i < Inst.getNumOperands(); ++i) { |
5324 | if (Inst.getOperand(i).isReg()) { |
5325 | unsigned Reg = Inst.getOperand(i).getReg(); |
5326 | // At this point, vector registers are matched to their |
5327 | // appropriately sized alias. |
5328 | if ((Reg == AArch64::W13 || Reg == AArch64::X13) || |
5329 | (Reg == AArch64::W14 || Reg == AArch64::X14) || |
5330 | (Reg == AArch64::W23 || Reg == AArch64::X23) || |
5331 | (Reg == AArch64::W24 || Reg == AArch64::X24) || |
5332 | (Reg == AArch64::W28 || Reg == AArch64::X28) || |
5333 | (Reg >= AArch64::Q16 && Reg <= AArch64::Q31) || |
5334 | (Reg >= AArch64::D16 && Reg <= AArch64::D31) || |
5335 | (Reg >= AArch64::S16 && Reg <= AArch64::S31) || |
5336 | (Reg >= AArch64::H16 && Reg <= AArch64::H31) || |
5337 | (Reg >= AArch64::B16 && Reg <= AArch64::B31)) { |
5338 | Warning(L: IDLoc, Msg: "register " + Twine(RI->getName(RegNo: Reg)) + |
5339 | " is disallowed on ARM64EC." ); |
5340 | } |
5341 | } |
5342 | } |
5343 | } |
5344 | |
5345 | // Check for indexed addressing modes w/ the base register being the |
5346 | // same as a destination/source register or pair load where |
5347 | // the Rt == Rt2. All of those are undefined behaviour. |
5348 | switch (Inst.getOpcode()) { |
5349 | case AArch64::LDPSWpre: |
5350 | case AArch64::LDPWpost: |
5351 | case AArch64::LDPWpre: |
5352 | case AArch64::LDPXpost: |
5353 | case AArch64::LDPXpre: { |
5354 | unsigned Rt = Inst.getOperand(i: 1).getReg(); |
5355 | unsigned Rt2 = Inst.getOperand(i: 2).getReg(); |
5356 | unsigned Rn = Inst.getOperand(i: 3).getReg(); |
5357 | if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt)) |
5358 | return Error(L: Loc[0], Msg: "unpredictable LDP instruction, writeback base " |
5359 | "is also a destination" ); |
5360 | if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt2)) |
5361 | return Error(L: Loc[1], Msg: "unpredictable LDP instruction, writeback base " |
5362 | "is also a destination" ); |
5363 | [[fallthrough]]; |
5364 | } |
5365 | case AArch64::LDR_ZA: |
5366 | case AArch64::STR_ZA: { |
5367 | if (Inst.getOperand(i: 2).isImm() && Inst.getOperand(i: 4).isImm() && |
5368 | Inst.getOperand(i: 2).getImm() != Inst.getOperand(i: 4).getImm()) |
5369 | return Error(L: Loc[1], |
5370 | Msg: "unpredictable instruction, immediate and offset mismatch." ); |
5371 | break; |
5372 | } |
5373 | case AArch64::LDPDi: |
5374 | case AArch64::LDPQi: |
5375 | case AArch64::LDPSi: |
5376 | case AArch64::LDPSWi: |
5377 | case AArch64::LDPWi: |
5378 | case AArch64::LDPXi: { |
5379 | unsigned Rt = Inst.getOperand(i: 0).getReg(); |
5380 | unsigned Rt2 = Inst.getOperand(i: 1).getReg(); |
5381 | if (Rt == Rt2) |
5382 | return Error(L: Loc[1], Msg: "unpredictable LDP instruction, Rt2==Rt" ); |
5383 | break; |
5384 | } |
5385 | case AArch64::LDPDpost: |
5386 | case AArch64::LDPDpre: |
5387 | case AArch64::LDPQpost: |
5388 | case AArch64::LDPQpre: |
5389 | case AArch64::LDPSpost: |
5390 | case AArch64::LDPSpre: |
5391 | case AArch64::LDPSWpost: { |
5392 | unsigned Rt = Inst.getOperand(i: 1).getReg(); |
5393 | unsigned Rt2 = Inst.getOperand(i: 2).getReg(); |
5394 | if (Rt == Rt2) |
5395 | return Error(L: Loc[1], Msg: "unpredictable LDP instruction, Rt2==Rt" ); |
5396 | break; |
5397 | } |
5398 | case AArch64::STPDpost: |
5399 | case AArch64::STPDpre: |
5400 | case AArch64::STPQpost: |
5401 | case AArch64::STPQpre: |
5402 | case AArch64::STPSpost: |
5403 | case AArch64::STPSpre: |
5404 | case AArch64::STPWpost: |
5405 | case AArch64::STPWpre: |
5406 | case AArch64::STPXpost: |
5407 | case AArch64::STPXpre: { |
5408 | unsigned Rt = Inst.getOperand(i: 1).getReg(); |
5409 | unsigned Rt2 = Inst.getOperand(i: 2).getReg(); |
5410 | unsigned Rn = Inst.getOperand(i: 3).getReg(); |
5411 | if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt)) |
5412 | return Error(L: Loc[0], Msg: "unpredictable STP instruction, writeback base " |
5413 | "is also a source" ); |
5414 | if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt2)) |
5415 | return Error(L: Loc[1], Msg: "unpredictable STP instruction, writeback base " |
5416 | "is also a source" ); |
5417 | break; |
5418 | } |
5419 | case AArch64::LDRBBpre: |
5420 | case AArch64::LDRBpre: |
5421 | case AArch64::LDRHHpre: |
5422 | case AArch64::LDRHpre: |
5423 | case AArch64::LDRSBWpre: |
5424 | case AArch64::LDRSBXpre: |
5425 | case AArch64::LDRSHWpre: |
5426 | case AArch64::LDRSHXpre: |
5427 | case AArch64::LDRSWpre: |
5428 | case AArch64::LDRWpre: |
5429 | case AArch64::LDRXpre: |
5430 | case AArch64::LDRBBpost: |
5431 | case AArch64::LDRBpost: |
5432 | case AArch64::LDRHHpost: |
5433 | case AArch64::LDRHpost: |
5434 | case AArch64::LDRSBWpost: |
5435 | case AArch64::LDRSBXpost: |
5436 | case AArch64::LDRSHWpost: |
5437 | case AArch64::LDRSHXpost: |
5438 | case AArch64::LDRSWpost: |
5439 | case AArch64::LDRWpost: |
5440 | case AArch64::LDRXpost: { |
5441 | unsigned Rt = Inst.getOperand(i: 1).getReg(); |
5442 | unsigned Rn = Inst.getOperand(i: 2).getReg(); |
5443 | if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt)) |
5444 | return Error(L: Loc[0], Msg: "unpredictable LDR instruction, writeback base " |
5445 | "is also a source" ); |
5446 | break; |
5447 | } |
5448 | case AArch64::STRBBpost: |
5449 | case AArch64::STRBpost: |
5450 | case AArch64::STRHHpost: |
5451 | case AArch64::STRHpost: |
5452 | case AArch64::STRWpost: |
5453 | case AArch64::STRXpost: |
5454 | case AArch64::STRBBpre: |
5455 | case AArch64::STRBpre: |
5456 | case AArch64::STRHHpre: |
5457 | case AArch64::STRHpre: |
5458 | case AArch64::STRWpre: |
5459 | case AArch64::STRXpre: { |
5460 | unsigned Rt = Inst.getOperand(i: 1).getReg(); |
5461 | unsigned Rn = Inst.getOperand(i: 2).getReg(); |
5462 | if (RI->isSubRegisterEq(RegA: Rn, RegB: Rt)) |
5463 | return Error(L: Loc[0], Msg: "unpredictable STR instruction, writeback base " |
5464 | "is also a source" ); |
5465 | break; |
5466 | } |
5467 | case AArch64::STXRB: |
5468 | case AArch64::STXRH: |
5469 | case AArch64::STXRW: |
5470 | case AArch64::STXRX: |
5471 | case AArch64::STLXRB: |
5472 | case AArch64::STLXRH: |
5473 | case AArch64::STLXRW: |
5474 | case AArch64::STLXRX: { |
5475 | unsigned Rs = Inst.getOperand(i: 0).getReg(); |
5476 | unsigned Rt = Inst.getOperand(i: 1).getReg(); |
5477 | unsigned Rn = Inst.getOperand(i: 2).getReg(); |
5478 | if (RI->isSubRegisterEq(RegA: Rt, RegB: Rs) || |
5479 | (RI->isSubRegisterEq(RegA: Rn, RegB: Rs) && Rn != AArch64::SP)) |
5480 | return Error(L: Loc[0], |
5481 | Msg: "unpredictable STXR instruction, status is also a source" ); |
5482 | break; |
5483 | } |
5484 | case AArch64::STXPW: |
5485 | case AArch64::STXPX: |
5486 | case AArch64::STLXPW: |
5487 | case AArch64::STLXPX: { |
5488 | unsigned Rs = Inst.getOperand(i: 0).getReg(); |
5489 | unsigned Rt1 = Inst.getOperand(i: 1).getReg(); |
5490 | unsigned Rt2 = Inst.getOperand(i: 2).getReg(); |
5491 | unsigned Rn = Inst.getOperand(i: 3).getReg(); |
5492 | if (RI->isSubRegisterEq(RegA: Rt1, RegB: Rs) || RI->isSubRegisterEq(RegA: Rt2, RegB: Rs) || |
5493 | (RI->isSubRegisterEq(RegA: Rn, RegB: Rs) && Rn != AArch64::SP)) |
5494 | return Error(L: Loc[0], |
5495 | Msg: "unpredictable STXP instruction, status is also a source" ); |
5496 | break; |
5497 | } |
5498 | case AArch64::LDRABwriteback: |
5499 | case AArch64::LDRAAwriteback: { |
5500 | unsigned Xt = Inst.getOperand(i: 0).getReg(); |
5501 | unsigned Xn = Inst.getOperand(i: 1).getReg(); |
5502 | if (Xt == Xn) |
5503 | return Error(L: Loc[0], |
5504 | Msg: "unpredictable LDRA instruction, writeback base" |
5505 | " is also a destination" ); |
5506 | break; |
5507 | } |
5508 | } |
5509 | |
5510 | // Check v8.8-A memops instructions. |
5511 | switch (Inst.getOpcode()) { |
5512 | case AArch64::CPYFP: |
5513 | case AArch64::CPYFPWN: |
5514 | case AArch64::CPYFPRN: |
5515 | case AArch64::CPYFPN: |
5516 | case AArch64::CPYFPWT: |
5517 | case AArch64::CPYFPWTWN: |
5518 | case AArch64::CPYFPWTRN: |
5519 | case AArch64::CPYFPWTN: |
5520 | case AArch64::CPYFPRT: |
5521 | case AArch64::CPYFPRTWN: |
5522 | case AArch64::CPYFPRTRN: |
5523 | case AArch64::CPYFPRTN: |
5524 | case AArch64::CPYFPT: |
5525 | case AArch64::CPYFPTWN: |
5526 | case AArch64::CPYFPTRN: |
5527 | case AArch64::CPYFPTN: |
5528 | case AArch64::CPYFM: |
5529 | case AArch64::CPYFMWN: |
5530 | case AArch64::CPYFMRN: |
5531 | case AArch64::CPYFMN: |
5532 | case AArch64::CPYFMWT: |
5533 | case AArch64::CPYFMWTWN: |
5534 | case AArch64::CPYFMWTRN: |
5535 | case AArch64::CPYFMWTN: |
5536 | case AArch64::CPYFMRT: |
5537 | case AArch64::CPYFMRTWN: |
5538 | case AArch64::CPYFMRTRN: |
5539 | case AArch64::CPYFMRTN: |
5540 | case AArch64::CPYFMT: |
5541 | case AArch64::CPYFMTWN: |
5542 | case AArch64::CPYFMTRN: |
5543 | case AArch64::CPYFMTN: |
5544 | case AArch64::CPYFE: |
5545 | case AArch64::CPYFEWN: |
5546 | case AArch64::CPYFERN: |
5547 | case AArch64::CPYFEN: |
5548 | case AArch64::CPYFEWT: |
5549 | case AArch64::CPYFEWTWN: |
5550 | case AArch64::CPYFEWTRN: |
5551 | case AArch64::CPYFEWTN: |
5552 | case AArch64::CPYFERT: |
5553 | case AArch64::CPYFERTWN: |
5554 | case AArch64::CPYFERTRN: |
5555 | case AArch64::CPYFERTN: |
5556 | case AArch64::CPYFET: |
5557 | case AArch64::CPYFETWN: |
5558 | case AArch64::CPYFETRN: |
5559 | case AArch64::CPYFETN: |
5560 | case AArch64::CPYP: |
5561 | case AArch64::CPYPWN: |
5562 | case AArch64::CPYPRN: |
5563 | case AArch64::CPYPN: |
5564 | case AArch64::CPYPWT: |
5565 | case AArch64::CPYPWTWN: |
5566 | case AArch64::CPYPWTRN: |
5567 | case AArch64::CPYPWTN: |
5568 | case AArch64::CPYPRT: |
5569 | case AArch64::CPYPRTWN: |
5570 | case AArch64::CPYPRTRN: |
5571 | case AArch64::CPYPRTN: |
5572 | case AArch64::CPYPT: |
5573 | case AArch64::CPYPTWN: |
5574 | case AArch64::CPYPTRN: |
5575 | case AArch64::CPYPTN: |
5576 | case AArch64::CPYM: |
5577 | case AArch64::CPYMWN: |
5578 | case AArch64::CPYMRN: |
5579 | case AArch64::CPYMN: |
5580 | case AArch64::CPYMWT: |
5581 | case AArch64::CPYMWTWN: |
5582 | case AArch64::CPYMWTRN: |
5583 | case AArch64::CPYMWTN: |
5584 | case AArch64::CPYMRT: |
5585 | case AArch64::CPYMRTWN: |
5586 | case AArch64::CPYMRTRN: |
5587 | case AArch64::CPYMRTN: |
5588 | case AArch64::CPYMT: |
5589 | case AArch64::CPYMTWN: |
5590 | case AArch64::CPYMTRN: |
5591 | case AArch64::CPYMTN: |
5592 | case AArch64::CPYE: |
5593 | case AArch64::CPYEWN: |
5594 | case AArch64::CPYERN: |
5595 | case AArch64::CPYEN: |
5596 | case AArch64::CPYEWT: |
5597 | case AArch64::CPYEWTWN: |
5598 | case AArch64::CPYEWTRN: |
5599 | case AArch64::CPYEWTN: |
5600 | case AArch64::CPYERT: |
5601 | case AArch64::CPYERTWN: |
5602 | case AArch64::CPYERTRN: |
5603 | case AArch64::CPYERTN: |
5604 | case AArch64::CPYET: |
5605 | case AArch64::CPYETWN: |
5606 | case AArch64::CPYETRN: |
5607 | case AArch64::CPYETN: { |
5608 | unsigned Xd_wb = Inst.getOperand(i: 0).getReg(); |
5609 | unsigned Xs_wb = Inst.getOperand(i: 1).getReg(); |
5610 | unsigned Xn_wb = Inst.getOperand(i: 2).getReg(); |
5611 | unsigned Xd = Inst.getOperand(i: 3).getReg(); |
5612 | unsigned Xs = Inst.getOperand(i: 4).getReg(); |
5613 | unsigned Xn = Inst.getOperand(i: 5).getReg(); |
5614 | if (Xd_wb != Xd) |
5615 | return Error(L: Loc[0], |
5616 | Msg: "invalid CPY instruction, Xd_wb and Xd do not match" ); |
5617 | if (Xs_wb != Xs) |
5618 | return Error(L: Loc[0], |
5619 | Msg: "invalid CPY instruction, Xs_wb and Xs do not match" ); |
5620 | if (Xn_wb != Xn) |
5621 | return Error(L: Loc[0], |
5622 | Msg: "invalid CPY instruction, Xn_wb and Xn do not match" ); |
5623 | if (Xd == Xs) |
5624 | return Error(L: Loc[0], Msg: "invalid CPY instruction, destination and source" |
5625 | " registers are the same" ); |
5626 | if (Xd == Xn) |
5627 | return Error(L: Loc[0], Msg: "invalid CPY instruction, destination and size" |
5628 | " registers are the same" ); |
5629 | if (Xs == Xn) |
5630 | return Error(L: Loc[0], Msg: "invalid CPY instruction, source and size" |
5631 | " registers are the same" ); |
5632 | break; |
5633 | } |
5634 | case AArch64::SETP: |
5635 | case AArch64::SETPT: |
5636 | case AArch64::SETPN: |
5637 | case AArch64::SETPTN: |
5638 | case AArch64::SETM: |
5639 | case AArch64::SETMT: |
5640 | case AArch64::SETMN: |
5641 | case AArch64::SETMTN: |
5642 | case AArch64::SETE: |
5643 | case AArch64::SETET: |
5644 | case AArch64::SETEN: |
5645 | case AArch64::SETETN: |
5646 | case AArch64::SETGP: |
5647 | case AArch64::SETGPT: |
5648 | case AArch64::SETGPN: |
5649 | case AArch64::SETGPTN: |
5650 | case AArch64::SETGM: |
5651 | case AArch64::SETGMT: |
5652 | case AArch64::SETGMN: |
5653 | case AArch64::SETGMTN: |
5654 | case AArch64::MOPSSETGE: |
5655 | case AArch64::MOPSSETGET: |
5656 | case AArch64::MOPSSETGEN: |
5657 | case AArch64::MOPSSETGETN: { |
5658 | unsigned Xd_wb = Inst.getOperand(i: 0).getReg(); |
5659 | unsigned Xn_wb = Inst.getOperand(i: 1).getReg(); |
5660 | unsigned Xd = Inst.getOperand(i: 2).getReg(); |
5661 | unsigned Xn = Inst.getOperand(i: 3).getReg(); |
5662 | unsigned Xm = Inst.getOperand(i: 4).getReg(); |
5663 | if (Xd_wb != Xd) |
5664 | return Error(L: Loc[0], |
5665 | Msg: "invalid SET instruction, Xd_wb and Xd do not match" ); |
5666 | if (Xn_wb != Xn) |
5667 | return Error(L: Loc[0], |
5668 | Msg: "invalid SET instruction, Xn_wb and Xn do not match" ); |
5669 | if (Xd == Xn) |
5670 | return Error(L: Loc[0], Msg: "invalid SET instruction, destination and size" |
5671 | " registers are the same" ); |
5672 | if (Xd == Xm) |
5673 | return Error(L: Loc[0], Msg: "invalid SET instruction, destination and source" |
5674 | " registers are the same" ); |
5675 | if (Xn == Xm) |
5676 | return Error(L: Loc[0], Msg: "invalid SET instruction, source and size" |
5677 | " registers are the same" ); |
5678 | break; |
5679 | } |
5680 | } |
5681 | |
5682 | // Now check immediate ranges. Separate from the above as there is overlap |
5683 | // in the instructions being checked and this keeps the nested conditionals |
5684 | // to a minimum. |
5685 | switch (Inst.getOpcode()) { |
5686 | case AArch64::ADDSWri: |
5687 | case AArch64::ADDSXri: |
5688 | case AArch64::ADDWri: |
5689 | case AArch64::ADDXri: |
5690 | case AArch64::SUBSWri: |
5691 | case AArch64::SUBSXri: |
5692 | case AArch64::SUBWri: |
5693 | case AArch64::SUBXri: { |
5694 | // Annoyingly we can't do this in the isAddSubImm predicate, so there is |
5695 | // some slight duplication here. |
5696 | if (Inst.getOperand(i: 2).isExpr()) { |
5697 | const MCExpr *Expr = Inst.getOperand(i: 2).getExpr(); |
5698 | AArch64MCExpr::VariantKind ELFRefKind; |
5699 | MCSymbolRefExpr::VariantKind DarwinRefKind; |
5700 | int64_t Addend; |
5701 | if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) { |
5702 | |
5703 | // Only allow these with ADDXri. |
5704 | if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF || |
5705 | DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) && |
5706 | Inst.getOpcode() == AArch64::ADDXri) |
5707 | return false; |
5708 | |
5709 | // Only allow these with ADDXri/ADDWri |
5710 | if ((ELFRefKind == AArch64MCExpr::VK_LO12 || |
5711 | ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 || |
5712 | ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 || |
5713 | ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC || |
5714 | ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 || |
5715 | ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 || |
5716 | ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC || |
5717 | ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 || |
5718 | ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 || |
5719 | ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) && |
5720 | (Inst.getOpcode() == AArch64::ADDXri || |
5721 | Inst.getOpcode() == AArch64::ADDWri)) |
5722 | return false; |
5723 | |
5724 | // Don't allow symbol refs in the immediate field otherwise |
5725 | // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of |
5726 | // operands of the original instruction (i.e. 'add w0, w1, borked' vs |
5727 | // 'cmp w0, 'borked') |
5728 | return Error(L: Loc.back(), Msg: "invalid immediate expression" ); |
5729 | } |
5730 | // We don't validate more complex expressions here |
5731 | } |
5732 | return false; |
5733 | } |
5734 | default: |
5735 | return false; |
5736 | } |
5737 | } |
5738 | |
/// Forward declaration of the mnemonic spell checker (defined later in this
/// file, alongside the tablegen'd matcher tables — TODO confirm it is emitted
/// by the generated matcher include). Given an unrecognized mnemonic \p S and
/// the currently available feature bits \p FBS, returns a suggestion string
/// (e.g. ", did you mean 'add'?") or an empty string if no close candidate
/// exists; showMatchError appends the result directly to its
/// "unrecognized instruction mnemonic" diagnostic.
static std::string AArch64MnemonicSpellCheck(StringRef S,
                                             const FeatureBitset &FBS,
                                             unsigned VariantID = 0);
5742 | |
5743 | bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode, |
5744 | uint64_t ErrorInfo, |
5745 | OperandVector &Operands) { |
5746 | switch (ErrCode) { |
5747 | case Match_InvalidTiedOperand: { |
5748 | auto &Op = static_cast<const AArch64Operand &>(*Operands[ErrorInfo]); |
5749 | if (Op.isVectorList()) |
5750 | return Error(L: Loc, Msg: "operand must match destination register list" ); |
5751 | |
5752 | assert(Op.isReg() && "Unexpected operand type" ); |
5753 | switch (Op.getRegEqualityTy()) { |
5754 | case RegConstraintEqualityTy::EqualsSubReg: |
5755 | return Error(L: Loc, Msg: "operand must be 64-bit form of destination register" ); |
5756 | case RegConstraintEqualityTy::EqualsSuperReg: |
5757 | return Error(L: Loc, Msg: "operand must be 32-bit form of destination register" ); |
5758 | case RegConstraintEqualityTy::EqualsReg: |
5759 | return Error(L: Loc, Msg: "operand must match destination register" ); |
5760 | } |
5761 | llvm_unreachable("Unknown RegConstraintEqualityTy" ); |
5762 | } |
5763 | case Match_MissingFeature: |
5764 | return Error(L: Loc, |
5765 | Msg: "instruction requires a CPU feature not currently enabled" ); |
5766 | case Match_InvalidOperand: |
5767 | return Error(L: Loc, Msg: "invalid operand for instruction" ); |
5768 | case Match_InvalidSuffix: |
5769 | return Error(L: Loc, Msg: "invalid type suffix for instruction" ); |
5770 | case Match_InvalidCondCode: |
5771 | return Error(L: Loc, Msg: "expected AArch64 condition code" ); |
5772 | case Match_AddSubRegExtendSmall: |
5773 | return Error(L: Loc, |
5774 | Msg: "expected '[su]xt[bhw]' with optional integer in range [0, 4]" ); |
5775 | case Match_AddSubRegExtendLarge: |
5776 | return Error(L: Loc, |
5777 | Msg: "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]" ); |
5778 | case Match_AddSubSecondSource: |
5779 | return Error(L: Loc, |
5780 | Msg: "expected compatible register, symbol or integer in range [0, 4095]" ); |
5781 | case Match_LogicalSecondSource: |
5782 | return Error(L: Loc, Msg: "expected compatible register or logical immediate" ); |
5783 | case Match_InvalidMovImm32Shift: |
5784 | return Error(L: Loc, Msg: "expected 'lsl' with optional integer 0 or 16" ); |
5785 | case Match_InvalidMovImm64Shift: |
5786 | return Error(L: Loc, Msg: "expected 'lsl' with optional integer 0, 16, 32 or 48" ); |
5787 | case Match_AddSubRegShift32: |
5788 | return Error(L: Loc, |
5789 | Msg: "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]" ); |
5790 | case Match_AddSubRegShift64: |
5791 | return Error(L: Loc, |
5792 | Msg: "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]" ); |
5793 | case Match_InvalidFPImm: |
5794 | return Error(L: Loc, |
5795 | Msg: "expected compatible register or floating-point constant" ); |
5796 | case Match_InvalidMemoryIndexedSImm6: |
5797 | return Error(L: Loc, Msg: "index must be an integer in range [-32, 31]." ); |
5798 | case Match_InvalidMemoryIndexedSImm5: |
5799 | return Error(L: Loc, Msg: "index must be an integer in range [-16, 15]." ); |
5800 | case Match_InvalidMemoryIndexed1SImm4: |
5801 | return Error(L: Loc, Msg: "index must be an integer in range [-8, 7]." ); |
5802 | case Match_InvalidMemoryIndexed2SImm4: |
5803 | return Error(L: Loc, Msg: "index must be a multiple of 2 in range [-16, 14]." ); |
5804 | case Match_InvalidMemoryIndexed3SImm4: |
5805 | return Error(L: Loc, Msg: "index must be a multiple of 3 in range [-24, 21]." ); |
5806 | case Match_InvalidMemoryIndexed4SImm4: |
5807 | return Error(L: Loc, Msg: "index must be a multiple of 4 in range [-32, 28]." ); |
5808 | case Match_InvalidMemoryIndexed16SImm4: |
5809 | return Error(L: Loc, Msg: "index must be a multiple of 16 in range [-128, 112]." ); |
5810 | case Match_InvalidMemoryIndexed32SImm4: |
5811 | return Error(L: Loc, Msg: "index must be a multiple of 32 in range [-256, 224]." ); |
5812 | case Match_InvalidMemoryIndexed1SImm6: |
5813 | return Error(L: Loc, Msg: "index must be an integer in range [-32, 31]." ); |
5814 | case Match_InvalidMemoryIndexedSImm8: |
5815 | return Error(L: Loc, Msg: "index must be an integer in range [-128, 127]." ); |
5816 | case Match_InvalidMemoryIndexedSImm9: |
5817 | return Error(L: Loc, Msg: "index must be an integer in range [-256, 255]." ); |
5818 | case Match_InvalidMemoryIndexed16SImm9: |
5819 | return Error(L: Loc, Msg: "index must be a multiple of 16 in range [-4096, 4080]." ); |
5820 | case Match_InvalidMemoryIndexed8SImm10: |
5821 | return Error(L: Loc, Msg: "index must be a multiple of 8 in range [-4096, 4088]." ); |
5822 | case Match_InvalidMemoryIndexed4SImm7: |
5823 | return Error(L: Loc, Msg: "index must be a multiple of 4 in range [-256, 252]." ); |
5824 | case Match_InvalidMemoryIndexed8SImm7: |
5825 | return Error(L: Loc, Msg: "index must be a multiple of 8 in range [-512, 504]." ); |
5826 | case Match_InvalidMemoryIndexed16SImm7: |
5827 | return Error(L: Loc, Msg: "index must be a multiple of 16 in range [-1024, 1008]." ); |
5828 | case Match_InvalidMemoryIndexed8UImm5: |
5829 | return Error(L: Loc, Msg: "index must be a multiple of 8 in range [0, 248]." ); |
5830 | case Match_InvalidMemoryIndexed8UImm3: |
5831 | return Error(L: Loc, Msg: "index must be a multiple of 8 in range [0, 56]." ); |
5832 | case Match_InvalidMemoryIndexed4UImm5: |
5833 | return Error(L: Loc, Msg: "index must be a multiple of 4 in range [0, 124]." ); |
5834 | case Match_InvalidMemoryIndexed2UImm5: |
5835 | return Error(L: Loc, Msg: "index must be a multiple of 2 in range [0, 62]." ); |
5836 | case Match_InvalidMemoryIndexed8UImm6: |
5837 | return Error(L: Loc, Msg: "index must be a multiple of 8 in range [0, 504]." ); |
5838 | case Match_InvalidMemoryIndexed16UImm6: |
5839 | return Error(L: Loc, Msg: "index must be a multiple of 16 in range [0, 1008]." ); |
5840 | case Match_InvalidMemoryIndexed4UImm6: |
5841 | return Error(L: Loc, Msg: "index must be a multiple of 4 in range [0, 252]." ); |
5842 | case Match_InvalidMemoryIndexed2UImm6: |
5843 | return Error(L: Loc, Msg: "index must be a multiple of 2 in range [0, 126]." ); |
5844 | case Match_InvalidMemoryIndexed1UImm6: |
5845 | return Error(L: Loc, Msg: "index must be in range [0, 63]." ); |
5846 | case Match_InvalidMemoryWExtend8: |
5847 | return Error(L: Loc, |
5848 | Msg: "expected 'uxtw' or 'sxtw' with optional shift of #0" ); |
5849 | case Match_InvalidMemoryWExtend16: |
5850 | return Error(L: Loc, |
5851 | Msg: "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1" ); |
5852 | case Match_InvalidMemoryWExtend32: |
5853 | return Error(L: Loc, |
5854 | Msg: "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2" ); |
5855 | case Match_InvalidMemoryWExtend64: |
5856 | return Error(L: Loc, |
5857 | Msg: "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3" ); |
5858 | case Match_InvalidMemoryWExtend128: |
5859 | return Error(L: Loc, |
5860 | Msg: "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4" ); |
5861 | case Match_InvalidMemoryXExtend8: |
5862 | return Error(L: Loc, |
5863 | Msg: "expected 'lsl' or 'sxtx' with optional shift of #0" ); |
5864 | case Match_InvalidMemoryXExtend16: |
5865 | return Error(L: Loc, |
5866 | Msg: "expected 'lsl' or 'sxtx' with optional shift of #0 or #1" ); |
5867 | case Match_InvalidMemoryXExtend32: |
5868 | return Error(L: Loc, |
5869 | Msg: "expected 'lsl' or 'sxtx' with optional shift of #0 or #2" ); |
5870 | case Match_InvalidMemoryXExtend64: |
5871 | return Error(L: Loc, |
5872 | Msg: "expected 'lsl' or 'sxtx' with optional shift of #0 or #3" ); |
5873 | case Match_InvalidMemoryXExtend128: |
5874 | return Error(L: Loc, |
5875 | Msg: "expected 'lsl' or 'sxtx' with optional shift of #0 or #4" ); |
5876 | case Match_InvalidMemoryIndexed1: |
5877 | return Error(L: Loc, Msg: "index must be an integer in range [0, 4095]." ); |
5878 | case Match_InvalidMemoryIndexed2: |
5879 | return Error(L: Loc, Msg: "index must be a multiple of 2 in range [0, 8190]." ); |
5880 | case Match_InvalidMemoryIndexed4: |
5881 | return Error(L: Loc, Msg: "index must be a multiple of 4 in range [0, 16380]." ); |
5882 | case Match_InvalidMemoryIndexed8: |
5883 | return Error(L: Loc, Msg: "index must be a multiple of 8 in range [0, 32760]." ); |
5884 | case Match_InvalidMemoryIndexed16: |
5885 | return Error(L: Loc, Msg: "index must be a multiple of 16 in range [0, 65520]." ); |
5886 | case Match_InvalidImm0_0: |
5887 | return Error(L: Loc, Msg: "immediate must be 0." ); |
5888 | case Match_InvalidImm0_1: |
5889 | return Error(L: Loc, Msg: "immediate must be an integer in range [0, 1]." ); |
5890 | case Match_InvalidImm0_3: |
5891 | return Error(L: Loc, Msg: "immediate must be an integer in range [0, 3]." ); |
5892 | case Match_InvalidImm0_7: |
5893 | return Error(L: Loc, Msg: "immediate must be an integer in range [0, 7]." ); |
5894 | case Match_InvalidImm0_15: |
5895 | return Error(L: Loc, Msg: "immediate must be an integer in range [0, 15]." ); |
5896 | case Match_InvalidImm0_31: |
5897 | return Error(L: Loc, Msg: "immediate must be an integer in range [0, 31]." ); |
5898 | case Match_InvalidImm0_63: |
5899 | return Error(L: Loc, Msg: "immediate must be an integer in range [0, 63]." ); |
5900 | case Match_InvalidImm0_127: |
5901 | return Error(L: Loc, Msg: "immediate must be an integer in range [0, 127]." ); |
5902 | case Match_InvalidImm0_255: |
5903 | return Error(L: Loc, Msg: "immediate must be an integer in range [0, 255]." ); |
5904 | case Match_InvalidImm0_65535: |
5905 | return Error(L: Loc, Msg: "immediate must be an integer in range [0, 65535]." ); |
5906 | case Match_InvalidImm1_8: |
5907 | return Error(L: Loc, Msg: "immediate must be an integer in range [1, 8]." ); |
5908 | case Match_InvalidImm1_16: |
5909 | return Error(L: Loc, Msg: "immediate must be an integer in range [1, 16]." ); |
5910 | case Match_InvalidImm1_32: |
5911 | return Error(L: Loc, Msg: "immediate must be an integer in range [1, 32]." ); |
5912 | case Match_InvalidImm1_64: |
5913 | return Error(L: Loc, Msg: "immediate must be an integer in range [1, 64]." ); |
5914 | case Match_InvalidMemoryIndexedRange2UImm0: |
5915 | return Error(L: Loc, Msg: "vector select offset must be the immediate range 0:1." ); |
5916 | case Match_InvalidMemoryIndexedRange2UImm1: |
5917 | return Error(L: Loc, Msg: "vector select offset must be an immediate range of the " |
5918 | "form <immf>:<imml>, where the first " |
5919 | "immediate is a multiple of 2 in the range [0, 2], and " |
5920 | "the second immediate is immf + 1." ); |
5921 | case Match_InvalidMemoryIndexedRange2UImm2: |
5922 | case Match_InvalidMemoryIndexedRange2UImm3: |
5923 | return Error( |
5924 | L: Loc, |
5925 | Msg: "vector select offset must be an immediate range of the form " |
5926 | "<immf>:<imml>, " |
5927 | "where the first immediate is a multiple of 2 in the range [0, 6] or " |
5928 | "[0, 14] " |
5929 | "depending on the instruction, and the second immediate is immf + 1." ); |
5930 | case Match_InvalidMemoryIndexedRange4UImm0: |
5931 | return Error(L: Loc, Msg: "vector select offset must be the immediate range 0:3." ); |
5932 | case Match_InvalidMemoryIndexedRange4UImm1: |
5933 | case Match_InvalidMemoryIndexedRange4UImm2: |
5934 | return Error( |
5935 | L: Loc, |
5936 | Msg: "vector select offset must be an immediate range of the form " |
5937 | "<immf>:<imml>, " |
5938 | "where the first immediate is a multiple of 4 in the range [0, 4] or " |
5939 | "[0, 12] " |
5940 | "depending on the instruction, and the second immediate is immf + 3." ); |
5941 | case Match_InvalidSVEAddSubImm8: |
5942 | return Error(L: Loc, Msg: "immediate must be an integer in range [0, 255]" |
5943 | " with a shift amount of 0" ); |
5944 | case Match_InvalidSVEAddSubImm16: |
5945 | case Match_InvalidSVEAddSubImm32: |
5946 | case Match_InvalidSVEAddSubImm64: |
5947 | return Error(L: Loc, Msg: "immediate must be an integer in range [0, 255] or a " |
5948 | "multiple of 256 in range [256, 65280]" ); |
5949 | case Match_InvalidSVECpyImm8: |
5950 | return Error(L: Loc, Msg: "immediate must be an integer in range [-128, 255]" |
5951 | " with a shift amount of 0" ); |
5952 | case Match_InvalidSVECpyImm16: |
5953 | return Error(L: Loc, Msg: "immediate must be an integer in range [-128, 127] or a " |
5954 | "multiple of 256 in range [-32768, 65280]" ); |
5955 | case Match_InvalidSVECpyImm32: |
5956 | case Match_InvalidSVECpyImm64: |
5957 | return Error(L: Loc, Msg: "immediate must be an integer in range [-128, 127] or a " |
5958 | "multiple of 256 in range [-32768, 32512]" ); |
5959 | case Match_InvalidIndexRange0_0: |
5960 | return Error(L: Loc, Msg: "expected lane specifier '[0]'" ); |
5961 | case Match_InvalidIndexRange1_1: |
5962 | return Error(L: Loc, Msg: "expected lane specifier '[1]'" ); |
5963 | case Match_InvalidIndexRange0_15: |
5964 | return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 15]." ); |
5965 | case Match_InvalidIndexRange0_7: |
5966 | return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 7]." ); |
5967 | case Match_InvalidIndexRange0_3: |
5968 | return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 3]." ); |
5969 | case Match_InvalidIndexRange0_1: |
5970 | return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 1]." ); |
5971 | case Match_InvalidSVEIndexRange0_63: |
5972 | return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 63]." ); |
5973 | case Match_InvalidSVEIndexRange0_31: |
5974 | return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 31]." ); |
5975 | case Match_InvalidSVEIndexRange0_15: |
5976 | return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 15]." ); |
5977 | case Match_InvalidSVEIndexRange0_7: |
5978 | return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 7]." ); |
5979 | case Match_InvalidSVEIndexRange0_3: |
5980 | return Error(L: Loc, Msg: "vector lane must be an integer in range [0, 3]." ); |
5981 | case Match_InvalidLabel: |
5982 | return Error(L: Loc, Msg: "expected label or encodable integer pc offset" ); |
5983 | case Match_MRS: |
5984 | return Error(L: Loc, Msg: "expected readable system register" ); |
5985 | case Match_MSR: |
5986 | case Match_InvalidSVCR: |
5987 | return Error(L: Loc, Msg: "expected writable system register or pstate" ); |
5988 | case Match_InvalidComplexRotationEven: |
5989 | return Error(L: Loc, Msg: "complex rotation must be 0, 90, 180 or 270." ); |
5990 | case Match_InvalidComplexRotationOdd: |
5991 | return Error(L: Loc, Msg: "complex rotation must be 90 or 270." ); |
5992 | case Match_MnemonicFail: { |
5993 | std::string Suggestion = AArch64MnemonicSpellCheck( |
5994 | S: ((AArch64Operand &)*Operands[0]).getToken(), |
5995 | FBS: ComputeAvailableFeatures(FB: STI->getFeatureBits())); |
5996 | return Error(L: Loc, Msg: "unrecognized instruction mnemonic" + Suggestion); |
5997 | } |
5998 | case Match_InvalidGPR64shifted8: |
5999 | return Error(L: Loc, Msg: "register must be x0..x30 or xzr, without shift" ); |
6000 | case Match_InvalidGPR64shifted16: |
6001 | return Error(L: Loc, Msg: "register must be x0..x30 or xzr, with required shift 'lsl #1'" ); |
6002 | case Match_InvalidGPR64shifted32: |
6003 | return Error(L: Loc, Msg: "register must be x0..x30 or xzr, with required shift 'lsl #2'" ); |
6004 | case Match_InvalidGPR64shifted64: |
6005 | return Error(L: Loc, Msg: "register must be x0..x30 or xzr, with required shift 'lsl #3'" ); |
6006 | case Match_InvalidGPR64shifted128: |
6007 | return Error( |
6008 | L: Loc, Msg: "register must be x0..x30 or xzr, with required shift 'lsl #4'" ); |
6009 | case Match_InvalidGPR64NoXZRshifted8: |
6010 | return Error(L: Loc, Msg: "register must be x0..x30 without shift" ); |
6011 | case Match_InvalidGPR64NoXZRshifted16: |
6012 | return Error(L: Loc, Msg: "register must be x0..x30 with required shift 'lsl #1'" ); |
6013 | case Match_InvalidGPR64NoXZRshifted32: |
6014 | return Error(L: Loc, Msg: "register must be x0..x30 with required shift 'lsl #2'" ); |
6015 | case Match_InvalidGPR64NoXZRshifted64: |
6016 | return Error(L: Loc, Msg: "register must be x0..x30 with required shift 'lsl #3'" ); |
6017 | case Match_InvalidGPR64NoXZRshifted128: |
6018 | return Error(L: Loc, Msg: "register must be x0..x30 with required shift 'lsl #4'" ); |
6019 | case Match_InvalidZPR32UXTW8: |
6020 | case Match_InvalidZPR32SXTW8: |
6021 | return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'" ); |
6022 | case Match_InvalidZPR32UXTW16: |
6023 | case Match_InvalidZPR32SXTW16: |
6024 | return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'" ); |
6025 | case Match_InvalidZPR32UXTW32: |
6026 | case Match_InvalidZPR32SXTW32: |
6027 | return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'" ); |
6028 | case Match_InvalidZPR32UXTW64: |
6029 | case Match_InvalidZPR32SXTW64: |
6030 | return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'" ); |
6031 | case Match_InvalidZPR64UXTW8: |
6032 | case Match_InvalidZPR64SXTW8: |
6033 | return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'" ); |
6034 | case Match_InvalidZPR64UXTW16: |
6035 | case Match_InvalidZPR64SXTW16: |
6036 | return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'" ); |
6037 | case Match_InvalidZPR64UXTW32: |
6038 | case Match_InvalidZPR64SXTW32: |
6039 | return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'" ); |
6040 | case Match_InvalidZPR64UXTW64: |
6041 | case Match_InvalidZPR64SXTW64: |
6042 | return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'" ); |
6043 | case Match_InvalidZPR32LSL8: |
6044 | return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s'" ); |
6045 | case Match_InvalidZPR32LSL16: |
6046 | return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'" ); |
6047 | case Match_InvalidZPR32LSL32: |
6048 | return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'" ); |
6049 | case Match_InvalidZPR32LSL64: |
6050 | return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'" ); |
6051 | case Match_InvalidZPR64LSL8: |
6052 | return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d'" ); |
6053 | case Match_InvalidZPR64LSL16: |
6054 | return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'" ); |
6055 | case Match_InvalidZPR64LSL32: |
6056 | return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'" ); |
6057 | case Match_InvalidZPR64LSL64: |
6058 | return Error(L: Loc, Msg: "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'" ); |
6059 | case Match_InvalidZPR0: |
6060 | return Error(L: Loc, Msg: "expected register without element width suffix" ); |
6061 | case Match_InvalidZPR8: |
6062 | case Match_InvalidZPR16: |
6063 | case Match_InvalidZPR32: |
6064 | case Match_InvalidZPR64: |
6065 | case Match_InvalidZPR128: |
6066 | return Error(L: Loc, Msg: "invalid element width" ); |
6067 | case Match_InvalidZPR_3b8: |
6068 | return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.b..z7.b" ); |
6069 | case Match_InvalidZPR_3b16: |
6070 | return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.h..z7.h" ); |
6071 | case Match_InvalidZPR_3b32: |
6072 | return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.s..z7.s" ); |
6073 | case Match_InvalidZPR_4b8: |
6074 | return Error(L: Loc, |
6075 | Msg: "Invalid restricted vector register, expected z0.b..z15.b" ); |
6076 | case Match_InvalidZPR_4b16: |
6077 | return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.h..z15.h" ); |
6078 | case Match_InvalidZPR_4b32: |
6079 | return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.s..z15.s" ); |
6080 | case Match_InvalidZPR_4b64: |
6081 | return Error(L: Loc, Msg: "Invalid restricted vector register, expected z0.d..z15.d" ); |
6082 | case Match_InvalidSVEPattern: |
6083 | return Error(L: Loc, Msg: "invalid predicate pattern" ); |
6084 | case Match_InvalidSVEPPRorPNRAnyReg: |
6085 | case Match_InvalidSVEPPRorPNRBReg: |
6086 | case Match_InvalidSVEPredicateAnyReg: |
6087 | case Match_InvalidSVEPredicateBReg: |
6088 | case Match_InvalidSVEPredicateHReg: |
6089 | case Match_InvalidSVEPredicateSReg: |
6090 | case Match_InvalidSVEPredicateDReg: |
6091 | return Error(L: Loc, Msg: "invalid predicate register." ); |
6092 | case Match_InvalidSVEPredicate3bAnyReg: |
6093 | return Error(L: Loc, Msg: "invalid restricted predicate register, expected p0..p7 (without element suffix)" ); |
6094 | case Match_InvalidSVEPNPredicateB_p8to15Reg: |
6095 | case Match_InvalidSVEPNPredicateH_p8to15Reg: |
6096 | case Match_InvalidSVEPNPredicateS_p8to15Reg: |
6097 | case Match_InvalidSVEPNPredicateD_p8to15Reg: |
6098 | return Error(L: Loc, Msg: "Invalid predicate register, expected PN in range " |
6099 | "pn8..pn15 with element suffix." ); |
6100 | case Match_InvalidSVEPNPredicateAny_p8to15Reg: |
6101 | return Error(L: Loc, Msg: "invalid restricted predicate-as-counter register " |
6102 | "expected pn8..pn15" ); |
6103 | case Match_InvalidSVEPNPredicateBReg: |
6104 | case Match_InvalidSVEPNPredicateHReg: |
6105 | case Match_InvalidSVEPNPredicateSReg: |
6106 | case Match_InvalidSVEPNPredicateDReg: |
6107 | return Error(L: Loc, Msg: "Invalid predicate register, expected PN in range " |
6108 | "pn0..pn15 with element suffix." ); |
6109 | case Match_InvalidSVEVecLenSpecifier: |
6110 | return Error(L: Loc, Msg: "Invalid vector length specifier, expected VLx2 or VLx4" ); |
6111 | case Match_InvalidSVEPredicateListMul2x8: |
6112 | case Match_InvalidSVEPredicateListMul2x16: |
6113 | case Match_InvalidSVEPredicateListMul2x32: |
6114 | case Match_InvalidSVEPredicateListMul2x64: |
6115 | return Error(L: Loc, Msg: "Invalid vector list, expected list with 2 consecutive " |
6116 | "predicate registers, where the first vector is a multiple of 2 " |
6117 | "and with correct element type" ); |
6118 | case Match_InvalidSVEExactFPImmOperandHalfOne: |
6119 | return Error(L: Loc, Msg: "Invalid floating point constant, expected 0.5 or 1.0." ); |
6120 | case Match_InvalidSVEExactFPImmOperandHalfTwo: |
6121 | return Error(L: Loc, Msg: "Invalid floating point constant, expected 0.5 or 2.0." ); |
6122 | case Match_InvalidSVEExactFPImmOperandZeroOne: |
6123 | return Error(L: Loc, Msg: "Invalid floating point constant, expected 0.0 or 1.0." ); |
6124 | case Match_InvalidMatrixTileVectorH8: |
6125 | case Match_InvalidMatrixTileVectorV8: |
6126 | return Error(L: Loc, Msg: "invalid matrix operand, expected za0h.b or za0v.b" ); |
6127 | case Match_InvalidMatrixTileVectorH16: |
6128 | case Match_InvalidMatrixTileVectorV16: |
6129 | return Error(L: Loc, |
6130 | Msg: "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h" ); |
6131 | case Match_InvalidMatrixTileVectorH32: |
6132 | case Match_InvalidMatrixTileVectorV32: |
6133 | return Error(L: Loc, |
6134 | Msg: "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s" ); |
6135 | case Match_InvalidMatrixTileVectorH64: |
6136 | case Match_InvalidMatrixTileVectorV64: |
6137 | return Error(L: Loc, |
6138 | Msg: "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d" ); |
6139 | case Match_InvalidMatrixTileVectorH128: |
6140 | case Match_InvalidMatrixTileVectorV128: |
6141 | return Error(L: Loc, |
6142 | Msg: "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q" ); |
6143 | case Match_InvalidMatrixTile32: |
6144 | return Error(L: Loc, Msg: "invalid matrix operand, expected za[0-3].s" ); |
6145 | case Match_InvalidMatrixTile64: |
6146 | return Error(L: Loc, Msg: "invalid matrix operand, expected za[0-7].d" ); |
6147 | case Match_InvalidMatrix: |
6148 | return Error(L: Loc, Msg: "invalid matrix operand, expected za" ); |
6149 | case Match_InvalidMatrix8: |
6150 | return Error(L: Loc, Msg: "invalid matrix operand, expected suffix .b" ); |
6151 | case Match_InvalidMatrix16: |
6152 | return Error(L: Loc, Msg: "invalid matrix operand, expected suffix .h" ); |
6153 | case Match_InvalidMatrix32: |
6154 | return Error(L: Loc, Msg: "invalid matrix operand, expected suffix .s" ); |
6155 | case Match_InvalidMatrix64: |
6156 | return Error(L: Loc, Msg: "invalid matrix operand, expected suffix .d" ); |
6157 | case Match_InvalidMatrixIndexGPR32_12_15: |
6158 | return Error(L: Loc, Msg: "operand must be a register in range [w12, w15]" ); |
6159 | case Match_InvalidMatrixIndexGPR32_8_11: |
6160 | return Error(L: Loc, Msg: "operand must be a register in range [w8, w11]" ); |
6161 | case Match_InvalidSVEVectorListMul2x8: |
6162 | case Match_InvalidSVEVectorListMul2x16: |
6163 | case Match_InvalidSVEVectorListMul2x32: |
6164 | case Match_InvalidSVEVectorListMul2x64: |
6165 | case Match_InvalidSVEVectorListMul2x128: |
6166 | return Error(L: Loc, Msg: "Invalid vector list, expected list with 2 consecutive " |
6167 | "SVE vectors, where the first vector is a multiple of 2 " |
6168 | "and with matching element types" ); |
6169 | case Match_InvalidSVEVectorListMul4x8: |
6170 | case Match_InvalidSVEVectorListMul4x16: |
6171 | case Match_InvalidSVEVectorListMul4x32: |
6172 | case Match_InvalidSVEVectorListMul4x64: |
6173 | case Match_InvalidSVEVectorListMul4x128: |
6174 | return Error(L: Loc, Msg: "Invalid vector list, expected list with 4 consecutive " |
6175 | "SVE vectors, where the first vector is a multiple of 4 " |
6176 | "and with matching element types" ); |
6177 | case Match_InvalidLookupTable: |
6178 | return Error(L: Loc, Msg: "Invalid lookup table, expected zt0" ); |
6179 | case Match_InvalidSVEVectorListStrided2x8: |
6180 | case Match_InvalidSVEVectorListStrided2x16: |
6181 | case Match_InvalidSVEVectorListStrided2x32: |
6182 | case Match_InvalidSVEVectorListStrided2x64: |
6183 | return Error( |
6184 | L: Loc, |
6185 | Msg: "Invalid vector list, expected list with each SVE vector in the list " |
6186 | "8 registers apart, and the first register in the range [z0, z7] or " |
6187 | "[z16, z23] and with correct element type" ); |
6188 | case Match_InvalidSVEVectorListStrided4x8: |
6189 | case Match_InvalidSVEVectorListStrided4x16: |
6190 | case Match_InvalidSVEVectorListStrided4x32: |
6191 | case Match_InvalidSVEVectorListStrided4x64: |
6192 | return Error( |
6193 | L: Loc, |
6194 | Msg: "Invalid vector list, expected list with each SVE vector in the list " |
6195 | "4 registers apart, and the first register in the range [z0, z3] or " |
6196 | "[z16, z19] and with correct element type" ); |
6197 | case Match_AddSubLSLImm3ShiftLarge: |
6198 | return Error(L: Loc, |
6199 | Msg: "expected 'lsl' with optional integer in range [0, 7]" ); |
6200 | default: |
6201 | llvm_unreachable("unexpected error code!" ); |
6202 | } |
6203 | } |
6204 | |
6205 | static const char *getSubtargetFeatureName(uint64_t Val); |
6206 | |
6207 | bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, |
6208 | OperandVector &Operands, |
6209 | MCStreamer &Out, |
6210 | uint64_t &ErrorInfo, |
6211 | bool MatchingInlineAsm) { |
6212 | assert(!Operands.empty() && "Unexpect empty operand list!" ); |
6213 | AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]); |
6214 | assert(Op.isToken() && "Leading operand should always be a mnemonic!" ); |
6215 | |
6216 | StringRef Tok = Op.getToken(); |
6217 | unsigned NumOperands = Operands.size(); |
6218 | |
6219 | if (NumOperands == 4 && Tok == "lsl" ) { |
6220 | AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]); |
6221 | AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]); |
6222 | if (Op2.isScalarReg() && Op3.isImm()) { |
6223 | const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Val: Op3.getImm()); |
6224 | if (Op3CE) { |
6225 | uint64_t Op3Val = Op3CE->getValue(); |
6226 | uint64_t NewOp3Val = 0; |
6227 | uint64_t NewOp4Val = 0; |
6228 | if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains( |
6229 | Reg: Op2.getReg())) { |
6230 | NewOp3Val = (32 - Op3Val) & 0x1f; |
6231 | NewOp4Val = 31 - Op3Val; |
6232 | } else { |
6233 | NewOp3Val = (64 - Op3Val) & 0x3f; |
6234 | NewOp4Val = 63 - Op3Val; |
6235 | } |
6236 | |
6237 | const MCExpr *NewOp3 = MCConstantExpr::create(Value: NewOp3Val, Ctx&: getContext()); |
6238 | const MCExpr *NewOp4 = MCConstantExpr::create(Value: NewOp4Val, Ctx&: getContext()); |
6239 | |
6240 | Operands[0] = |
6241 | AArch64Operand::CreateToken(Str: "ubfm" , S: Op.getStartLoc(), Ctx&: getContext()); |
6242 | Operands.push_back(Elt: AArch64Operand::CreateImm( |
6243 | Val: NewOp4, S: Op3.getStartLoc(), E: Op3.getEndLoc(), Ctx&: getContext())); |
6244 | Operands[3] = AArch64Operand::CreateImm(Val: NewOp3, S: Op3.getStartLoc(), |
6245 | E: Op3.getEndLoc(), Ctx&: getContext()); |
6246 | } |
6247 | } |
6248 | } else if (NumOperands == 4 && Tok == "bfc" ) { |
6249 | // FIXME: Horrible hack to handle BFC->BFM alias. |
6250 | AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]); |
6251 | AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]); |
6252 | AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]); |
6253 | |
6254 | if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) { |
6255 | const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(Val: LSBOp.getImm()); |
6256 | const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(Val: WidthOp.getImm()); |
6257 | |
6258 | if (LSBCE && WidthCE) { |
6259 | uint64_t LSB = LSBCE->getValue(); |
6260 | uint64_t Width = WidthCE->getValue(); |
6261 | |
6262 | uint64_t RegWidth = 0; |
6263 | if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains( |
6264 | Reg: Op1.getReg())) |
6265 | RegWidth = 64; |
6266 | else |
6267 | RegWidth = 32; |
6268 | |
6269 | if (LSB >= RegWidth) |
6270 | return Error(L: LSBOp.getStartLoc(), |
6271 | Msg: "expected integer in range [0, 31]" ); |
6272 | if (Width < 1 || Width > RegWidth) |
6273 | return Error(L: WidthOp.getStartLoc(), |
6274 | Msg: "expected integer in range [1, 32]" ); |
6275 | |
6276 | uint64_t ImmR = 0; |
6277 | if (RegWidth == 32) |
6278 | ImmR = (32 - LSB) & 0x1f; |
6279 | else |
6280 | ImmR = (64 - LSB) & 0x3f; |
6281 | |
6282 | uint64_t ImmS = Width - 1; |
6283 | |
6284 | if (ImmR != 0 && ImmS >= ImmR) |
6285 | return Error(L: WidthOp.getStartLoc(), |
6286 | Msg: "requested insert overflows register" ); |
6287 | |
6288 | const MCExpr *ImmRExpr = MCConstantExpr::create(Value: ImmR, Ctx&: getContext()); |
6289 | const MCExpr *ImmSExpr = MCConstantExpr::create(Value: ImmS, Ctx&: getContext()); |
6290 | Operands[0] = |
6291 | AArch64Operand::CreateToken(Str: "bfm" , S: Op.getStartLoc(), Ctx&: getContext()); |
6292 | Operands[2] = AArch64Operand::CreateReg( |
6293 | RegNum: RegWidth == 32 ? AArch64::WZR : AArch64::XZR, Kind: RegKind::Scalar, |
6294 | S: SMLoc(), E: SMLoc(), Ctx&: getContext()); |
6295 | Operands[3] = AArch64Operand::CreateImm( |
6296 | Val: ImmRExpr, S: LSBOp.getStartLoc(), E: LSBOp.getEndLoc(), Ctx&: getContext()); |
6297 | Operands.emplace_back( |
6298 | Args: AArch64Operand::CreateImm(Val: ImmSExpr, S: WidthOp.getStartLoc(), |
6299 | E: WidthOp.getEndLoc(), Ctx&: getContext())); |
6300 | } |
6301 | } |
6302 | } else if (NumOperands == 5) { |
6303 | // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and |
6304 | // UBFIZ -> UBFM aliases. |
6305 | if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz" ) { |
6306 | AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]); |
6307 | AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]); |
6308 | AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]); |
6309 | |
6310 | if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) { |
6311 | const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Val: Op3.getImm()); |
6312 | const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Val: Op4.getImm()); |
6313 | |
6314 | if (Op3CE && Op4CE) { |
6315 | uint64_t Op3Val = Op3CE->getValue(); |
6316 | uint64_t Op4Val = Op4CE->getValue(); |
6317 | |
6318 | uint64_t RegWidth = 0; |
6319 | if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains( |
6320 | Reg: Op1.getReg())) |
6321 | RegWidth = 64; |
6322 | else |
6323 | RegWidth = 32; |
6324 | |
6325 | if (Op3Val >= RegWidth) |
6326 | return Error(L: Op3.getStartLoc(), |
6327 | Msg: "expected integer in range [0, 31]" ); |
6328 | if (Op4Val < 1 || Op4Val > RegWidth) |
6329 | return Error(L: Op4.getStartLoc(), |
6330 | Msg: "expected integer in range [1, 32]" ); |
6331 | |
6332 | uint64_t NewOp3Val = 0; |
6333 | if (RegWidth == 32) |
6334 | NewOp3Val = (32 - Op3Val) & 0x1f; |
6335 | else |
6336 | NewOp3Val = (64 - Op3Val) & 0x3f; |
6337 | |
6338 | uint64_t NewOp4Val = Op4Val - 1; |
6339 | |
6340 | if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val) |
6341 | return Error(L: Op4.getStartLoc(), |
6342 | Msg: "requested insert overflows register" ); |
6343 | |
6344 | const MCExpr *NewOp3 = |
6345 | MCConstantExpr::create(Value: NewOp3Val, Ctx&: getContext()); |
6346 | const MCExpr *NewOp4 = |
6347 | MCConstantExpr::create(Value: NewOp4Val, Ctx&: getContext()); |
6348 | Operands[3] = AArch64Operand::CreateImm( |
6349 | Val: NewOp3, S: Op3.getStartLoc(), E: Op3.getEndLoc(), Ctx&: getContext()); |
6350 | Operands[4] = AArch64Operand::CreateImm( |
6351 | Val: NewOp4, S: Op4.getStartLoc(), E: Op4.getEndLoc(), Ctx&: getContext()); |
6352 | if (Tok == "bfi" ) |
6353 | Operands[0] = AArch64Operand::CreateToken(Str: "bfm" , S: Op.getStartLoc(), |
6354 | Ctx&: getContext()); |
6355 | else if (Tok == "sbfiz" ) |
6356 | Operands[0] = AArch64Operand::CreateToken(Str: "sbfm" , S: Op.getStartLoc(), |
6357 | Ctx&: getContext()); |
6358 | else if (Tok == "ubfiz" ) |
6359 | Operands[0] = AArch64Operand::CreateToken(Str: "ubfm" , S: Op.getStartLoc(), |
6360 | Ctx&: getContext()); |
6361 | else |
6362 | llvm_unreachable("No valid mnemonic for alias?" ); |
6363 | } |
6364 | } |
6365 | |
6366 | // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and |
6367 | // UBFX -> UBFM aliases. |
6368 | } else if (NumOperands == 5 && |
6369 | (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx" )) { |
6370 | AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]); |
6371 | AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]); |
6372 | AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]); |
6373 | |
6374 | if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) { |
6375 | const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Val: Op3.getImm()); |
6376 | const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Val: Op4.getImm()); |
6377 | |
6378 | if (Op3CE && Op4CE) { |
6379 | uint64_t Op3Val = Op3CE->getValue(); |
6380 | uint64_t Op4Val = Op4CE->getValue(); |
6381 | |
6382 | uint64_t RegWidth = 0; |
6383 | if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains( |
6384 | Reg: Op1.getReg())) |
6385 | RegWidth = 64; |
6386 | else |
6387 | RegWidth = 32; |
6388 | |
6389 | if (Op3Val >= RegWidth) |
6390 | return Error(L: Op3.getStartLoc(), |
6391 | Msg: "expected integer in range [0, 31]" ); |
6392 | if (Op4Val < 1 || Op4Val > RegWidth) |
6393 | return Error(L: Op4.getStartLoc(), |
6394 | Msg: "expected integer in range [1, 32]" ); |
6395 | |
6396 | uint64_t NewOp4Val = Op3Val + Op4Val - 1; |
6397 | |
6398 | if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val) |
6399 | return Error(L: Op4.getStartLoc(), |
6400 | Msg: "requested extract overflows register" ); |
6401 | |
6402 | const MCExpr *NewOp4 = |
6403 | MCConstantExpr::create(Value: NewOp4Val, Ctx&: getContext()); |
6404 | Operands[4] = AArch64Operand::CreateImm( |
6405 | Val: NewOp4, S: Op4.getStartLoc(), E: Op4.getEndLoc(), Ctx&: getContext()); |
6406 | if (Tok == "bfxil" ) |
6407 | Operands[0] = AArch64Operand::CreateToken(Str: "bfm" , S: Op.getStartLoc(), |
6408 | Ctx&: getContext()); |
6409 | else if (Tok == "sbfx" ) |
6410 | Operands[0] = AArch64Operand::CreateToken(Str: "sbfm" , S: Op.getStartLoc(), |
6411 | Ctx&: getContext()); |
6412 | else if (Tok == "ubfx" ) |
6413 | Operands[0] = AArch64Operand::CreateToken(Str: "ubfm" , S: Op.getStartLoc(), |
6414 | Ctx&: getContext()); |
6415 | else |
6416 | llvm_unreachable("No valid mnemonic for alias?" ); |
6417 | } |
6418 | } |
6419 | } |
6420 | } |
6421 | |
6422 | // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing |
6423 | // instruction for FP registers correctly in some rare circumstances. Convert |
6424 | // it to a safe instruction and warn (because silently changing someone's |
6425 | // assembly is rude). |
6426 | if (getSTI().hasFeature(Feature: AArch64::FeatureZCZeroingFPWorkaround) && |
6427 | NumOperands == 4 && Tok == "movi" ) { |
6428 | AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]); |
6429 | AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]); |
6430 | AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]); |
6431 | if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) || |
6432 | (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) { |
6433 | StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken(); |
6434 | if (Suffix.lower() == ".2d" && |
6435 | cast<MCConstantExpr>(Val: Op3.getImm())->getValue() == 0) { |
6436 | Warning(L: IDLoc, Msg: "instruction movi.2d with immediate #0 may not function" |
6437 | " correctly on this CPU, converting to equivalent movi.16b" ); |
6438 | // Switch the suffix to .16b. |
6439 | unsigned Idx = Op1.isToken() ? 1 : 2; |
6440 | Operands[Idx] = |
6441 | AArch64Operand::CreateToken(Str: ".16b" , S: IDLoc, Ctx&: getContext()); |
6442 | } |
6443 | } |
6444 | } |
6445 | |
6446 | // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands. |
6447 | // InstAlias can't quite handle this since the reg classes aren't |
6448 | // subclasses. |
6449 | if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw" )) { |
6450 | // The source register can be Wn here, but the matcher expects a |
6451 | // GPR64. Twiddle it here if necessary. |
6452 | AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]); |
6453 | if (Op.isScalarReg()) { |
6454 | unsigned Reg = getXRegFromWReg(Reg: Op.getReg()); |
6455 | Operands[2] = AArch64Operand::CreateReg(RegNum: Reg, Kind: RegKind::Scalar, |
6456 | S: Op.getStartLoc(), E: Op.getEndLoc(), |
6457 | Ctx&: getContext()); |
6458 | } |
6459 | } |
6460 | // FIXME: Likewise for sxt[bh] with a Xd dst operand |
6461 | else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth" )) { |
6462 | AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]); |
6463 | if (Op.isScalarReg() && |
6464 | AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains( |
6465 | Reg: Op.getReg())) { |
6466 | // The source register can be Wn here, but the matcher expects a |
6467 | // GPR64. Twiddle it here if necessary. |
6468 | AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]); |
6469 | if (Op.isScalarReg()) { |
6470 | unsigned Reg = getXRegFromWReg(Reg: Op.getReg()); |
6471 | Operands[2] = AArch64Operand::CreateReg(RegNum: Reg, Kind: RegKind::Scalar, |
6472 | S: Op.getStartLoc(), |
6473 | E: Op.getEndLoc(), Ctx&: getContext()); |
6474 | } |
6475 | } |
6476 | } |
6477 | // FIXME: Likewise for uxt[bh] with a Xd dst operand |
6478 | else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth" )) { |
6479 | AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]); |
6480 | if (Op.isScalarReg() && |
6481 | AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains( |
6482 | Reg: Op.getReg())) { |
6483 | // The source register can be Wn here, but the matcher expects a |
6484 | // GPR32. Twiddle it here if necessary. |
6485 | AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]); |
6486 | if (Op.isScalarReg()) { |
6487 | unsigned Reg = getWRegFromXReg(Reg: Op.getReg()); |
6488 | Operands[1] = AArch64Operand::CreateReg(RegNum: Reg, Kind: RegKind::Scalar, |
6489 | S: Op.getStartLoc(), |
6490 | E: Op.getEndLoc(), Ctx&: getContext()); |
6491 | } |
6492 | } |
6493 | } |
6494 | |
6495 | MCInst Inst; |
6496 | FeatureBitset MissingFeatures; |
6497 | // First try to match against the secondary set of tables containing the |
6498 | // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2"). |
6499 | unsigned MatchResult = |
6500 | MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures, |
6501 | matchingInlineAsm: MatchingInlineAsm, VariantID: 1); |
6502 | |
6503 | // If that fails, try against the alternate table containing long-form NEON: |
6504 | // "fadd v0.2s, v1.2s, v2.2s" |
6505 | if (MatchResult != Match_Success) { |
6506 | // But first, save the short-form match result: we can use it in case the |
6507 | // long-form match also fails. |
6508 | auto ShortFormNEONErrorInfo = ErrorInfo; |
6509 | auto ShortFormNEONMatchResult = MatchResult; |
6510 | auto ShortFormNEONMissingFeatures = MissingFeatures; |
6511 | |
6512 | MatchResult = |
6513 | MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures, |
6514 | matchingInlineAsm: MatchingInlineAsm, VariantID: 0); |
6515 | |
6516 | // Now, both matches failed, and the long-form match failed on the mnemonic |
6517 | // suffix token operand. The short-form match failure is probably more |
6518 | // relevant: use it instead. |
6519 | if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 && |
6520 | Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() && |
6521 | ((AArch64Operand &)*Operands[1]).isTokenSuffix()) { |
6522 | MatchResult = ShortFormNEONMatchResult; |
6523 | ErrorInfo = ShortFormNEONErrorInfo; |
6524 | MissingFeatures = ShortFormNEONMissingFeatures; |
6525 | } |
6526 | } |
6527 | |
6528 | switch (MatchResult) { |
6529 | case Match_Success: { |
6530 | // Perform range checking and other semantic validations |
6531 | SmallVector<SMLoc, 8> OperandLocs; |
6532 | NumOperands = Operands.size(); |
6533 | for (unsigned i = 1; i < NumOperands; ++i) |
6534 | OperandLocs.push_back(Elt: Operands[i]->getStartLoc()); |
6535 | if (validateInstruction(Inst, IDLoc, Loc&: OperandLocs)) |
6536 | return true; |
6537 | |
6538 | Inst.setLoc(IDLoc); |
6539 | Out.emitInstruction(Inst, STI: getSTI()); |
6540 | return false; |
6541 | } |
6542 | case Match_MissingFeature: { |
6543 | assert(MissingFeatures.any() && "Unknown missing feature!" ); |
6544 | // Special case the error message for the very common case where only |
6545 | // a single subtarget feature is missing (neon, e.g.). |
6546 | std::string Msg = "instruction requires:" ; |
6547 | for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) { |
6548 | if (MissingFeatures[i]) { |
6549 | Msg += " " ; |
6550 | Msg += getSubtargetFeatureName(Val: i); |
6551 | } |
6552 | } |
6553 | return Error(L: IDLoc, Msg); |
6554 | } |
6555 | case Match_MnemonicFail: |
6556 | return showMatchError(Loc: IDLoc, ErrCode: MatchResult, ErrorInfo, Operands); |
6557 | case Match_InvalidOperand: { |
6558 | SMLoc ErrorLoc = IDLoc; |
6559 | |
6560 | if (ErrorInfo != ~0ULL) { |
6561 | if (ErrorInfo >= Operands.size()) |
6562 | return Error(L: IDLoc, Msg: "too few operands for instruction" , |
6563 | Range: SMRange(IDLoc, getTok().getLoc())); |
6564 | |
6565 | ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc(); |
6566 | if (ErrorLoc == SMLoc()) |
6567 | ErrorLoc = IDLoc; |
6568 | } |
6569 | // If the match failed on a suffix token operand, tweak the diagnostic |
6570 | // accordingly. |
6571 | if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() && |
6572 | ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix()) |
6573 | MatchResult = Match_InvalidSuffix; |
6574 | |
6575 | return showMatchError(Loc: ErrorLoc, ErrCode: MatchResult, ErrorInfo, Operands); |
6576 | } |
6577 | case Match_InvalidTiedOperand: |
6578 | case Match_InvalidMemoryIndexed1: |
6579 | case Match_InvalidMemoryIndexed2: |
6580 | case Match_InvalidMemoryIndexed4: |
6581 | case Match_InvalidMemoryIndexed8: |
6582 | case Match_InvalidMemoryIndexed16: |
6583 | case Match_InvalidCondCode: |
6584 | case Match_AddSubLSLImm3ShiftLarge: |
6585 | case Match_AddSubRegExtendSmall: |
6586 | case Match_AddSubRegExtendLarge: |
6587 | case Match_AddSubSecondSource: |
6588 | case Match_LogicalSecondSource: |
6589 | case Match_AddSubRegShift32: |
6590 | case Match_AddSubRegShift64: |
6591 | case Match_InvalidMovImm32Shift: |
6592 | case Match_InvalidMovImm64Shift: |
6593 | case Match_InvalidFPImm: |
6594 | case Match_InvalidMemoryWExtend8: |
6595 | case Match_InvalidMemoryWExtend16: |
6596 | case Match_InvalidMemoryWExtend32: |
6597 | case Match_InvalidMemoryWExtend64: |
6598 | case Match_InvalidMemoryWExtend128: |
6599 | case Match_InvalidMemoryXExtend8: |
6600 | case Match_InvalidMemoryXExtend16: |
6601 | case Match_InvalidMemoryXExtend32: |
6602 | case Match_InvalidMemoryXExtend64: |
6603 | case Match_InvalidMemoryXExtend128: |
6604 | case Match_InvalidMemoryIndexed1SImm4: |
6605 | case Match_InvalidMemoryIndexed2SImm4: |
6606 | case Match_InvalidMemoryIndexed3SImm4: |
6607 | case Match_InvalidMemoryIndexed4SImm4: |
6608 | case Match_InvalidMemoryIndexed1SImm6: |
6609 | case Match_InvalidMemoryIndexed16SImm4: |
6610 | case Match_InvalidMemoryIndexed32SImm4: |
6611 | case Match_InvalidMemoryIndexed4SImm7: |
6612 | case Match_InvalidMemoryIndexed8SImm7: |
6613 | case Match_InvalidMemoryIndexed16SImm7: |
6614 | case Match_InvalidMemoryIndexed8UImm5: |
6615 | case Match_InvalidMemoryIndexed8UImm3: |
6616 | case Match_InvalidMemoryIndexed4UImm5: |
6617 | case Match_InvalidMemoryIndexed2UImm5: |
6618 | case Match_InvalidMemoryIndexed1UImm6: |
6619 | case Match_InvalidMemoryIndexed2UImm6: |
6620 | case Match_InvalidMemoryIndexed4UImm6: |
6621 | case Match_InvalidMemoryIndexed8UImm6: |
6622 | case Match_InvalidMemoryIndexed16UImm6: |
6623 | case Match_InvalidMemoryIndexedSImm6: |
6624 | case Match_InvalidMemoryIndexedSImm5: |
6625 | case Match_InvalidMemoryIndexedSImm8: |
6626 | case Match_InvalidMemoryIndexedSImm9: |
6627 | case Match_InvalidMemoryIndexed16SImm9: |
6628 | case Match_InvalidMemoryIndexed8SImm10: |
6629 | case Match_InvalidImm0_0: |
6630 | case Match_InvalidImm0_1: |
6631 | case Match_InvalidImm0_3: |
6632 | case Match_InvalidImm0_7: |
6633 | case Match_InvalidImm0_15: |
6634 | case Match_InvalidImm0_31: |
6635 | case Match_InvalidImm0_63: |
6636 | case Match_InvalidImm0_127: |
6637 | case Match_InvalidImm0_255: |
6638 | case Match_InvalidImm0_65535: |
6639 | case Match_InvalidImm1_8: |
6640 | case Match_InvalidImm1_16: |
6641 | case Match_InvalidImm1_32: |
6642 | case Match_InvalidImm1_64: |
6643 | case Match_InvalidMemoryIndexedRange2UImm0: |
6644 | case Match_InvalidMemoryIndexedRange2UImm1: |
6645 | case Match_InvalidMemoryIndexedRange2UImm2: |
6646 | case Match_InvalidMemoryIndexedRange2UImm3: |
6647 | case Match_InvalidMemoryIndexedRange4UImm0: |
6648 | case Match_InvalidMemoryIndexedRange4UImm1: |
6649 | case Match_InvalidMemoryIndexedRange4UImm2: |
6650 | case Match_InvalidSVEAddSubImm8: |
6651 | case Match_InvalidSVEAddSubImm16: |
6652 | case Match_InvalidSVEAddSubImm32: |
6653 | case Match_InvalidSVEAddSubImm64: |
6654 | case Match_InvalidSVECpyImm8: |
6655 | case Match_InvalidSVECpyImm16: |
6656 | case Match_InvalidSVECpyImm32: |
6657 | case Match_InvalidSVECpyImm64: |
6658 | case Match_InvalidIndexRange0_0: |
6659 | case Match_InvalidIndexRange1_1: |
6660 | case Match_InvalidIndexRange0_15: |
6661 | case Match_InvalidIndexRange0_7: |
6662 | case Match_InvalidIndexRange0_3: |
6663 | case Match_InvalidIndexRange0_1: |
6664 | case Match_InvalidSVEIndexRange0_63: |
6665 | case Match_InvalidSVEIndexRange0_31: |
6666 | case Match_InvalidSVEIndexRange0_15: |
6667 | case Match_InvalidSVEIndexRange0_7: |
6668 | case Match_InvalidSVEIndexRange0_3: |
6669 | case Match_InvalidLabel: |
6670 | case Match_InvalidComplexRotationEven: |
6671 | case Match_InvalidComplexRotationOdd: |
6672 | case Match_InvalidGPR64shifted8: |
6673 | case Match_InvalidGPR64shifted16: |
6674 | case Match_InvalidGPR64shifted32: |
6675 | case Match_InvalidGPR64shifted64: |
6676 | case Match_InvalidGPR64shifted128: |
6677 | case Match_InvalidGPR64NoXZRshifted8: |
6678 | case Match_InvalidGPR64NoXZRshifted16: |
6679 | case Match_InvalidGPR64NoXZRshifted32: |
6680 | case Match_InvalidGPR64NoXZRshifted64: |
6681 | case Match_InvalidGPR64NoXZRshifted128: |
6682 | case Match_InvalidZPR32UXTW8: |
6683 | case Match_InvalidZPR32UXTW16: |
6684 | case Match_InvalidZPR32UXTW32: |
6685 | case Match_InvalidZPR32UXTW64: |
6686 | case Match_InvalidZPR32SXTW8: |
6687 | case Match_InvalidZPR32SXTW16: |
6688 | case Match_InvalidZPR32SXTW32: |
6689 | case Match_InvalidZPR32SXTW64: |
6690 | case Match_InvalidZPR64UXTW8: |
6691 | case Match_InvalidZPR64SXTW8: |
6692 | case Match_InvalidZPR64UXTW16: |
6693 | case Match_InvalidZPR64SXTW16: |
6694 | case Match_InvalidZPR64UXTW32: |
6695 | case Match_InvalidZPR64SXTW32: |
6696 | case Match_InvalidZPR64UXTW64: |
6697 | case Match_InvalidZPR64SXTW64: |
6698 | case Match_InvalidZPR32LSL8: |
6699 | case Match_InvalidZPR32LSL16: |
6700 | case Match_InvalidZPR32LSL32: |
6701 | case Match_InvalidZPR32LSL64: |
6702 | case Match_InvalidZPR64LSL8: |
6703 | case Match_InvalidZPR64LSL16: |
6704 | case Match_InvalidZPR64LSL32: |
6705 | case Match_InvalidZPR64LSL64: |
6706 | case Match_InvalidZPR0: |
6707 | case Match_InvalidZPR8: |
6708 | case Match_InvalidZPR16: |
6709 | case Match_InvalidZPR32: |
6710 | case Match_InvalidZPR64: |
6711 | case Match_InvalidZPR128: |
6712 | case Match_InvalidZPR_3b8: |
6713 | case Match_InvalidZPR_3b16: |
6714 | case Match_InvalidZPR_3b32: |
6715 | case Match_InvalidZPR_4b8: |
6716 | case Match_InvalidZPR_4b16: |
6717 | case Match_InvalidZPR_4b32: |
6718 | case Match_InvalidZPR_4b64: |
6719 | case Match_InvalidSVEPPRorPNRAnyReg: |
6720 | case Match_InvalidSVEPPRorPNRBReg: |
6721 | case Match_InvalidSVEPredicateAnyReg: |
6722 | case Match_InvalidSVEPattern: |
6723 | case Match_InvalidSVEVecLenSpecifier: |
6724 | case Match_InvalidSVEPredicateBReg: |
6725 | case Match_InvalidSVEPredicateHReg: |
6726 | case Match_InvalidSVEPredicateSReg: |
6727 | case Match_InvalidSVEPredicateDReg: |
6728 | case Match_InvalidSVEPredicate3bAnyReg: |
6729 | case Match_InvalidSVEPNPredicateB_p8to15Reg: |
6730 | case Match_InvalidSVEPNPredicateH_p8to15Reg: |
6731 | case Match_InvalidSVEPNPredicateS_p8to15Reg: |
6732 | case Match_InvalidSVEPNPredicateD_p8to15Reg: |
6733 | case Match_InvalidSVEPNPredicateAny_p8to15Reg: |
6734 | case Match_InvalidSVEPNPredicateBReg: |
6735 | case Match_InvalidSVEPNPredicateHReg: |
6736 | case Match_InvalidSVEPNPredicateSReg: |
6737 | case Match_InvalidSVEPNPredicateDReg: |
6738 | case Match_InvalidSVEPredicateListMul2x8: |
6739 | case Match_InvalidSVEPredicateListMul2x16: |
6740 | case Match_InvalidSVEPredicateListMul2x32: |
6741 | case Match_InvalidSVEPredicateListMul2x64: |
6742 | case Match_InvalidSVEExactFPImmOperandHalfOne: |
6743 | case Match_InvalidSVEExactFPImmOperandHalfTwo: |
6744 | case Match_InvalidSVEExactFPImmOperandZeroOne: |
6745 | case Match_InvalidMatrixTile32: |
6746 | case Match_InvalidMatrixTile64: |
6747 | case Match_InvalidMatrix: |
6748 | case Match_InvalidMatrix8: |
6749 | case Match_InvalidMatrix16: |
6750 | case Match_InvalidMatrix32: |
6751 | case Match_InvalidMatrix64: |
6752 | case Match_InvalidMatrixTileVectorH8: |
6753 | case Match_InvalidMatrixTileVectorH16: |
6754 | case Match_InvalidMatrixTileVectorH32: |
6755 | case Match_InvalidMatrixTileVectorH64: |
6756 | case Match_InvalidMatrixTileVectorH128: |
6757 | case Match_InvalidMatrixTileVectorV8: |
6758 | case Match_InvalidMatrixTileVectorV16: |
6759 | case Match_InvalidMatrixTileVectorV32: |
6760 | case Match_InvalidMatrixTileVectorV64: |
6761 | case Match_InvalidMatrixTileVectorV128: |
6762 | case Match_InvalidSVCR: |
6763 | case Match_InvalidMatrixIndexGPR32_12_15: |
6764 | case Match_InvalidMatrixIndexGPR32_8_11: |
6765 | case Match_InvalidLookupTable: |
6766 | case Match_InvalidSVEVectorListMul2x8: |
6767 | case Match_InvalidSVEVectorListMul2x16: |
6768 | case Match_InvalidSVEVectorListMul2x32: |
6769 | case Match_InvalidSVEVectorListMul2x64: |
6770 | case Match_InvalidSVEVectorListMul2x128: |
6771 | case Match_InvalidSVEVectorListMul4x8: |
6772 | case Match_InvalidSVEVectorListMul4x16: |
6773 | case Match_InvalidSVEVectorListMul4x32: |
6774 | case Match_InvalidSVEVectorListMul4x64: |
6775 | case Match_InvalidSVEVectorListMul4x128: |
6776 | case Match_InvalidSVEVectorListStrided2x8: |
6777 | case Match_InvalidSVEVectorListStrided2x16: |
6778 | case Match_InvalidSVEVectorListStrided2x32: |
6779 | case Match_InvalidSVEVectorListStrided2x64: |
6780 | case Match_InvalidSVEVectorListStrided4x8: |
6781 | case Match_InvalidSVEVectorListStrided4x16: |
6782 | case Match_InvalidSVEVectorListStrided4x32: |
6783 | case Match_InvalidSVEVectorListStrided4x64: |
6784 | case Match_MSR: |
6785 | case Match_MRS: { |
6786 | if (ErrorInfo >= Operands.size()) |
6787 | return Error(L: IDLoc, Msg: "too few operands for instruction" , Range: SMRange(IDLoc, (*Operands.back()).getEndLoc())); |
6788 | // Any time we get here, there's nothing fancy to do. Just get the |
6789 | // operand SMLoc and display the diagnostic. |
6790 | SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc(); |
6791 | if (ErrorLoc == SMLoc()) |
6792 | ErrorLoc = IDLoc; |
6793 | return showMatchError(Loc: ErrorLoc, ErrCode: MatchResult, ErrorInfo, Operands); |
6794 | } |
6795 | } |
6796 | |
6797 | llvm_unreachable("Implement any new match types added!" ); |
6798 | } |
6799 | |
/// ParseDirective parses the arm specific directives
// Returns false when the directive was recognized and handled here; returns
// true to hand the directive back to the generic assembly parser. Matching is
// case-insensitive because the identifier is lower()-cased first. Mach-O and
// COFF each get an extra object-format-specific set of directives.
bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
  const MCContext::Environment Format = getContext().getObjectFileType();
  bool IsMachO = Format == MCContext::IsMachO;
  bool IsCOFF = Format == MCContext::IsCOFF;

  // Lower-case once so all comparisons below are case-insensitive.
  auto IDVal = DirectiveID.getIdentifier().lower();
  SMLoc Loc = DirectiveID.getLoc();
  if (IDVal == ".arch" )
    parseDirectiveArch(L: Loc);
  else if (IDVal == ".cpu" )
    parseDirectiveCPU(L: Loc);
  else if (IDVal == ".tlsdesccall" )
    parseDirectiveTLSDescCall(L: Loc);
  else if (IDVal == ".ltorg" || IDVal == ".pool" )
    parseDirectiveLtorg(L: Loc);
  else if (IDVal == ".unreq" )
    parseDirectiveUnreq(L: Loc);
  else if (IDVal == ".inst" )
    parseDirectiveInst(L: Loc);
  else if (IDVal == ".cfi_negate_ra_state" )
    parseDirectiveCFINegateRAState();
  else if (IDVal == ".cfi_b_key_frame" )
    parseDirectiveCFIBKeyFrame();
  else if (IDVal == ".cfi_mte_tagged_frame" )
    parseDirectiveCFIMTETaggedFrame();
  else if (IDVal == ".arch_extension" )
    parseDirectiveArchExtension(L: Loc);
  else if (IDVal == ".variant_pcs" )
    parseDirectiveVariantPCS(L: Loc);
  else if (IsMachO) {
    // Mach-O additionally accepts the linker-optimization-hint directive.
    if (IDVal == MCLOHDirectiveName())
      parseDirectiveLOH(LOH: IDVal, L: Loc);
    else
      return true;
  } else if (IsCOFF) {
    // COFF additionally accepts the Windows ARM64 SEH unwind directives.
    if (IDVal == ".seh_stackalloc" )
      parseDirectiveSEHAllocStack(L: Loc);
    else if (IDVal == ".seh_endprologue" )
      parseDirectiveSEHPrologEnd(L: Loc);
    else if (IDVal == ".seh_save_r19r20_x" )
      parseDirectiveSEHSaveR19R20X(L: Loc);
    else if (IDVal == ".seh_save_fplr" )
      parseDirectiveSEHSaveFPLR(L: Loc);
    else if (IDVal == ".seh_save_fplr_x" )
      parseDirectiveSEHSaveFPLRX(L: Loc);
    else if (IDVal == ".seh_save_reg" )
      parseDirectiveSEHSaveReg(L: Loc);
    else if (IDVal == ".seh_save_reg_x" )
      parseDirectiveSEHSaveRegX(L: Loc);
    else if (IDVal == ".seh_save_regp" )
      parseDirectiveSEHSaveRegP(L: Loc);
    else if (IDVal == ".seh_save_regp_x" )
      parseDirectiveSEHSaveRegPX(L: Loc);
    else if (IDVal == ".seh_save_lrpair" )
      parseDirectiveSEHSaveLRPair(L: Loc);
    else if (IDVal == ".seh_save_freg" )
      parseDirectiveSEHSaveFReg(L: Loc);
    else if (IDVal == ".seh_save_freg_x" )
      parseDirectiveSEHSaveFRegX(L: Loc);
    else if (IDVal == ".seh_save_fregp" )
      parseDirectiveSEHSaveFRegP(L: Loc);
    else if (IDVal == ".seh_save_fregp_x" )
      parseDirectiveSEHSaveFRegPX(L: Loc);
    else if (IDVal == ".seh_set_fp" )
      parseDirectiveSEHSetFP(L: Loc);
    else if (IDVal == ".seh_add_fp" )
      parseDirectiveSEHAddFP(L: Loc);
    else if (IDVal == ".seh_nop" )
      parseDirectiveSEHNop(L: Loc);
    else if (IDVal == ".seh_save_next" )
      parseDirectiveSEHSaveNext(L: Loc);
    else if (IDVal == ".seh_startepilogue" )
      parseDirectiveSEHEpilogStart(L: Loc);
    else if (IDVal == ".seh_endepilogue" )
      parseDirectiveSEHEpilogEnd(L: Loc);
    else if (IDVal == ".seh_trap_frame" )
      parseDirectiveSEHTrapFrame(L: Loc);
    else if (IDVal == ".seh_pushframe" )
      parseDirectiveSEHMachineFrame(L: Loc);
    else if (IDVal == ".seh_context" )
      parseDirectiveSEHContext(L: Loc);
    else if (IDVal == ".seh_ec_context" )
      parseDirectiveSEHECContext(L: Loc);
    else if (IDVal == ".seh_clear_unwound_to_call" )
      parseDirectiveSEHClearUnwoundToCall(L: Loc);
    else if (IDVal == ".seh_pac_sign_lr" )
      parseDirectiveSEHPACSignLR(L: Loc);
    // The four save_any_reg spellings share one parser, parameterized on
    // whether the save is paired and/or uses writeback (pre-indexed) form.
    else if (IDVal == ".seh_save_any_reg" )
      parseDirectiveSEHSaveAnyReg(L: Loc, Paired: false, Writeback: false);
    else if (IDVal == ".seh_save_any_reg_p" )
      parseDirectiveSEHSaveAnyReg(L: Loc, Paired: true, Writeback: false);
    else if (IDVal == ".seh_save_any_reg_x" )
      parseDirectiveSEHSaveAnyReg(L: Loc, Paired: false, Writeback: true);
    else if (IDVal == ".seh_save_any_reg_px" )
      parseDirectiveSEHSaveAnyReg(L: Loc, Paired: true, Writeback: true);
    else
      return true;
  } else
    return true;
  return false;
}
6902 | |
6903 | static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo, |
6904 | SmallVector<StringRef, 4> &RequestedExtensions) { |
6905 | const bool NoCrypto = llvm::is_contained(Range&: RequestedExtensions, Element: "nocrypto" ); |
6906 | const bool Crypto = llvm::is_contained(Range&: RequestedExtensions, Element: "crypto" ); |
6907 | |
6908 | if (!NoCrypto && Crypto) { |
6909 | // Map 'generic' (and others) to sha2 and aes, because |
6910 | // that was the traditional meaning of crypto. |
6911 | if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A || |
6912 | ArchInfo == AArch64::ARMV8_3A) { |
6913 | RequestedExtensions.push_back(Elt: "sha2" ); |
6914 | RequestedExtensions.push_back(Elt: "aes" ); |
6915 | } |
6916 | if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A || |
6917 | ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A || |
6918 | ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A || |
6919 | ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A || |
6920 | ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A || |
6921 | ArchInfo == AArch64::ARMV9_4A || ArchInfo == AArch64::ARMV8R) { |
6922 | RequestedExtensions.push_back(Elt: "sm4" ); |
6923 | RequestedExtensions.push_back(Elt: "sha3" ); |
6924 | RequestedExtensions.push_back(Elt: "sha2" ); |
6925 | RequestedExtensions.push_back(Elt: "aes" ); |
6926 | } |
6927 | } else if (NoCrypto) { |
6928 | // Map 'generic' (and others) to sha2 and aes, because |
6929 | // that was the traditional meaning of crypto. |
6930 | if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A || |
6931 | ArchInfo == AArch64::ARMV8_3A) { |
6932 | RequestedExtensions.push_back(Elt: "nosha2" ); |
6933 | RequestedExtensions.push_back(Elt: "noaes" ); |
6934 | } |
6935 | if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A || |
6936 | ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A || |
6937 | ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A || |
6938 | ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A || |
6939 | ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A || |
6940 | ArchInfo == AArch64::ARMV9_4A) { |
6941 | RequestedExtensions.push_back(Elt: "nosm4" ); |
6942 | RequestedExtensions.push_back(Elt: "nosha3" ); |
6943 | RequestedExtensions.push_back(Elt: "nosha2" ); |
6944 | RequestedExtensions.push_back(Elt: "noaes" ); |
6945 | } |
6946 | } |
6947 | } |
6948 | |
6949 | /// parseDirectiveArch |
6950 | /// ::= .arch token |
6951 | bool AArch64AsmParser::parseDirectiveArch(SMLoc L) { |
6952 | SMLoc ArchLoc = getLoc(); |
6953 | |
6954 | StringRef Arch, ExtensionString; |
6955 | std::tie(args&: Arch, args&: ExtensionString) = |
6956 | getParser().parseStringToEndOfStatement().trim().split(Separator: '+'); |
6957 | |
6958 | const AArch64::ArchInfo *ArchInfo = AArch64::parseArch(Arch); |
6959 | if (!ArchInfo) |
6960 | return Error(L: ArchLoc, Msg: "unknown arch name" ); |
6961 | |
6962 | if (parseToken(T: AsmToken::EndOfStatement)) |
6963 | return true; |
6964 | |
6965 | // Get the architecture and extension features. |
6966 | std::vector<StringRef> AArch64Features; |
6967 | AArch64Features.push_back(x: ArchInfo->ArchFeature); |
6968 | AArch64::getExtensionFeatures(Extensions: ArchInfo->DefaultExts, Features&: AArch64Features); |
6969 | |
6970 | MCSubtargetInfo &STI = copySTI(); |
6971 | std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end()); |
6972 | STI.setDefaultFeatures(CPU: "generic" , /*TuneCPU*/ "generic" , |
6973 | FS: join(Begin: ArchFeatures.begin(), End: ArchFeatures.end(), Separator: "," )); |
6974 | |
6975 | SmallVector<StringRef, 4> RequestedExtensions; |
6976 | if (!ExtensionString.empty()) |
6977 | ExtensionString.split(A&: RequestedExtensions, Separator: '+'); |
6978 | |
6979 | ExpandCryptoAEK(ArchInfo: *ArchInfo, RequestedExtensions); |
6980 | |
6981 | FeatureBitset Features = STI.getFeatureBits(); |
6982 | setAvailableFeatures(ComputeAvailableFeatures(FB: Features)); |
6983 | for (auto Name : RequestedExtensions) { |
6984 | bool EnableFeature = !Name.consume_front_insensitive(Prefix: "no" ); |
6985 | |
6986 | for (const auto &Extension : ExtensionMap) { |
6987 | if (Extension.Name != Name) |
6988 | continue; |
6989 | |
6990 | if (Extension.Features.none()) |
6991 | report_fatal_error(reason: "unsupported architectural extension: " + Name); |
6992 | |
6993 | FeatureBitset ToggleFeatures = |
6994 | EnableFeature |
6995 | ? STI.SetFeatureBitsTransitively(~Features & Extension.Features) |
6996 | : STI.ToggleFeature(FB: Features & Extension.Features); |
6997 | setAvailableFeatures(ComputeAvailableFeatures(FB: ToggleFeatures)); |
6998 | break; |
6999 | } |
7000 | } |
7001 | return false; |
7002 | } |
7003 | |
7004 | /// parseDirectiveArchExtension |
7005 | /// ::= .arch_extension [no]feature |
7006 | bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) { |
7007 | SMLoc ExtLoc = getLoc(); |
7008 | |
7009 | StringRef Name = getParser().parseStringToEndOfStatement().trim(); |
7010 | |
7011 | if (parseEOL()) |
7012 | return true; |
7013 | |
7014 | bool EnableFeature = true; |
7015 | if (Name.starts_with_insensitive(Prefix: "no" )) { |
7016 | EnableFeature = false; |
7017 | Name = Name.substr(Start: 2); |
7018 | } |
7019 | |
7020 | MCSubtargetInfo &STI = copySTI(); |
7021 | FeatureBitset Features = STI.getFeatureBits(); |
7022 | for (const auto &Extension : ExtensionMap) { |
7023 | if (Extension.Name != Name) |
7024 | continue; |
7025 | |
7026 | if (Extension.Features.none()) |
7027 | return Error(L: ExtLoc, Msg: "unsupported architectural extension: " + Name); |
7028 | |
7029 | FeatureBitset ToggleFeatures = |
7030 | EnableFeature |
7031 | ? STI.SetFeatureBitsTransitively(~Features & Extension.Features) |
7032 | : STI.ToggleFeature(FB: Features & Extension.Features); |
7033 | setAvailableFeatures(ComputeAvailableFeatures(FB: ToggleFeatures)); |
7034 | return false; |
7035 | } |
7036 | |
7037 | return Error(L: ExtLoc, Msg: "unknown architectural extension: " + Name); |
7038 | } |
7039 | |
7040 | static SMLoc incrementLoc(SMLoc L, int Offset) { |
7041 | return SMLoc::getFromPointer(Ptr: L.getPointer() + Offset); |
7042 | } |
7043 | |
/// parseDirectiveCPU
///   ::= .cpu id
// Parses ".cpu name[+ext...]": resets the subtarget to the named CPU's
// default features, then toggles each requested extension. CurLoc tracks the
// position within the "name+ext..." string so diagnostics can point at the
// offending extension token.
bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
  SMLoc CurLoc = getLoc();

  StringRef CPU, ExtensionString;
  std::tie(args&: CPU, args&: ExtensionString) =
      getParser().parseStringToEndOfStatement().trim().split(Separator: '+');

  if (parseToken(T: AsmToken::EndOfStatement))
    return true;

  SmallVector<StringRef, 4> RequestedExtensions;
  if (!ExtensionString.empty())
    ExtensionString.split(A&: RequestedExtensions, Separator: '+');

  const llvm::AArch64::ArchInfo *CpuArch = llvm::AArch64::getArchForCpu(CPU);
  if (!CpuArch) {
    // Diagnose the unknown CPU but return false, so the directive itself is
    // consumed and assembly continues (not treated as a hard parse failure).
    Error(L: CurLoc, Msg: "unknown CPU name" );
    return false;
  }
  ExpandCryptoAEK(ArchInfo: *CpuArch, RequestedExtensions);

  MCSubtargetInfo &STI = copySTI();
  STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, FS: "" );
  CurLoc = incrementLoc(L: CurLoc, Offset: CPU.size());

  for (auto Name : RequestedExtensions) {
    // Advance source location past '+'.
    CurLoc = incrementLoc(L: CurLoc, Offset: 1);

    // A leading "no" (case-insensitive) requests disabling the extension.
    bool EnableFeature = !Name.consume_front_insensitive(Prefix: "no" );

    bool FoundExtension = false;
    for (const auto &Extension : ExtensionMap) {
      if (Extension.Name != Name)
        continue;

      if (Extension.Features.none())
        report_fatal_error(reason: "unsupported architectural extension: " + Name);

      // Re-read the feature bits on every iteration so extensions earlier in
      // this same directive are taken into account.
      FeatureBitset Features = STI.getFeatureBits();
      FeatureBitset ToggleFeatures =
          EnableFeature
              ? STI.SetFeatureBitsTransitively(~Features & Extension.Features)
              : STI.ToggleFeature(FB: Features & Extension.Features);
      setAvailableFeatures(ComputeAvailableFeatures(FB: ToggleFeatures));
      FoundExtension = true;

      break;
    }

    if (!FoundExtension)
      Error(L: CurLoc, Msg: "unsupported architectural extension" );

    CurLoc = incrementLoc(L: CurLoc, Offset: Name.size());
  }
  return false;
}
7103 | |
7104 | /// parseDirectiveInst |
7105 | /// ::= .inst opcode [, ...] |
7106 | bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) { |
7107 | if (getLexer().is(K: AsmToken::EndOfStatement)) |
7108 | return Error(L: Loc, Msg: "expected expression following '.inst' directive" ); |
7109 | |
7110 | auto parseOp = [&]() -> bool { |
7111 | SMLoc L = getLoc(); |
7112 | const MCExpr *Expr = nullptr; |
7113 | if (check(P: getParser().parseExpression(Res&: Expr), Loc: L, Msg: "expected expression" )) |
7114 | return true; |
7115 | const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Val: Expr); |
7116 | if (check(P: !Value, Loc: L, Msg: "expected constant expression" )) |
7117 | return true; |
7118 | getTargetStreamer().emitInst(Inst: Value->getValue()); |
7119 | return false; |
7120 | }; |
7121 | |
7122 | return parseMany(parseOne: parseOp); |
7123 | } |
7124 | |
7125 | // parseDirectiveTLSDescCall: |
7126 | // ::= .tlsdesccall symbol |
7127 | bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) { |
7128 | StringRef Name; |
7129 | if (check(P: getParser().parseIdentifier(Res&: Name), Loc: L, Msg: "expected symbol" ) || |
7130 | parseToken(T: AsmToken::EndOfStatement)) |
7131 | return true; |
7132 | |
7133 | MCSymbol *Sym = getContext().getOrCreateSymbol(Name); |
7134 | const MCExpr *Expr = MCSymbolRefExpr::create(Symbol: Sym, Ctx&: getContext()); |
7135 | Expr = AArch64MCExpr::create(Expr, Kind: AArch64MCExpr::VK_TLSDESC, Ctx&: getContext()); |
7136 | |
7137 | MCInst Inst; |
7138 | Inst.setOpcode(AArch64::TLSDESCCALL); |
7139 | Inst.addOperand(Op: MCOperand::createExpr(Val: Expr)); |
7140 | |
7141 | getParser().getStreamer().emitInstruction(Inst, STI: getSTI()); |
7142 | return false; |
7143 | } |
7144 | |
7145 | /// ::= .loh <lohName | lohId> label1, ..., labelN |
7146 | /// The number of arguments depends on the loh identifier. |
7147 | bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) { |
7148 | MCLOHType Kind; |
7149 | if (getTok().isNot(K: AsmToken::Identifier)) { |
7150 | if (getTok().isNot(K: AsmToken::Integer)) |
7151 | return TokError(Msg: "expected an identifier or a number in directive" ); |
7152 | // We successfully get a numeric value for the identifier. |
7153 | // Check if it is valid. |
7154 | int64_t Id = getTok().getIntVal(); |
7155 | if (Id <= -1U && !isValidMCLOHType(Kind: Id)) |
7156 | return TokError(Msg: "invalid numeric identifier in directive" ); |
7157 | Kind = (MCLOHType)Id; |
7158 | } else { |
7159 | StringRef Name = getTok().getIdentifier(); |
7160 | // We successfully parse an identifier. |
7161 | // Check if it is a recognized one. |
7162 | int Id = MCLOHNameToId(Name); |
7163 | |
7164 | if (Id == -1) |
7165 | return TokError(Msg: "invalid identifier in directive" ); |
7166 | Kind = (MCLOHType)Id; |
7167 | } |
7168 | // Consume the identifier. |
7169 | Lex(); |
7170 | // Get the number of arguments of this LOH. |
7171 | int NbArgs = MCLOHIdToNbArgs(Kind); |
7172 | |
7173 | assert(NbArgs != -1 && "Invalid number of arguments" ); |
7174 | |
7175 | SmallVector<MCSymbol *, 3> Args; |
7176 | for (int Idx = 0; Idx < NbArgs; ++Idx) { |
7177 | StringRef Name; |
7178 | if (getParser().parseIdentifier(Res&: Name)) |
7179 | return TokError(Msg: "expected identifier in directive" ); |
7180 | Args.push_back(Elt: getContext().getOrCreateSymbol(Name)); |
7181 | |
7182 | if (Idx + 1 == NbArgs) |
7183 | break; |
7184 | if (parseComma()) |
7185 | return true; |
7186 | } |
7187 | if (parseEOL()) |
7188 | return true; |
7189 | |
7190 | getStreamer().emitLOHDirective(Kind: (MCLOHType)Kind, Args); |
7191 | return false; |
7192 | } |
7193 | |
7194 | /// parseDirectiveLtorg |
7195 | /// ::= .ltorg | .pool |
7196 | bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) { |
7197 | if (parseEOL()) |
7198 | return true; |
7199 | getTargetStreamer().emitCurrentConstantPool(); |
7200 | return false; |
7201 | } |
7202 | |
/// parseDirectiveReq
///  ::= name .req registername
// Registers an alias "name" for a register. The register is tried as, in
// order: scalar, NEON vector, SVE data vector, SVE predicate vector. For each
// vector attempt, isFailure() means a diagnostic was already emitted (abort),
// while a no-match falls through to the next kind; a vector register given
// WITH a type suffix (e.g. "v0.4s") is rejected, since aliases must name the
// bare register.
bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
  Lex(); // Eat the '.req' token.
  SMLoc SRegLoc = getLoc();
  RegKind RegisterKind = RegKind::Scalar;
  MCRegister RegNum;
  ParseStatus ParseRes = tryParseScalarRegister(RegNum);

  if (!ParseRes.isSuccess()) {
    StringRef Kind;
    RegisterKind = RegKind::NeonVector;
    ParseRes = tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RegKind::NeonVector);

    if (ParseRes.isFailure())
      return true;

    // A non-empty Kind means a ".<type>" suffix was present — not allowed.
    if (ParseRes.isSuccess() && !Kind.empty())
      return Error(L: SRegLoc, Msg: "vector register without type specifier expected" );
  }

  if (!ParseRes.isSuccess()) {
    StringRef Kind;
    RegisterKind = RegKind::SVEDataVector;
    ParseRes =
        tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RegKind::SVEDataVector);

    if (ParseRes.isFailure())
      return true;

    if (ParseRes.isSuccess() && !Kind.empty())
      return Error(L: SRegLoc,
                   Msg: "sve vector register without type specifier expected" );
  }

  if (!ParseRes.isSuccess()) {
    StringRef Kind;
    RegisterKind = RegKind::SVEPredicateVector;
    ParseRes = tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RegKind::SVEPredicateVector);

    if (ParseRes.isFailure())
      return true;

    if (ParseRes.isSuccess() && !Kind.empty())
      return Error(L: SRegLoc,
                   Msg: "sve predicate register without type specifier expected" );
  }

  if (!ParseRes.isSuccess())
    return Error(L: SRegLoc, Msg: "register name or alias expected" );

  // Shouldn't be anything else.
  if (parseEOL())
    return true;

  // insert() keeps the first definition; warn if a conflicting alias for the
  // same name already exists.
  auto pair = std::make_pair(x&: RegisterKind, y: (unsigned) RegNum);
  if (RegisterReqs.insert(KV: std::make_pair(x&: Name, y&: pair)).first->second != pair)
    Warning(L, Msg: "ignoring redefinition of register alias '" + Name + "'" );

  return false;
}
7264 | |
7265 | /// parseDirectiveUneq |
7266 | /// ::= .unreq registername |
7267 | bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) { |
7268 | if (getTok().isNot(K: AsmToken::Identifier)) |
7269 | return TokError(Msg: "unexpected input in .unreq directive." ); |
7270 | RegisterReqs.erase(Key: getTok().getIdentifier().lower()); |
7271 | Lex(); // Eat the identifier. |
7272 | return parseToken(T: AsmToken::EndOfStatement); |
7273 | } |
7274 | |
7275 | bool AArch64AsmParser::parseDirectiveCFINegateRAState() { |
7276 | if (parseEOL()) |
7277 | return true; |
7278 | getStreamer().emitCFINegateRAState(); |
7279 | return false; |
7280 | } |
7281 | |
7282 | /// parseDirectiveCFIBKeyFrame |
7283 | /// ::= .cfi_b_key |
7284 | bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() { |
7285 | if (parseEOL()) |
7286 | return true; |
7287 | getStreamer().emitCFIBKeyFrame(); |
7288 | return false; |
7289 | } |
7290 | |
7291 | /// parseDirectiveCFIMTETaggedFrame |
7292 | /// ::= .cfi_mte_tagged_frame |
7293 | bool AArch64AsmParser::parseDirectiveCFIMTETaggedFrame() { |
7294 | if (parseEOL()) |
7295 | return true; |
7296 | getStreamer().emitCFIMTETaggedFrame(); |
7297 | return false; |
7298 | } |
7299 | |
7300 | /// parseDirectiveVariantPCS |
7301 | /// ::= .variant_pcs symbolname |
7302 | bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) { |
7303 | StringRef Name; |
7304 | if (getParser().parseIdentifier(Res&: Name)) |
7305 | return TokError(Msg: "expected symbol name" ); |
7306 | if (parseEOL()) |
7307 | return true; |
7308 | getTargetStreamer().emitDirectiveVariantPCS( |
7309 | Symbol: getContext().getOrCreateSymbol(Name)); |
7310 | return false; |
7311 | } |
7312 | |
7313 | /// parseDirectiveSEHAllocStack |
7314 | /// ::= .seh_stackalloc |
7315 | bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) { |
7316 | int64_t Size; |
7317 | if (parseImmExpr(Out&: Size)) |
7318 | return true; |
7319 | getTargetStreamer().emitARM64WinCFIAllocStack(Size); |
7320 | return false; |
7321 | } |
7322 | |
7323 | /// parseDirectiveSEHPrologEnd |
7324 | /// ::= .seh_endprologue |
7325 | bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) { |
7326 | getTargetStreamer().emitARM64WinCFIPrologEnd(); |
7327 | return false; |
7328 | } |
7329 | |
7330 | /// parseDirectiveSEHSaveR19R20X |
7331 | /// ::= .seh_save_r19r20_x |
7332 | bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) { |
7333 | int64_t Offset; |
7334 | if (parseImmExpr(Out&: Offset)) |
7335 | return true; |
7336 | getTargetStreamer().emitARM64WinCFISaveR19R20X(Offset); |
7337 | return false; |
7338 | } |
7339 | |
7340 | /// parseDirectiveSEHSaveFPLR |
7341 | /// ::= .seh_save_fplr |
7342 | bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) { |
7343 | int64_t Offset; |
7344 | if (parseImmExpr(Out&: Offset)) |
7345 | return true; |
7346 | getTargetStreamer().emitARM64WinCFISaveFPLR(Offset); |
7347 | return false; |
7348 | } |
7349 | |
7350 | /// parseDirectiveSEHSaveFPLRX |
7351 | /// ::= .seh_save_fplr_x |
7352 | bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) { |
7353 | int64_t Offset; |
7354 | if (parseImmExpr(Out&: Offset)) |
7355 | return true; |
7356 | getTargetStreamer().emitARM64WinCFISaveFPLRX(Offset); |
7357 | return false; |
7358 | } |
7359 | |
7360 | /// parseDirectiveSEHSaveReg |
7361 | /// ::= .seh_save_reg |
7362 | bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) { |
7363 | unsigned Reg; |
7364 | int64_t Offset; |
7365 | if (parseRegisterInRange(Out&: Reg, Base: AArch64::X0, First: AArch64::X19, Last: AArch64::LR) || |
7366 | parseComma() || parseImmExpr(Out&: Offset)) |
7367 | return true; |
7368 | getTargetStreamer().emitARM64WinCFISaveReg(Reg, Offset); |
7369 | return false; |
7370 | } |
7371 | |
7372 | /// parseDirectiveSEHSaveRegX |
7373 | /// ::= .seh_save_reg_x |
7374 | bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) { |
7375 | unsigned Reg; |
7376 | int64_t Offset; |
7377 | if (parseRegisterInRange(Out&: Reg, Base: AArch64::X0, First: AArch64::X19, Last: AArch64::LR) || |
7378 | parseComma() || parseImmExpr(Out&: Offset)) |
7379 | return true; |
7380 | getTargetStreamer().emitARM64WinCFISaveRegX(Reg, Offset); |
7381 | return false; |
7382 | } |
7383 | |
7384 | /// parseDirectiveSEHSaveRegP |
7385 | /// ::= .seh_save_regp |
7386 | bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) { |
7387 | unsigned Reg; |
7388 | int64_t Offset; |
7389 | if (parseRegisterInRange(Out&: Reg, Base: AArch64::X0, First: AArch64::X19, Last: AArch64::FP) || |
7390 | parseComma() || parseImmExpr(Out&: Offset)) |
7391 | return true; |
7392 | getTargetStreamer().emitARM64WinCFISaveRegP(Reg, Offset); |
7393 | return false; |
7394 | } |
7395 | |
7396 | /// parseDirectiveSEHSaveRegPX |
7397 | /// ::= .seh_save_regp_x |
7398 | bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) { |
7399 | unsigned Reg; |
7400 | int64_t Offset; |
7401 | if (parseRegisterInRange(Out&: Reg, Base: AArch64::X0, First: AArch64::X19, Last: AArch64::FP) || |
7402 | parseComma() || parseImmExpr(Out&: Offset)) |
7403 | return true; |
7404 | getTargetStreamer().emitARM64WinCFISaveRegPX(Reg, Offset); |
7405 | return false; |
7406 | } |
7407 | |
7408 | /// parseDirectiveSEHSaveLRPair |
7409 | /// ::= .seh_save_lrpair |
7410 | bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) { |
7411 | unsigned Reg; |
7412 | int64_t Offset; |
7413 | L = getLoc(); |
7414 | if (parseRegisterInRange(Out&: Reg, Base: AArch64::X0, First: AArch64::X19, Last: AArch64::LR) || |
7415 | parseComma() || parseImmExpr(Out&: Offset)) |
7416 | return true; |
7417 | if (check(P: ((Reg - 19) % 2 != 0), Loc: L, |
7418 | Msg: "expected register with even offset from x19" )) |
7419 | return true; |
7420 | getTargetStreamer().emitARM64WinCFISaveLRPair(Reg, Offset); |
7421 | return false; |
7422 | } |
7423 | |
7424 | /// parseDirectiveSEHSaveFReg |
7425 | /// ::= .seh_save_freg |
7426 | bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) { |
7427 | unsigned Reg; |
7428 | int64_t Offset; |
7429 | if (parseRegisterInRange(Out&: Reg, Base: AArch64::D0, First: AArch64::D8, Last: AArch64::D15) || |
7430 | parseComma() || parseImmExpr(Out&: Offset)) |
7431 | return true; |
7432 | getTargetStreamer().emitARM64WinCFISaveFReg(Reg, Offset); |
7433 | return false; |
7434 | } |
7435 | |
7436 | /// parseDirectiveSEHSaveFRegX |
7437 | /// ::= .seh_save_freg_x |
7438 | bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) { |
7439 | unsigned Reg; |
7440 | int64_t Offset; |
7441 | if (parseRegisterInRange(Out&: Reg, Base: AArch64::D0, First: AArch64::D8, Last: AArch64::D15) || |
7442 | parseComma() || parseImmExpr(Out&: Offset)) |
7443 | return true; |
7444 | getTargetStreamer().emitARM64WinCFISaveFRegX(Reg, Offset); |
7445 | return false; |
7446 | } |
7447 | |
7448 | /// parseDirectiveSEHSaveFRegP |
7449 | /// ::= .seh_save_fregp |
7450 | bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) { |
7451 | unsigned Reg; |
7452 | int64_t Offset; |
7453 | if (parseRegisterInRange(Out&: Reg, Base: AArch64::D0, First: AArch64::D8, Last: AArch64::D14) || |
7454 | parseComma() || parseImmExpr(Out&: Offset)) |
7455 | return true; |
7456 | getTargetStreamer().emitARM64WinCFISaveFRegP(Reg, Offset); |
7457 | return false; |
7458 | } |
7459 | |
7460 | /// parseDirectiveSEHSaveFRegPX |
7461 | /// ::= .seh_save_fregp_x |
7462 | bool AArch64AsmParser::parseDirectiveSEHSaveFRegPX(SMLoc L) { |
7463 | unsigned Reg; |
7464 | int64_t Offset; |
7465 | if (parseRegisterInRange(Out&: Reg, Base: AArch64::D0, First: AArch64::D8, Last: AArch64::D14) || |
7466 | parseComma() || parseImmExpr(Out&: Offset)) |
7467 | return true; |
7468 | getTargetStreamer().emitARM64WinCFISaveFRegPX(Reg, Offset); |
7469 | return false; |
7470 | } |
7471 | |
7472 | /// parseDirectiveSEHSetFP |
7473 | /// ::= .seh_set_fp |
7474 | bool AArch64AsmParser::parseDirectiveSEHSetFP(SMLoc L) { |
7475 | getTargetStreamer().emitARM64WinCFISetFP(); |
7476 | return false; |
7477 | } |
7478 | |
7479 | /// parseDirectiveSEHAddFP |
7480 | /// ::= .seh_add_fp |
7481 | bool AArch64AsmParser::parseDirectiveSEHAddFP(SMLoc L) { |
7482 | int64_t Size; |
7483 | if (parseImmExpr(Out&: Size)) |
7484 | return true; |
7485 | getTargetStreamer().emitARM64WinCFIAddFP(Size); |
7486 | return false; |
7487 | } |
7488 | |
7489 | /// parseDirectiveSEHNop |
7490 | /// ::= .seh_nop |
7491 | bool AArch64AsmParser::parseDirectiveSEHNop(SMLoc L) { |
7492 | getTargetStreamer().emitARM64WinCFINop(); |
7493 | return false; |
7494 | } |
7495 | |
7496 | /// parseDirectiveSEHSaveNext |
7497 | /// ::= .seh_save_next |
7498 | bool AArch64AsmParser::parseDirectiveSEHSaveNext(SMLoc L) { |
7499 | getTargetStreamer().emitARM64WinCFISaveNext(); |
7500 | return false; |
7501 | } |
7502 | |
7503 | /// parseDirectiveSEHEpilogStart |
7504 | /// ::= .seh_startepilogue |
7505 | bool AArch64AsmParser::parseDirectiveSEHEpilogStart(SMLoc L) { |
7506 | getTargetStreamer().emitARM64WinCFIEpilogStart(); |
7507 | return false; |
7508 | } |
7509 | |
7510 | /// parseDirectiveSEHEpilogEnd |
7511 | /// ::= .seh_endepilogue |
7512 | bool AArch64AsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) { |
7513 | getTargetStreamer().emitARM64WinCFIEpilogEnd(); |
7514 | return false; |
7515 | } |
7516 | |
7517 | /// parseDirectiveSEHTrapFrame |
7518 | /// ::= .seh_trap_frame |
7519 | bool AArch64AsmParser::parseDirectiveSEHTrapFrame(SMLoc L) { |
7520 | getTargetStreamer().emitARM64WinCFITrapFrame(); |
7521 | return false; |
7522 | } |
7523 | |
7524 | /// parseDirectiveSEHMachineFrame |
7525 | /// ::= .seh_pushframe |
7526 | bool AArch64AsmParser::parseDirectiveSEHMachineFrame(SMLoc L) { |
7527 | getTargetStreamer().emitARM64WinCFIMachineFrame(); |
7528 | return false; |
7529 | } |
7530 | |
7531 | /// parseDirectiveSEHContext |
7532 | /// ::= .seh_context |
7533 | bool AArch64AsmParser::parseDirectiveSEHContext(SMLoc L) { |
7534 | getTargetStreamer().emitARM64WinCFIContext(); |
7535 | return false; |
7536 | } |
7537 | |
7538 | /// parseDirectiveSEHECContext |
7539 | /// ::= .seh_ec_context |
7540 | bool AArch64AsmParser::parseDirectiveSEHECContext(SMLoc L) { |
7541 | getTargetStreamer().emitARM64WinCFIECContext(); |
7542 | return false; |
7543 | } |
7544 | |
7545 | /// parseDirectiveSEHClearUnwoundToCall |
7546 | /// ::= .seh_clear_unwound_to_call |
7547 | bool AArch64AsmParser::parseDirectiveSEHClearUnwoundToCall(SMLoc L) { |
7548 | getTargetStreamer().emitARM64WinCFIClearUnwoundToCall(); |
7549 | return false; |
7550 | } |
7551 | |
7552 | /// parseDirectiveSEHPACSignLR |
7553 | /// ::= .seh_pac_sign_lr |
7554 | bool AArch64AsmParser::parseDirectiveSEHPACSignLR(SMLoc L) { |
7555 | getTargetStreamer().emitARM64WinCFIPACSignLR(); |
7556 | return false; |
7557 | } |
7558 | |
/// parseDirectiveSEHSaveAnyReg
/// ::= .seh_save_any_reg
/// ::= .seh_save_any_reg_p
/// ::= .seh_save_any_reg_x
/// ::= .seh_save_any_reg_px
///
/// Operands: <reg>, <offset>. The directive flavour selects \p Paired
/// (_p/_px suffix) and \p Writeback (_x/_px suffix); the register class
/// (x/FP/LR vs d vs q) picks which save_any_reg streamer callback is used.
/// Returns true (with a diagnostic) on error, false on success.
bool AArch64AsmParser::parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired,
                                                   bool Writeback) {
  MCRegister Reg;
  SMLoc Start, End;
  int64_t Offset;
  // Parse "<reg>, <imm>"; any sub-parse failure has already produced a
  // diagnostic, so just propagate.
  if (check(P: parseRegister(Reg, StartLoc&: Start, EndLoc&: End), Loc: getLoc(), Msg: "expected register" ) ||
      parseComma() || parseImmExpr(Out&: Offset))
    return true;

  if (Reg == AArch64::FP || Reg == AArch64::LR ||
      (Reg >= AArch64::X0 && Reg <= AArch64::X28)) {
    // Integer registers: offset must be non-negative and 8-byte aligned,
    // or 16-byte aligned when the slot is paired or uses writeback.
    if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
      return Error(L, Msg: "invalid save_any_reg offset" );
    // FP and LR are encoded as x29/x30; other x-registers by their number.
    unsigned EncodedReg;
    if (Reg == AArch64::FP)
      EncodedReg = 29;
    else if (Reg == AArch64::LR)
      EncodedReg = 30;
    else
      EncodedReg = Reg - AArch64::X0;
    if (Paired) {
      // A pair saves Reg and Reg+1, so the last register cannot start a pair.
      if (Reg == AArch64::LR)
        return Error(L: Start, Msg: "lr cannot be paired with another register" );
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegIPX(Reg: EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegIP(Reg: EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegIX(Reg: EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegI(Reg: EncodedReg, Offset);
    }
  } else if (Reg >= AArch64::D0 && Reg <= AArch64::D31) {
    // 64-bit FP registers: same alignment rule as the integer case.
    unsigned EncodedReg = Reg - AArch64::D0;
    if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
      return Error(L, Msg: "invalid save_any_reg offset" );
    if (Paired) {
      if (Reg == AArch64::D31)
        return Error(L: Start, Msg: "d31 cannot be paired with another register" );
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegDPX(Reg: EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegDP(Reg: EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegDX(Reg: EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegD(Reg: EncodedReg, Offset);
    }
  } else if (Reg >= AArch64::Q0 && Reg <= AArch64::Q31) {
    // 128-bit vector registers: always require 16-byte alignment.
    unsigned EncodedReg = Reg - AArch64::Q0;
    if (Offset < 0 || Offset % 16)
      return Error(L, Msg: "invalid save_any_reg offset" );
    if (Paired) {
      if (Reg == AArch64::Q31)
        return Error(L: Start, Msg: "q31 cannot be paired with another register" );
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegQPX(Reg: EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegQP(Reg: EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegQX(Reg: EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegQ(Reg: EncodedReg, Offset);
    }
  } else {
    return Error(L: Start, Msg: "save_any_reg register must be x, q or d register" );
  }
  return false;
}
7636 | |
7637 | bool AArch64AsmParser::parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) { |
7638 | // Try @AUTH expressions: they're more complex than the usual symbol variants. |
7639 | if (!parseAuthExpr(Res, EndLoc)) |
7640 | return false; |
7641 | return getParser().parsePrimaryExpr(Res, EndLoc, TypeInfo: nullptr); |
7642 | } |
7643 | |
/// parseAuthExpr
/// ::= _sym@AUTH(ib,123[,addr])
/// ::= (_sym + 5)@AUTH(ib,123[,addr])
/// ::= (_sym - 5)@AUTH(ib,123[,addr])
///
/// Returns false and sets \p Res / \p EndLoc when a complete @AUTH
/// expression was parsed. Returns true otherwise — either because the
/// tokens do not look like an @AUTH form (no tokens consumed beyond the
/// sub-expression, caller may fall back) or because a malformed @AUTH
/// form was found (a diagnostic has been emitted via TokError/parseToken).
bool AArch64AsmParser::parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc) {
  MCAsmParser &Parser = getParser();
  MCContext &Ctx = getContext();

  AsmToken Tok = Parser.getTok();

  // Look for '_sym@AUTH' ...
  // The lexer glues the suffix onto the identifier token, so detect it by
  // string suffix and strip it off to recover the symbol name.
  if (Tok.is(K: AsmToken::Identifier) && Tok.getIdentifier().ends_with(Suffix: "@AUTH" )) {
    StringRef SymName = Tok.getIdentifier().drop_back(N: strlen(s: "@AUTH" ));
    // A second '@' would mean another modifier was combined with @AUTH.
    if (SymName.contains(C: '@'))
      return TokError(
          Msg: "combination of @AUTH with other modifiers not supported" );
    Res = MCSymbolRefExpr::create(Symbol: Ctx.getOrCreateSymbol(Name: SymName), Ctx);

    Parser.Lex(); // Eat the identifier.
  } else {
    // ... or look for a more complex symbol reference, such as ...
    SmallVector<AsmToken, 6> Tokens;

    // ... '"_long sym"@AUTH' ...
    if (Tok.is(K: AsmToken::String))
      Tokens.resize(N: 2);
    // ... or '(_sym + 5)@AUTH'.
    else if (Tok.is(K: AsmToken::LParen))
      Tokens.resize(N: 6);
    else
      return true;

    // Peek ahead without consuming, so we can still fall back cleanly.
    if (Parser.getLexer().peekTokens(Buf: Tokens) != Tokens.size())
      return true;

    // In either case, the expression ends with '@' 'AUTH'.
    if (Tokens[Tokens.size() - 2].isNot(K: AsmToken::At) ||
        Tokens[Tokens.size() - 1].isNot(K: AsmToken::Identifier) ||
        Tokens[Tokens.size() - 1].getIdentifier() != "AUTH" )
      return true;

    if (Tok.is(K: AsmToken::String)) {
      StringRef SymName;
      if (Parser.parseIdentifier(Res&: SymName))
        return true;
      Res = MCSymbolRefExpr::create(Symbol: Ctx.getOrCreateSymbol(Name: SymName), Ctx);
    } else {
      if (Parser.parsePrimaryExpr(Res, EndLoc, TypeInfo: nullptr))
        return true;
    }

    Parser.Lex(); // '@'
    Parser.Lex(); // 'AUTH'
  }

  // At this point, we encountered "<id>@AUTH". There is no fallback anymore.
  if (parseToken(T: AsmToken::LParen, Msg: "expected '('" ))
    return true;

  if (Parser.getTok().isNot(K: AsmToken::Identifier))
    return TokError(Msg: "expected key name" );

  // First argument: the PAC key (e.g. ia/ib/da/db).
  StringRef KeyStr = Parser.getTok().getIdentifier();
  auto KeyIDOrNone = AArch64StringToPACKeyID(Name: KeyStr);
  if (!KeyIDOrNone)
    return TokError(Msg: "invalid key '" + KeyStr + "'" );
  Parser.Lex();

  if (parseToken(T: AsmToken::Comma, Msg: "expected ','" ))
    return true;

  // Second argument: a 16-bit integer discriminator.
  if (Parser.getTok().isNot(K: AsmToken::Integer))
    return TokError(Msg: "expected integer discriminator" );
  int64_t Discriminator = Parser.getTok().getIntVal();

  if (!isUInt<16>(x: Discriminator))
    return TokError(Msg: "integer discriminator " + Twine(Discriminator) +
                    " out of range [0, 0xFFFF]" );
  Parser.Lex();

  // Optional third argument: the literal 'addr' enables address diversity.
  bool UseAddressDiversity = false;
  if (Parser.getTok().is(K: AsmToken::Comma)) {
    Parser.Lex();
    if (Parser.getTok().isNot(K: AsmToken::Identifier) ||
        Parser.getTok().getIdentifier() != "addr" )
      return TokError(Msg: "expected 'addr'" );
    UseAddressDiversity = true;
    Parser.Lex();
  }

  EndLoc = Parser.getTok().getEndLoc();
  if (parseToken(T: AsmToken::RParen, Msg: "expected ')'" ))
    return true;

  // Wrap the parsed sub-expression into the target-specific AUTH expr.
  Res = AArch64AuthMCExpr::create(Expr: Res, Discriminator, Key: *KeyIDOrNone,
                                  HasAddressDiversity: UseAddressDiversity, Ctx);
  return false;
}
7742 | |
/// Classify \p Expr into an AArch64 ELF modifier kind, a Darwin symbol-ref
/// kind, and a constant addend. Returns true if the expression is a
/// recognizable symbol reference (possibly plus a constant), false if it
/// cannot be classified or mixes Darwin and ELF syntax.
bool
AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
                                    AArch64MCExpr::VariantKind &ELFRefKind,
                                    MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                    int64_t &Addend) {
  ELFRefKind = AArch64MCExpr::VK_INVALID;
  DarwinRefKind = MCSymbolRefExpr::VK_None;
  Addend = 0;

  // Peel off a target-specific wrapper (e.g. :lo12:) if present.
  if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Val: Expr)) {
    ELFRefKind = AE->getKind();
    Expr = AE->getSubExpr();
  }

  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Val: Expr);
  if (SE) {
    // It's a simple symbol reference with no addend.
    DarwinRefKind = SE->getKind();
    return true;
  }

  // Check that it looks like a symbol + an addend
  MCValue Res;
  bool Relocatable = Expr->evaluateAsRelocatable(Res, Asm: nullptr, Fixup: nullptr);
  // A subtracted symbol (SymB) can't be represented here.
  if (!Relocatable || Res.getSymB())
    return false;

  // Treat expressions with an ELFRefKind (like ":abs_g1:3", or
  // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
  if (!Res.getSymA() && ELFRefKind == AArch64MCExpr::VK_INVALID)
    return false;

  if (Res.getSymA())
    DarwinRefKind = Res.getSymA()->getKind();
  Addend = Res.getConstant();

  // It's some symbol reference + a constant addend, but really
  // shouldn't use both Darwin and ELF syntax.
  return ELFRefKind == AArch64MCExpr::VK_INVALID ||
         DarwinRefKind == MCSymbolRefExpr::VK_None;
}
7784 | |
7785 | /// Force static initialization. |
7786 | extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmParser() { |
7787 | RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget()); |
7788 | RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget()); |
7789 | RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target()); |
7790 | RegisterMCAsmParser<AArch64AsmParser> W(getTheARM64_32Target()); |
7791 | RegisterMCAsmParser<AArch64AsmParser> V(getTheAArch64_32Target()); |
7792 | } |
7793 | |
7794 | #define GET_REGISTER_MATCHER |
7795 | #define GET_SUBTARGET_FEATURE_NAME |
7796 | #define GET_MATCHER_IMPLEMENTATION |
7797 | #define GET_MNEMONIC_SPELL_CHECKER |
7798 | #include "AArch64GenAsmMatcher.inc" |
7799 | |
// Define this matcher function after the auto-generated include so we
// have the match class enum definitions.
//
// Target hook: decide whether operand AsmOp can satisfy the tablegen match
// class `Kind` when the generated matcher alone could not. Returns
// Match_Success or Match_InvalidOperand.
unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
                                                      unsigned Kind) {
  AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);

  // True iff Op is a constant immediate equal to ExpectedVal.
  auto MatchesOpImmediate = [&](int64_t ExpectedVal) -> MatchResultTy {
    if (!Op.isImm())
      return Match_InvalidOperand;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Op.getImm());
    if (!CE)
      return Match_InvalidOperand;
    if (CE->getValue() == ExpectedVal)
      return Match_Success;
    return Match_InvalidOperand;
  };

  switch (Kind) {
  default:
    return Match_InvalidOperand;
  case MCK_MPR:
    // If the Kind is a token for the MPR register class which has the "za"
    // register (SME accumulator array), check if the asm is a literal "za"
    // token. This is for the "smstart za" alias that defines the register
    // as a literal token.
    if (Op.isTokenEqual(Str: "za" ))
      return Match_Success;
    return Match_InvalidOperand;

  // If the kind is a token for a literal immediate, check if our asm operand
  // matches. This is for InstAliases which have a fixed-value immediate in
  // the asm string, such as hints which are parsed into a specific
  // instruction definition.
#define MATCH_HASH(N)                                                          \
  case MCK__HASH_##N:                                                          \
    return MatchesOpImmediate(N);
    MATCH_HASH(0)
    MATCH_HASH(1)
    MATCH_HASH(2)
    MATCH_HASH(3)
    MATCH_HASH(4)
    MATCH_HASH(6)
    MATCH_HASH(7)
    MATCH_HASH(8)
    MATCH_HASH(10)
    MATCH_HASH(12)
    MATCH_HASH(14)
    MATCH_HASH(16)
    MATCH_HASH(24)
    MATCH_HASH(25)
    MATCH_HASH(26)
    MATCH_HASH(27)
    MATCH_HASH(28)
    MATCH_HASH(29)
    MATCH_HASH(30)
    MATCH_HASH(31)
    MATCH_HASH(32)
    MATCH_HASH(40)
    MATCH_HASH(48)
    MATCH_HASH(64)
#undef MATCH_HASH
  // Same, but for the negated literal immediates (e.g. "#-8").
#define MATCH_HASH_MINUS(N)                                                    \
  case MCK__HASH__MINUS_##N:                                                   \
    return MatchesOpImmediate(-N);
    MATCH_HASH_MINUS(4)
    MATCH_HASH_MINUS(8)
    MATCH_HASH_MINUS(16)
#undef MATCH_HASH_MINUS
  }
}
7870 | |
/// Parse a consecutive even/odd GPR pair operand (e.g. "x0, x1" or
/// "w2, w3") and push the matching XSeqPairs/WSeqPairs super-register.
/// Both registers must be the same size and the second must encode to
/// first+1 with the first even.
ParseStatus AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {

  SMLoc S = getLoc();

  if (getTok().isNot(K: AsmToken::Identifier))
    return Error(L: S, Msg: "expected register" );

  MCRegister FirstReg;
  ParseStatus Res = tryParseScalarRegister(RegNum&: FirstReg);
  if (!Res.isSuccess())
    return Error(L: S, Msg: "expected first even register of a consecutive same-size "
                    "even/odd register pair" );

  const MCRegisterClass &WRegClass =
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
  const MCRegisterClass &XRegClass =
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID];

  // Determine the operand width from the first register; the second must
  // belong to the same class.
  bool isXReg = XRegClass.contains(Reg: FirstReg),
       isWReg = WRegClass.contains(Reg: FirstReg);
  if (!isXReg && !isWReg)
    return Error(L: S, Msg: "expected first even register of a consecutive same-size "
                    "even/odd register pair" );

  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  unsigned FirstEncoding = RI->getEncodingValue(RegNo: FirstReg);

  // The pair must start on an even encoding.
  if (FirstEncoding & 0x1)
    return Error(L: S, Msg: "expected first even register of a consecutive same-size "
                    "even/odd register pair" );

  if (getTok().isNot(K: AsmToken::Comma))
    return Error(L: getLoc(), Msg: "expected comma" );
  // Eat the comma
  Lex();

  SMLoc E = getLoc();
  MCRegister SecondReg;
  Res = tryParseScalarRegister(RegNum&: SecondReg);
  if (!Res.isSuccess())
    return Error(L: E, Msg: "expected second odd register of a consecutive same-size "
                    "even/odd register pair" );

  // Second register: adjacent encoding (first+1) and same register class.
  if (RI->getEncodingValue(RegNo: SecondReg) != FirstEncoding + 1 ||
      (isXReg && !XRegClass.contains(Reg: SecondReg)) ||
      (isWReg && !WRegClass.contains(Reg: SecondReg)))
    return Error(L: E, Msg: "expected second odd register of a consecutive same-size "
                    "even/odd register pair" );

  // Map the first register onto the pair super-register that represents
  // the operand.
  unsigned Pair = 0;
  if (isXReg) {
    Pair = RI->getMatchingSuperReg(Reg: FirstReg, SubIdx: AArch64::sube64,
           RC: &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
  } else {
    Pair = RI->getMatchingSuperReg(Reg: FirstReg, SubIdx: AArch64::sube32,
           RC: &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
  }

  Operands.push_back(Elt: AArch64Operand::CreateReg(RegNum: Pair, Kind: RegKind::Scalar, S,
      E: getLoc(), Ctx&: getContext()));

  return ParseStatus::Success;
}
7934 | |
/// Parse an SVE data vector register operand (e.g. "z0" or "z0.s"),
/// optionally followed by a vector index or a shift/extend.
/// \tparam ParseSuffix      require an element-size suffix (".b"/".h"/...).
/// \tparam ParseShiftExtend also accept ", <shift/extend>" after the register.
template <bool ParseShiftExtend, bool ParseSuffix>
ParseStatus AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
  const SMLoc S = getLoc();
  // Check for a SVE vector register specifier first.
  MCRegister RegNum;
  StringRef Kind;

  ParseStatus Res =
      tryParseVectorRegister(Reg&: RegNum, Kind, MatchKind: RegKind::SVEDataVector);

  if (!Res.isSuccess())
    return Res;

  if (ParseSuffix && Kind.empty())
    return ParseStatus::NoMatch;

  // Validate the suffix and get the element width it denotes.
  const auto &KindRes = parseVectorKind(Suffix: Kind, VectorKind: RegKind::SVEDataVector);
  if (!KindRes)
    return ParseStatus::NoMatch;

  unsigned ElementWidth = KindRes->second;

  // No shift/extend is the default.
  if (!ParseShiftExtend || getTok().isNot(K: AsmToken::Comma)) {
    Operands.push_back(Elt: AArch64Operand::CreateVectorReg(
        RegNum, Kind: RegKind::SVEDataVector, ElementWidth, S, E: S, Ctx&: getContext()));

    // An optional "[index]" may follow; only a hard failure propagates.
    // (This inner Res intentionally shadows the outer one.)
    ParseStatus Res = tryParseVectorIndex(Operands);
    if (Res.isFailure())
      return ParseStatus::Failure;
    return ParseStatus::Success;
  }

  // Eat the comma
  Lex();

  // Match the shift
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
  Res = tryParseOptionalShiftExtend(Operands&: ExtOpnd);
  if (!Res.isSuccess())
    return Res;

  // Fold the parsed shift/extend into the vector register operand.
  auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
  Operands.push_back(Elt: AArch64Operand::CreateVectorReg(
      RegNum, Kind: RegKind::SVEDataVector, ElementWidth, S, E: Ext->getEndLoc(),
      Ctx&: getContext(), ExtTy: Ext->getShiftExtendType(), ShiftAmount: Ext->getShiftExtendAmount(),
      HasExplicitAmount: Ext->hasShiftExtendAmount()));

  return ParseStatus::Success;
}
7985 | |
/// Parse an SVE predicate pattern operand, either as an immediate
/// ("#31") or as a named pattern ("vl1", "mul4", "all", ...), and push it
/// as a constant-immediate operand.
ParseStatus AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();

  SMLoc SS = getLoc();
  const AsmToken &TokE = getTok();
  bool IsHash = TokE.is(K: AsmToken::Hash);

  // Only '#imm' or an identifier can start a pattern operand.
  if (!IsHash && TokE.isNot(K: AsmToken::Identifier))
    return ParseStatus::NoMatch;

  int64_t Pattern;
  if (IsHash) {
    Lex(); // Eat hash

    // Parse the immediate operand.
    const MCExpr *ImmVal;
    SS = getLoc();
    if (Parser.parseExpression(Res&: ImmVal))
      return ParseStatus::Failure;

    // The immediate must fold to a constant.
    auto *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
    if (!MCE)
      return TokError(Msg: "invalid operand for instruction" );

    Pattern = MCE->getValue();
  } else {
    // Parse the pattern
    auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(Name: TokE.getString());
    if (!Pat)
      return ParseStatus::NoMatch;

    Lex();
    Pattern = Pat->Encoding;
    assert(Pattern >= 0 && Pattern < 32);
  }

  Operands.push_back(
      Elt: AArch64Operand::CreateImm(Val: MCConstantExpr::create(Value: Pattern, Ctx&: getContext()),
                                 S: SS, E: getLoc(), Ctx&: getContext()));

  return ParseStatus::Success;
}
8028 | |
8029 | ParseStatus |
8030 | AArch64AsmParser::tryParseSVEVecLenSpecifier(OperandVector &Operands) { |
8031 | int64_t Pattern; |
8032 | SMLoc SS = getLoc(); |
8033 | const AsmToken &TokE = getTok(); |
8034 | // Parse the pattern |
8035 | auto Pat = AArch64SVEVecLenSpecifier::lookupSVEVECLENSPECIFIERByName( |
8036 | Name: TokE.getString()); |
8037 | if (!Pat) |
8038 | return ParseStatus::NoMatch; |
8039 | |
8040 | Lex(); |
8041 | Pattern = Pat->Encoding; |
8042 | assert(Pattern >= 0 && Pattern <= 1 && "Pattern does not exist" ); |
8043 | |
8044 | Operands.push_back( |
8045 | Elt: AArch64Operand::CreateImm(Val: MCConstantExpr::create(Value: Pattern, Ctx&: getContext()), |
8046 | S: SS, E: getLoc(), Ctx&: getContext())); |
8047 | |
8048 | return ParseStatus::Success; |
8049 | } |
8050 | |
8051 | ParseStatus AArch64AsmParser::tryParseGPR64x8(OperandVector &Operands) { |
8052 | SMLoc SS = getLoc(); |
8053 | |
8054 | MCRegister XReg; |
8055 | if (!tryParseScalarRegister(RegNum&: XReg).isSuccess()) |
8056 | return ParseStatus::NoMatch; |
8057 | |
8058 | MCContext &ctx = getContext(); |
8059 | const MCRegisterInfo *RI = ctx.getRegisterInfo(); |
8060 | int X8Reg = RI->getMatchingSuperReg( |
8061 | Reg: XReg, SubIdx: AArch64::x8sub_0, |
8062 | RC: &AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]); |
8063 | if (!X8Reg) |
8064 | return Error(L: SS, |
8065 | Msg: "expected an even-numbered x-register in the range [x0,x22]" ); |
8066 | |
8067 | Operands.push_back( |
8068 | Elt: AArch64Operand::CreateReg(RegNum: X8Reg, Kind: RegKind::Scalar, S: SS, E: getLoc(), Ctx&: ctx)); |
8069 | return ParseStatus::Success; |
8070 | } |
8071 | |
/// Parse an immediate-range operand of the form "<first>:<last>"
/// (e.g. "0:1") and push it as an ImmRange operand.
ParseStatus AArch64AsmParser::tryParseImmRange(OperandVector &Operands) {
  SMLoc S = getLoc();

  // Must start with an integer immediately followed by ':'.
  if (getTok().isNot(K: AsmToken::Integer))
    return ParseStatus::NoMatch;

  if (getLexer().peekTok().isNot(K: AsmToken::Colon))
    return ParseStatus::NoMatch;

  const MCExpr *ImmF;
  if (getParser().parseExpression(Res&: ImmF))
    return ParseStatus::NoMatch;

  if (getTok().isNot(K: AsmToken::Colon))
    return ParseStatus::NoMatch;

  Lex(); // Eat ':'
  if (getTok().isNot(K: AsmToken::Integer))
    return ParseStatus::NoMatch;

  SMLoc E = getTok().getLoc();
  const MCExpr *ImmL;
  if (getParser().parseExpression(Res&: ImmL))
    return ParseStatus::NoMatch;

  // NOTE(review): these casts assume both bounds fold to MCConstantExpr;
  // since each side starts from an Integer token this presumably holds,
  // but a non-constant expression here would assert — TODO confirm.
  unsigned ImmFVal = cast<MCConstantExpr>(Val: ImmF)->getValue();
  unsigned ImmLVal = cast<MCConstantExpr>(Val: ImmL)->getValue();

  Operands.push_back(
      Elt: AArch64Operand::CreateImmRange(First: ImmFVal, Last: ImmLVal, S, E, Ctx&: getContext()));
  return ParseStatus::Success;
}
8104 | |