1//===- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions -------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "ARMBaseInstrInfo.h"
10#include "ARMFeatures.h"
11#include "MCTargetDesc/ARMAddressingModes.h"
12#include "MCTargetDesc/ARMBaseInfo.h"
13#include "MCTargetDesc/ARMInstPrinter.h"
14#include "MCTargetDesc/ARMMCAsmInfo.h"
15#include "MCTargetDesc/ARMMCTargetDesc.h"
16#include "TargetInfo/ARMTargetInfo.h"
17#include "Utils/ARMBaseInfo.h"
18#include "llvm/ADT/APFloat.h"
19#include "llvm/ADT/APInt.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallBitVector.h"
22#include "llvm/ADT/SmallSet.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/ADT/StringMap.h"
25#include "llvm/ADT/StringRef.h"
26#include "llvm/ADT/StringSet.h"
27#include "llvm/ADT/StringSwitch.h"
28#include "llvm/ADT/Twine.h"
29#include "llvm/MC/MCContext.h"
30#include "llvm/MC/MCExpr.h"
31#include "llvm/MC/MCInst.h"
32#include "llvm/MC/MCInstrDesc.h"
33#include "llvm/MC/MCInstrInfo.h"
34#include "llvm/MC/MCParser/AsmLexer.h"
35#include "llvm/MC/MCParser/MCAsmParser.h"
36#include "llvm/MC/MCParser/MCAsmParserExtension.h"
37#include "llvm/MC/MCParser/MCAsmParserUtils.h"
38#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
39#include "llvm/MC/MCParser/MCTargetAsmParser.h"
40#include "llvm/MC/MCRegisterInfo.h"
41#include "llvm/MC/MCSection.h"
42#include "llvm/MC/MCStreamer.h"
43#include "llvm/MC/MCSubtargetInfo.h"
44#include "llvm/MC/MCSymbol.h"
45#include "llvm/MC/TargetRegistry.h"
46#include "llvm/Support/ARMBuildAttributes.h"
47#include "llvm/Support/ARMEHABI.h"
48#include "llvm/Support/Casting.h"
49#include "llvm/Support/CommandLine.h"
50#include "llvm/Support/Compiler.h"
51#include "llvm/Support/Debug.h"
52#include "llvm/Support/ErrorHandling.h"
53#include "llvm/Support/MathExtras.h"
54#include "llvm/Support/SMLoc.h"
55#include "llvm/Support/raw_ostream.h"
56#include "llvm/TargetParser/SubtargetFeature.h"
57#include "llvm/TargetParser/TargetParser.h"
58#include <algorithm>
59#include <cassert>
60#include <cstddef>
61#include <cstdint>
62#include <iterator>
63#include <limits>
64#include <memory>
65#include <optional>
66#include <string>
67#include <utility>
68#include <vector>
69
70#define DEBUG_TYPE "asm-parser"
71
72using namespace llvm;
73
74namespace {
75class ARMOperand;
76
// Policy for conditional instructions that appear outside an IT block.
enum class ImplicitItModeTy { Always, Never, ARMOnly, ThumbOnly };

// Controls whether the parser accepts conditional instructions outside an
// explicit IT block and, in Thumb mode, whether it synthesizes the IT
// instructions implicitly.
static cl::opt<ImplicitItModeTy> ImplicitItMode(
    "arm-implicit-it", cl::init(Val: ImplicitItModeTy::ARMOnly),
    cl::desc("Allow conditional instructions outside of an IT block"),
    cl::values(clEnumValN(ImplicitItModeTy::Always, "always",
                          "Accept in both ISAs, emit implicit ITs in Thumb"),
               clEnumValN(ImplicitItModeTy::Never, "never",
                          "Warn in ARM, reject in Thumb"),
               clEnumValN(ImplicitItModeTy::ARMOnly, "arm",
                          "Accept in ARM, reject in Thumb"),
               clEnumValN(ImplicitItModeTy::ThumbOnly, "thumb",
                          "Warn in ARM, emit implicit ITs in Thumb")));

// When set, build attributes for the selected target are emitted at parser
// construction time (see the ARMAsmParser constructor).
static cl::opt<bool> AddBuildAttributes("arm-add-build-attributes",
                                        cl::init(Val: false));

// Kind of lane specification seen on a parsed vector list: no lanes,
// all-lanes ("[]"), or a single indexed lane ("[n]").
enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
95
// Extract the then/else state bit for a given slot of an IT block mask.
// Position==0 means we're not in an IT block at all. Position==1 selects the
// first state bit, which is always 0 (Then). Position==2 selects the second
// state bit, stored at bit 3 of Mask, and so on downwards. Shifting by
// (5 - Position) therefore brings the requested bit down to bit 0, with the
// always-0 bit for the mandatory initial Then sitting at bit 4.
static inline unsigned extractITMaskBit(unsigned Mask, unsigned Position) {
  unsigned Shift = 5 - Position;
  return (Mask >> Shift) & 1u;
}
105
106class UnwindContext {
107 using Locs = SmallVector<SMLoc, 4>;
108
109 MCAsmParser &Parser;
110 Locs FnStartLocs;
111 Locs CantUnwindLocs;
112 Locs PersonalityLocs;
113 Locs PersonalityIndexLocs;
114 Locs HandlerDataLocs;
115 MCRegister FPReg;
116
117public:
118 UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {}
119
120 bool hasFnStart() const { return !FnStartLocs.empty(); }
121 bool cantUnwind() const { return !CantUnwindLocs.empty(); }
122 bool hasHandlerData() const { return !HandlerDataLocs.empty(); }
123
124 bool hasPersonality() const {
125 return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
126 }
127
128 void recordFnStart(SMLoc L) { FnStartLocs.push_back(Elt: L); }
129 void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(Elt: L); }
130 void recordPersonality(SMLoc L) { PersonalityLocs.push_back(Elt: L); }
131 void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(Elt: L); }
132 void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(Elt: L); }
133
134 void saveFPReg(MCRegister Reg) { FPReg = Reg; }
135 MCRegister getFPReg() const { return FPReg; }
136
137 void emitFnStartLocNotes() const {
138 for (const SMLoc &Loc : FnStartLocs)
139 Parser.Note(L: Loc, Msg: ".fnstart was specified here");
140 }
141
142 void emitCantUnwindLocNotes() const {
143 for (const SMLoc &Loc : CantUnwindLocs)
144 Parser.Note(L: Loc, Msg: ".cantunwind was specified here");
145 }
146
147 void emitHandlerDataLocNotes() const {
148 for (const SMLoc &Loc : HandlerDataLocs)
149 Parser.Note(L: Loc, Msg: ".handlerdata was specified here");
150 }
151
152 void emitPersonalityLocNotes() const {
153 for (Locs::const_iterator PI = PersonalityLocs.begin(),
154 PE = PersonalityLocs.end(),
155 PII = PersonalityIndexLocs.begin(),
156 PIE = PersonalityIndexLocs.end();
157 PI != PE || PII != PIE;) {
158 if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
159 Parser.Note(L: *PI++, Msg: ".personality was specified here");
160 else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
161 Parser.Note(L: *PII++, Msg: ".personalityindex was specified here");
162 else
163 llvm_unreachable(".personality and .personalityindex cannot be "
164 "at the same location");
165 }
166 }
167
168 void reset() {
169 FnStartLocs = Locs();
170 CantUnwindLocs = Locs();
171 PersonalityLocs = Locs();
172 HandlerDataLocs = Locs();
173 PersonalityIndexLocs = Locs();
174 FPReg = ARM::SP;
175 }
176};
177
178// Various sets of ARM instruction mnemonics which are used by the asm parser
179class ARMMnemonicSets {
180 StringSet<> CDE;
181 StringSet<> CDEWithVPTSuffix;
182public:
183 ARMMnemonicSets(const MCSubtargetInfo &STI);
184
185 /// Returns true iff a given mnemonic is a CDE instruction
186 bool isCDEInstr(StringRef Mnemonic) {
187 // Quick check before searching the set
188 if (!Mnemonic.starts_with(Prefix: "cx") && !Mnemonic.starts_with(Prefix: "vcx"))
189 return false;
190 return CDE.count(Key: Mnemonic);
191 }
192
193 /// Returns true iff a given mnemonic is a VPT-predicable CDE instruction
194 /// (possibly with a predication suffix "e" or "t")
195 bool isVPTPredicableCDEInstr(StringRef Mnemonic) {
196 if (!Mnemonic.starts_with(Prefix: "vcx"))
197 return false;
198 return CDEWithVPTSuffix.count(Key: Mnemonic);
199 }
200
201 /// Returns true iff a given mnemonic is an IT-predicable CDE instruction
202 /// (possibly with a condition suffix)
203 bool isITPredicableCDEInstr(StringRef Mnemonic) {
204 if (!Mnemonic.starts_with(Prefix: "cx"))
205 return false;
206 return Mnemonic.starts_with(Prefix: "cx1a") || Mnemonic.starts_with(Prefix: "cx1da") ||
207 Mnemonic.starts_with(Prefix: "cx2a") || Mnemonic.starts_with(Prefix: "cx2da") ||
208 Mnemonic.starts_with(Prefix: "cx3a") || Mnemonic.starts_with(Prefix: "cx3da");
209 }
210
211 /// Return true iff a given mnemonic is an integer CDE instruction with
212 /// dual-register destination
213 bool isCDEDualRegInstr(StringRef Mnemonic) {
214 if (!Mnemonic.starts_with(Prefix: "cx"))
215 return false;
216 return Mnemonic == "cx1d" || Mnemonic == "cx1da" ||
217 Mnemonic == "cx2d" || Mnemonic == "cx2da" ||
218 Mnemonic == "cx3d" || Mnemonic == "cx3da";
219 }
220};
221
222ARMMnemonicSets::ARMMnemonicSets(const MCSubtargetInfo &STI) {
223 for (StringRef Mnemonic: { "cx1", "cx1a", "cx1d", "cx1da",
224 "cx2", "cx2a", "cx2d", "cx2da",
225 "cx3", "cx3a", "cx3d", "cx3da", })
226 CDE.insert(key: Mnemonic);
227 for (StringRef Mnemonic :
228 {"vcx1", "vcx1a", "vcx2", "vcx2a", "vcx3", "vcx3a"}) {
229 CDE.insert(key: Mnemonic);
230 CDEWithVPTSuffix.insert(key: Mnemonic);
231 CDEWithVPTSuffix.insert(key: std::string(Mnemonic) + "t");
232 CDEWithVPTSuffix.insert(key: std::string(Mnemonic) + "e");
233 }
234}
235
/// Target-specific assembly parser for ARM and Thumb. Beyond operand and
/// directive parsing, it tracks cross-instruction state: the current IT and
/// VPT blocks, unwind-directive state (via UnwindContext), and the active
/// ISA mode (ARM vs Thumb).
class ARMAsmParser : public MCTargetAsmParser {
  const MCRegisterInfo *MRI; // Cached from MCContext in the constructor.
  UnwindContext UC;
  ARMMnemonicSets MS;

  ARMTargetStreamer &getTargetStreamer() {
    assert(getParser().getStreamer().getTargetStreamer() &&
           "do not have a target streamer");
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<ARMTargetStreamer &>(TS);
  }

  // Map of register aliases created via the .req directive.
  StringMap<MCRegister> RegisterReqs;

  // NOTE(review): presumably set when a directive requests that the next
  // symbol be marked as a Thumb function (e.g. .thumb_func) and consumed when
  // the next label is parsed; both sites are outside this chunk — confirm
  // against parseDirectiveThumbFunc/onLabelParsed.
  bool NextSymbolIsThumb;

  // True if implicit IT blocks may be synthesized in Thumb mode.
  bool useImplicitITThumb() const {
    return ImplicitItMode == ImplicitItModeTy::Always ||
           ImplicitItMode == ImplicitItModeTy::ThumbOnly;
  }

  // True if conditional instructions are accepted without an IT block in ARM.
  bool useImplicitITARM() const {
    return ImplicitItMode == ImplicitItModeTy::Always ||
           ImplicitItMode == ImplicitItModeTy::ARMOnly;
  }

  struct {
    ARMCC::CondCodes Cond;    // Condition for IT block.
    unsigned Mask:4;          // Condition mask for instructions.
                              // Starting at first 1 (from lsb).
                              //   '1'  condition as indicated in IT.
                              //   '0'  inverse of condition (else).
                              // Count of instructions in IT block is
                              // 4 - trailingzeroes(mask)
                              // Note that this does not have the same encoding
                              // as in the IT instruction, which also depends
                              // on the low bit of the condition code.

    unsigned CurPosition;     // Current position in parsing of IT
                              // block. In range [0,4], with 0 being the IT
                              // instruction itself. Initialized according to
                              // count of instructions in block.  ~0U if no
                              // active IT block.

    bool IsExplicit;          // true - The IT instruction was present in the
                              // input, we should not modify it.
                              // false - The IT instruction was added
                              // implicitly, we can extend it if that
                              // would be legal.
  } ITState;

  // Instructions deferred while an implicit IT block is open; emitted together
  // with the synthesized IT instruction by flushPendingInstructions().
  SmallVector<MCInst, 4> PendingConditionalInsts;

  void onEndOfFile() override {
    flushPendingInstructions(Out&: getParser().getStreamer());
  }

  // Close any open implicit IT block: emit the IT instruction followed by the
  // deferred conditional instructions, then reset the IT state.
  void flushPendingInstructions(MCStreamer &Out) override {
    if (!inImplicitITBlock()) {
      assert(PendingConditionalInsts.size() == 0);
      return;
    }

    // Emit the IT instruction
    MCInst ITInst;
    ITInst.setOpcode(ARM::t2IT);
    ITInst.addOperand(Op: MCOperand::createImm(Val: ITState.Cond));
    ITInst.addOperand(Op: MCOperand::createImm(Val: ITState.Mask));
    Out.emitInstruction(Inst: ITInst, STI: getSTI());

    // Emit the conditional instructions
    assert(PendingConditionalInsts.size() <= 4);
    for (const MCInst &Inst : PendingConditionalInsts) {
      Out.emitInstruction(Inst, STI: getSTI());
    }
    PendingConditionalInsts.clear();

    // Clear the IT state
    ITState.Mask = 0;
    ITState.CurPosition = ~0U;
  }

  bool inITBlock() { return ITState.CurPosition != ~0U; }
  bool inExplicitITBlock() { return inITBlock() && ITState.IsExplicit; }
  bool inImplicitITBlock() { return inITBlock() && !ITState.IsExplicit; }

  bool lastInITBlock() {
    return ITState.CurPosition == 4 - (unsigned)llvm::countr_zero(Val: ITState.Mask);
  }

  void forwardITPosition() {
    if (!inITBlock()) return;
    // Move to the next instruction in the IT block, if there is one. If not,
    // mark the block as done, except for implicit IT blocks, which we leave
    // open until we find an instruction that can't be added to it.
    unsigned TZ = llvm::countr_zero(Val: ITState.Mask);
    if (++ITState.CurPosition == 5 - TZ && ITState.IsExplicit)
      ITState.CurPosition = ~0U; // Done with the IT block after this.
  }

  // Rewind the state of the current IT block, removing the last slot from it.
  void rewindImplicitITPosition() {
    assert(inImplicitITBlock());
    assert(ITState.CurPosition > 1);
    ITState.CurPosition--;
    unsigned TZ = llvm::countr_zero(Val: ITState.Mask);
    unsigned NewMask = 0;
    // Keep the condition bits above the removed slot, then re-plant the
    // terminating 1 one position higher.
    NewMask |= ITState.Mask & (0xC << TZ);
    NewMask |= 0x2 << TZ;
    ITState.Mask = NewMask;
  }

  // Rewind the state of the current IT block, removing the last slot from it.
  // If we were at the first slot, this closes the IT block.
  void discardImplicitITBlock() {
    assert(inImplicitITBlock());
    assert(ITState.CurPosition == 1);
    ITState.CurPosition = ~0U;
  }

  // Get the condition code corresponding to the current IT block slot.
  ARMCC::CondCodes currentITCond() {
    unsigned MaskBit = extractITMaskBit(Mask: ITState.Mask, Position: ITState.CurPosition);
    return MaskBit ? ARMCC::getOppositeCondition(CC: ITState.Cond) : ITState.Cond;
  }

  // Invert the condition of the current IT block slot without changing any
  // other slots in the same block.
  void invertCurrentITCondition() {
    if (ITState.CurPosition == 1) {
      // First slot: flip the block condition itself (its state bit is the
      // implicit always-0 Then).
      ITState.Cond = ARMCC::getOppositeCondition(CC: ITState.Cond);
    } else {
      ITState.Mask ^= 1 << (5 - ITState.CurPosition);
    }
  }

  // Returns true if the current IT block is full (all 4 slots used).
  bool isITBlockFull() {
    return inITBlock() && (ITState.Mask & 1);
  }

  // Extend the current implicit IT block to have one more slot with the given
  // condition code.
  void extendImplicitITBlock(ARMCC::CondCodes Cond) {
    assert(inImplicitITBlock());
    assert(!isITBlockFull());
    assert(Cond == ITState.Cond ||
           Cond == ARMCC::getOppositeCondition(ITState.Cond));
    unsigned TZ = llvm::countr_zero(Val: ITState.Mask);
    unsigned NewMask = 0;
    // Keep any existing condition bits.
    NewMask |= ITState.Mask & (0xE << TZ);
    // Insert the new condition bit.
    NewMask |= (Cond != ITState.Cond) << TZ;
    // Move the trailing 1 down one bit.
    NewMask |= 1 << (TZ - 1);
    ITState.Mask = NewMask;
  }

  // Create a new implicit IT block with a dummy condition code.
  void startImplicitITBlock() {
    assert(!inITBlock());
    ITState.Cond = ARMCC::AL;
    ITState.Mask = 8;
    ITState.CurPosition = 1;
    ITState.IsExplicit = false;
  }

  // Create a new explicit IT block with the given condition and mask.
  // The mask should be in the format used in ARMOperand and
  // MCOperand, with a 1 implying 'e', regardless of the low bit of
  // the condition.
  void startExplicitITBlock(ARMCC::CondCodes Cond, unsigned Mask) {
    assert(!inITBlock());
    ITState.Cond = Cond;
    ITState.Mask = Mask;
    ITState.CurPosition = 0;
    ITState.IsExplicit = true;
  }

  // State of the current MVE VPT block, analogous to ITState: Mask encodes
  // the then/else slots, CurPosition is ~0U when no VPT block is active.
  struct {
    unsigned Mask : 4;
    unsigned CurPosition;
  } VPTState;
  bool inVPTBlock() { return VPTState.CurPosition != ~0U; }
  // Advance to the next VPT slot; closes the block after the last slot.
  void forwardVPTPosition() {
    if (!inVPTBlock()) return;
    unsigned TZ = llvm::countr_zero(Val: VPTState.Mask);
    if (++VPTState.CurPosition == 5 - TZ)
      VPTState.CurPosition = ~0U;
  }

  // Diagnostic helpers forwarding to the underlying MCAsmParser.
  void Note(SMLoc L, const Twine &Msg, SMRange Range = std::nullopt) {
    return getParser().Note(L, Msg, Range);
  }

  bool Warning(SMLoc L, const Twine &Msg, SMRange Range = std::nullopt) {
    return getParser().Warning(L, Msg, Range);
  }

  bool Error(SMLoc L, const Twine &Msg, SMRange Range = std::nullopt) {
    return getParser().Error(L, Msg, Range);
  }

  bool validatetLDMRegList(const MCInst &Inst, const OperandVector &Operands,
                           unsigned MnemonicOpsEndInd, unsigned ListIndex,
                           bool IsARPop = false);
  bool validatetSTMRegList(const MCInst &Inst, const OperandVector &Operands,
                           unsigned MnemonicOpsEndInd, unsigned ListIndex);

  MCRegister tryParseRegister(bool AllowOutofBoundReg = false);
  bool tryParseRegisterWithWriteBack(OperandVector &);
  int tryParseShiftRegister(OperandVector &);
  std::optional<ARM_AM::ShiftOpc> tryParseShiftToken();
  bool parseRegisterList(OperandVector &, bool EnforceOrder = true,
                         bool AllowRAAC = false, bool IsLazyLoadStore = false,
                         bool IsVSCCLRM = false);
  bool parseMemory(OperandVector &);
  bool parseOperand(OperandVector &, StringRef Mnemonic);
  bool parseImmExpr(int64_t &Out);
  bool parsePrefix(ARM::Specifier &);
  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
                              unsigned &ShiftAmount);
  bool parseLiteralValues(unsigned Size, SMLoc L);
  bool parseDirectiveThumb(SMLoc L);
  bool parseDirectiveARM(SMLoc L);
  bool parseDirectiveThumbFunc(SMLoc L);
  bool parseDirectiveCode(SMLoc L);
  bool parseDirectiveSyntax(SMLoc L);
  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveEabiAttr(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveFPU(SMLoc L);
  bool parseDirectiveFnStart(SMLoc L);
  bool parseDirectiveFnEnd(SMLoc L);
  bool parseDirectiveCantUnwind(SMLoc L);
  bool parseDirectivePersonality(SMLoc L);
  bool parseDirectiveHandlerData(SMLoc L);
  bool parseDirectiveSetFP(SMLoc L);
  bool parseDirectivePad(SMLoc L);
  bool parseDirectiveRegSave(SMLoc L, bool IsVector);
  bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
  bool parseDirectiveLtorg(SMLoc L);
  bool parseDirectiveEven(SMLoc L);
  bool parseDirectivePersonalityIndex(SMLoc L);
  bool parseDirectiveUnwindRaw(SMLoc L);
  bool parseDirectiveTLSDescSeq(SMLoc L);
  bool parseDirectiveMovSP(SMLoc L);
  bool parseDirectiveObjectArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveAlign(SMLoc L);
  bool parseDirectiveThumbSet(SMLoc L);

  // Windows SEH unwind directive parsers.
  bool parseDirectiveSEHAllocStack(SMLoc L, bool Wide);
  bool parseDirectiveSEHSaveRegs(SMLoc L, bool Wide);
  bool parseDirectiveSEHSaveSP(SMLoc L);
  bool parseDirectiveSEHSaveFRegs(SMLoc L);
  bool parseDirectiveSEHSaveLR(SMLoc L);
  bool parseDirectiveSEHPrologEnd(SMLoc L, bool Fragment);
  bool parseDirectiveSEHNop(SMLoc L, bool Wide);
  bool parseDirectiveSEHEpilogStart(SMLoc L, bool Condition);
  bool parseDirectiveSEHEpilogEnd(SMLoc L);
  bool parseDirectiveSEHCustom(SMLoc L);

  std::unique_ptr<ARMOperand> defaultCondCodeOp();
  std::unique_ptr<ARMOperand> defaultCCOutOp();
  std::unique_ptr<ARMOperand> defaultVPTPredOp();

  bool isMnemonicVPTPredicable(StringRef Mnemonic, StringRef ExtraToken);
  StringRef splitMnemonic(StringRef Mnemonic, StringRef ExtraToken,
                          ARMCC::CondCodes &PredicationCode,
                          ARMVCC::VPTCodes &VPTPredicationCode,
                          bool &CarrySetting, unsigned &ProcessorIMod,
                          StringRef &ITMask);
  void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef ExtraToken,
                             StringRef FullInst, bool &CanAcceptCarrySet,
                             bool &CanAcceptPredicationCode,
                             bool &CanAcceptVPTPredicationCode);
  bool enableArchExtFeature(StringRef Name, SMLoc &ExtLoc);

  void tryConvertingToTwoOperandForm(StringRef Mnemonic,
                                     ARMCC::CondCodes PredicationCode,
                                     bool CarrySetting, OperandVector &Operands,
                                     unsigned MnemonicOpsEndInd);

  bool CDEConvertDualRegOperand(StringRef Mnemonic, OperandVector &Operands,
                                unsigned MnemonicOpsEndInd);

  // Subtarget feature queries. These consult the current (possibly mode-
  // switched) subtarget info rather than the construction-time one.
  bool isThumb() const {
    // FIXME: Can tablegen auto-generate this?
    return getSTI().hasFeature(Feature: ARM::ModeThumb);
  }

  bool isThumbOne() const {
    return isThumb() && !getSTI().hasFeature(Feature: ARM::FeatureThumb2);
  }

  bool isThumbTwo() const {
    return isThumb() && getSTI().hasFeature(Feature: ARM::FeatureThumb2);
  }

  bool hasThumb() const {
    return getSTI().hasFeature(Feature: ARM::HasV4TOps);
  }

  bool hasThumb2() const {
    return getSTI().hasFeature(Feature: ARM::FeatureThumb2);
  }

  bool hasV6Ops() const {
    return getSTI().hasFeature(Feature: ARM::HasV6Ops);
  }

  bool hasV6T2Ops() const {
    return getSTI().hasFeature(Feature: ARM::HasV6T2Ops);
  }

  bool hasV6MOps() const {
    return getSTI().hasFeature(Feature: ARM::HasV6MOps);
  }

  bool hasV7Ops() const {
    return getSTI().hasFeature(Feature: ARM::HasV7Ops);
  }

  bool hasV8Ops() const {
    return getSTI().hasFeature(Feature: ARM::HasV8Ops);
  }

  bool hasV8MBaseline() const {
    return getSTI().hasFeature(Feature: ARM::HasV8MBaselineOps);
  }

  bool hasV8MMainline() const {
    return getSTI().hasFeature(Feature: ARM::HasV8MMainlineOps);
  }
  bool hasV8_1MMainline() const {
    return getSTI().hasFeature(Feature: ARM::HasV8_1MMainlineOps);
  }
  bool hasMVEFloat() const {
    return getSTI().hasFeature(Feature: ARM::HasMVEFloatOps);
  }
  bool hasCDE() const {
    return getSTI().hasFeature(Feature: ARM::HasCDEOps);
  }
  bool has8MSecExt() const {
    return getSTI().hasFeature(Feature: ARM::Feature8MSecExt);
  }

  bool hasARM() const {
    return !getSTI().hasFeature(Feature: ARM::FeatureNoARM);
  }

  bool hasDSP() const {
    return getSTI().hasFeature(Feature: ARM::FeatureDSP);
  }

  bool hasD32() const {
    return getSTI().hasFeature(Feature: ARM::FeatureD32);
  }

  bool hasV8_1aOps() const {
    return getSTI().hasFeature(Feature: ARM::HasV8_1aOps);
  }

  bool hasRAS() const {
    return getSTI().hasFeature(Feature: ARM::FeatureRAS);
  }

  // Toggle between ARM and Thumb mode and recompute the available features.
  void SwitchMode() {
    MCSubtargetInfo &STI = copySTI();
    auto FB = ComputeAvailableFeatures(FB: STI.ToggleFeature(FB: ARM::ModeThumb));
    setAvailableFeatures(FB);
  }

  void FixModeAfterArchChange(bool WasThumb, SMLoc Loc);

  bool isMClass() const {
    return getSTI().hasFeature(Feature: ARM::FeatureMClass);
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "ARMGenAsmMatcher.inc"

  /// }

  ParseStatus parseITCondCode(OperandVector &);
  ParseStatus parseCoprocNumOperand(OperandVector &);
  ParseStatus parseCoprocRegOperand(OperandVector &);
  ParseStatus parseCoprocOptionOperand(OperandVector &);
  ParseStatus parseMemBarrierOptOperand(OperandVector &);
  ParseStatus parseTraceSyncBarrierOptOperand(OperandVector &);
  ParseStatus parseInstSyncBarrierOptOperand(OperandVector &);
  ParseStatus parseProcIFlagsOperand(OperandVector &);
  ParseStatus parseMSRMaskOperand(OperandVector &);
  ParseStatus parseBankedRegOperand(OperandVector &);
  ParseStatus parsePKHImm(OperandVector &O, ARM_AM::ShiftOpc, int Low,
                          int High);
  ParseStatus parsePKHLSLImm(OperandVector &O) {
    return parsePKHImm(O, ARM_AM::lsl, Low: 0, High: 31);
  }
  ParseStatus parsePKHASRImm(OperandVector &O) {
    return parsePKHImm(O, ARM_AM::asr, Low: 1, High: 32);
  }
  ParseStatus parseSetEndImm(OperandVector &);
  ParseStatus parseShifterImm(OperandVector &);
  ParseStatus parseRotImm(OperandVector &);
  ParseStatus parseModImm(OperandVector &);
  ParseStatus parseBitfield(OperandVector &);
  ParseStatus parsePostIdxReg(OperandVector &);
  ParseStatus parseAM3Offset(OperandVector &);
  ParseStatus parseFPImm(OperandVector &);
  ParseStatus parseVectorList(OperandVector &);
  ParseStatus parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
                              SMLoc &EndLoc);

  // Asm Match Converter Methods
  void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
  void cvtThumbBranches(MCInst &Inst, const OperandVector &);
  void cvtMVEVMOVQtoDReg(MCInst &Inst, const OperandVector &);

  bool validateInstruction(MCInst &Inst, const OperandVector &Ops,
                           unsigned MnemonicOpsEndInd);
  bool processInstruction(MCInst &Inst, const OperandVector &Ops,
                          unsigned MnemonicOpsEndInd, MCStreamer &Out);
  bool shouldOmitVectorPredicateOperand(StringRef Mnemonic,
                                        OperandVector &Operands,
                                        unsigned MnemonicOpsEndInd);
  bool isITBlockTerminator(MCInst &Inst) const;

  void fixupGNULDRDAlias(StringRef Mnemonic, OperandVector &Operands,
                         unsigned MnemonicOpsEndInd);
  bool validateLDRDSTRD(MCInst &Inst, const OperandVector &Operands, bool Load,
                        bool ARMMode, bool Writeback,
                        unsigned MnemonicOpsEndInd);

public:
  // Target-specific match-result codes, continuing the generic enumeration.
  enum ARMMatchResultTy {
    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
    Match_RequiresNotITBlock,
    Match_RequiresV6,
    Match_RequiresThumb2,
    Match_RequiresV8,
    Match_RequiresFlagSetting,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "ARMGenAsmMatcher.inc"

  };

  ARMAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
               const MCInstrInfo &MII, const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI, MII), UC(Parser), MS(STI) {
    MCAsmParserExtension::Initialize(Parser);

    // Cache the MCRegisterInfo.
    MRI = getContext().getRegisterInfo();

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(FB: STI.getFeatureBits()));

    // Add build attributes based on the selected target.
    if (AddBuildAttributes)
      getTargetStreamer().emitTargetAttributes(STI);

    // Not in an ITBlock to start with.
    ITState.CurPosition = ~0U;

    VPTState.CurPosition = ~0U;

    NextSymbolIsThumb = false;
  }

  // Implementation of the MCTargetAsmParser interface:
  bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
  ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
                               SMLoc &EndLoc) override;
  bool parseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool ParseDirective(AsmToken DirectiveID) override;

  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
  unsigned
  checkEarlyTargetMatchPredicate(MCInst &Inst,
                                 const OperandVector &Operands) override;

  bool matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  unsigned MatchInstruction(OperandVector &Operands, MCInst &Inst,
                            SmallVectorImpl<NearMissInfo> &NearMisses,
                            bool MatchingInlineAsm, bool &EmitInITBlock,
                            MCStreamer &Out);

  // A near-miss diagnostic rendered to text, ready for reporting.
  struct NearMissMessage {
    SMLoc Loc;
    SmallString<128> Message;
  };

  const char *getCustomOperandDiag(ARMMatchResultTy MatchError);

  void FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
                        SmallVectorImpl<NearMissMessage> &NearMissesOut,
                        SMLoc IDLoc, OperandVector &Operands);
  void ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses, SMLoc IDLoc,
                        OperandVector &Operands);

  void doBeforeLabelEmit(MCSymbol *Symbol, SMLoc IDLoc) override;

  void onLabelParsed(MCSymbol *Symbol) override;

  const MCInstrDesc &getInstrDesc(unsigned int Opcode) const {
    return MII.get(Opcode);
  }

  bool hasMVE() const { return getSTI().hasFeature(Feature: ARM::HasMVEIntegerOps); }

  // Return the low-subreg of a given Q register.
  MCRegister getDRegFromQReg(MCRegister QReg) const {
    return MRI->getSubReg(Reg: QReg, Idx: ARM::dsub_0);
  }

  const MCRegisterInfo *getMRI() const { return MRI; }
};
768
769/// ARMOperand - Instances of this class represent a parsed ARM machine
770/// operand.
771class ARMOperand : public MCParsedAsmOperand {
772 enum KindTy {
773 k_CondCode,
774 k_VPTPred,
775 k_CCOut,
776 k_ITCondMask,
777 k_CoprocNum,
778 k_CoprocReg,
779 k_CoprocOption,
780 k_Immediate,
781 k_MemBarrierOpt,
782 k_InstSyncBarrierOpt,
783 k_TraceSyncBarrierOpt,
784 k_Memory,
785 k_PostIndexRegister,
786 k_MSRMask,
787 k_BankedReg,
788 k_ProcIFlags,
789 k_VectorIndex,
790 k_Register,
791 k_RegisterList,
792 k_RegisterListWithAPSR,
793 k_DPRRegisterList,
794 k_SPRRegisterList,
795 k_FPSRegisterListWithVPR,
796 k_FPDRegisterListWithVPR,
797 k_VectorList,
798 k_VectorListAllLanes,
799 k_VectorListIndexed,
800 k_ShiftedRegister,
801 k_ShiftedImmediate,
802 k_ShifterImmediate,
803 k_RotateImmediate,
804 k_ModifiedImmediate,
805 k_ConstantPoolImmediate,
806 k_BitfieldDescriptor,
807 k_Token,
808 } Kind;
809
810 SMLoc StartLoc, EndLoc, AlignmentLoc;
811 SmallVector<MCRegister, 8> Registers;
812
813 ARMAsmParser *Parser;
814
815 struct CCOp {
816 ARMCC::CondCodes Val;
817 };
818
819 struct VCCOp {
820 ARMVCC::VPTCodes Val;
821 };
822
823 struct CopOp {
824 unsigned Val;
825 };
826
827 struct CoprocOptionOp {
828 unsigned Val;
829 };
830
831 struct ITMaskOp {
832 unsigned Mask:4;
833 };
834
835 struct MBOptOp {
836 ARM_MB::MemBOpt Val;
837 };
838
839 struct ISBOptOp {
840 ARM_ISB::InstSyncBOpt Val;
841 };
842
843 struct TSBOptOp {
844 ARM_TSB::TraceSyncBOpt Val;
845 };
846
847 struct IFlagsOp {
848 ARM_PROC::IFlags Val;
849 };
850
851 struct MMaskOp {
852 unsigned Val;
853 };
854
855 struct BankedRegOp {
856 unsigned Val;
857 };
858
859 struct TokOp {
860 const char *Data;
861 unsigned Length;
862 };
863
864 struct RegOp {
865 MCRegister RegNum;
866 };
867
868 // A vector register list is a sequential list of 1 to 4 registers.
869 struct VectorListOp {
870 MCRegister RegNum;
871 unsigned Count;
872 unsigned LaneIndex;
873 bool isDoubleSpaced;
874 };
875
876 struct VectorIndexOp {
877 unsigned Val;
878 };
879
880 struct ImmOp {
881 const MCExpr *Val;
882 };
883
884 /// Combined record for all forms of ARM address expressions.
885 struct MemoryOp {
886 MCRegister BaseRegNum;
887 // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
888 // was specified.
889 const MCExpr *OffsetImm; // Offset immediate value
890 MCRegister OffsetRegNum; // Offset register num, when OffsetImm == NULL
891 ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
892 unsigned ShiftImm; // shift for OffsetReg.
893 unsigned Alignment; // 0 = no alignment specified
894 // n = alignment in bytes (2, 4, 8, 16, or 32)
895 unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit)
896 };
897
898 struct PostIdxRegOp {
899 MCRegister RegNum;
900 bool isAdd;
901 ARM_AM::ShiftOpc ShiftTy;
902 unsigned ShiftImm;
903 };
904
905 struct ShifterImmOp {
906 bool isASR;
907 unsigned Imm;
908 };
909
910 struct RegShiftedRegOp {
911 ARM_AM::ShiftOpc ShiftTy;
912 MCRegister SrcReg;
913 MCRegister ShiftReg;
914 unsigned ShiftImm;
915 };
916
917 struct RegShiftedImmOp {
918 ARM_AM::ShiftOpc ShiftTy;
919 MCRegister SrcReg;
920 unsigned ShiftImm;
921 };
922
923 struct RotImmOp {
924 unsigned Imm;
925 };
926
927 struct ModImmOp {
928 unsigned Bits;
929 unsigned Rot;
930 };
931
932 struct BitfieldOp {
933 unsigned LSB;
934 unsigned Width;
935 };
936
937 union {
938 struct CCOp CC;
939 struct VCCOp VCC;
940 struct CopOp Cop;
941 struct CoprocOptionOp CoprocOption;
942 struct MBOptOp MBOpt;
943 struct ISBOptOp ISBOpt;
944 struct TSBOptOp TSBOpt;
945 struct ITMaskOp ITMask;
946 struct IFlagsOp IFlags;
947 struct MMaskOp MMask;
948 struct BankedRegOp BankedReg;
949 struct TokOp Tok;
950 struct RegOp Reg;
951 struct VectorListOp VectorList;
952 struct VectorIndexOp VectorIndex;
953 struct ImmOp Imm;
954 struct MemoryOp Memory;
955 struct PostIdxRegOp PostIdxReg;
956 struct ShifterImmOp ShifterImm;
957 struct RegShiftedRegOp RegShiftedReg;
958 struct RegShiftedImmOp RegShiftedImm;
959 struct RotImmOp RotImm;
960 struct ModImmOp ModImm;
961 struct BitfieldOp Bitfield;
962 };
963
public:
// Construct an operand of kind K owned by the given parser. The payload
// and source locations are filled in afterwards by the caller.
ARMOperand(KindTy K, ARMAsmParser &Parser) : Kind(K), Parser(&Parser) {}

/// getStartLoc - Get the location of the first token of this operand.
SMLoc getStartLoc() const override { return StartLoc; }

/// getEndLoc - Get the location of the last token of this operand.
SMLoc getEndLoc() const override { return EndLoc; }

/// getLocRange - Get the range between the first and last token of this
/// operand.
SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }

/// getAlignmentLoc - Get the location of the Alignment token of this operand.
SMLoc getAlignmentLoc() const {
assert(Kind == k_Memory && "Invalid access!");
return AlignmentLoc;
}
982
// Typed accessors for the union payload. Each one asserts that Kind (or
// the corresponding is*() predicate) matches the member being read.
ARMCC::CondCodes getCondCode() const {
assert(Kind == k_CondCode && "Invalid access!");
return CC.Val;
}

ARMVCC::VPTCodes getVPTPred() const {
assert(isVPTPred() && "Invalid access!");
return VCC.Val;
}

// Coprocessor number and coprocessor register share the same payload.
unsigned getCoproc() const {
assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
return Cop.Val;
}

StringRef getToken() const {
assert(Kind == k_Token && "Invalid access!");
return StringRef(Tok.Data, Tok.Length);
}

MCRegister getReg() const override {
assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
return Reg.RegNum;
}

// Valid for any of the register-list operand kinds.
const SmallVectorImpl<MCRegister> &getRegList() const {
assert((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR ||
Kind == k_DPRRegisterList || Kind == k_SPRRegisterList ||
Kind == k_FPSRegisterListWithVPR ||
Kind == k_FPDRegisterListWithVPR) &&
"Invalid access!");
return Registers;
}

const MCExpr *getImm() const {
assert(isImm() && "Invalid access!");
return Imm.Val;
}

const MCExpr *getConstantPoolImm() const {
assert(isConstantPoolImm() && "Invalid access!");
return Imm.Val;
}

unsigned getVectorIndex() const {
assert(Kind == k_VectorIndex && "Invalid access!");
return VectorIndex.Val;
}

ARM_MB::MemBOpt getMemBarrierOpt() const {
assert(Kind == k_MemBarrierOpt && "Invalid access!");
return MBOpt.Val;
}

ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");
return ISBOpt.Val;
}

ARM_TSB::TraceSyncBOpt getTraceSyncBarrierOpt() const {
assert(Kind == k_TraceSyncBarrierOpt && "Invalid access!");
return TSBOpt.Val;
}

ARM_PROC::IFlags getProcIFlags() const {
assert(Kind == k_ProcIFlags && "Invalid access!");
return IFlags.Val;
}

unsigned getMSRMask() const {
assert(Kind == k_MSRMask && "Invalid access!");
return MMask.Val;
}

unsigned getBankedReg() const {
assert(Kind == k_BankedReg && "Invalid access!");
return BankedReg.Val;
}
1061
// Simple kind tests used by the auto-generated matcher.
bool isCoprocNum() const { return Kind == k_CoprocNum; }
bool isCoprocReg() const { return Kind == k_CoprocReg; }
bool isCoprocOption() const { return Kind == k_CoprocOption; }
bool isCondCode() const { return Kind == k_CondCode; }
bool isVPTPred() const { return Kind == k_VPTPred; }
bool isCCOut() const { return Kind == k_CCOut; }
bool isITMask() const { return Kind == k_ITCondMask; }
// Note: the condition code of an IT instruction is stored as a plain
// k_CondCode operand, so this deliberately tests the same kind as
// isCondCode().
bool isITCondCode() const { return Kind == k_CondCode; }
bool isImm() const override {
return Kind == k_Immediate;
}
1073
// Branch targets: a non-constant expression is accepted unconditionally
// (it will be resolved by a fixup later); a constant must be suitably
// aligned for the instruction set (4 bytes for ARM, 2 for Thumb).
bool isARMBranchTarget() const {
if (!isImm()) return false;

if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm()))
return CE->getValue() % 4 == 0;
return true;
}


bool isThumbBranchTarget() const {
if (!isImm()) return false;

if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm()))
return CE->getValue() % 2 == 0;
return true;
}
1090
// checks whether this operand is an unsigned offset which fits in a field
// of specified width and scaled by a specific number of bits
// (i.e. a non-negative multiple of 2^scale no larger than
// (2^width - 1) << scale). Symbolic references always match; they are
// resolved later via a fixup.
template<unsigned width, unsigned scale>
bool isUnsignedOffset() const {
if (!isImm()) return false;
if (isa<MCSymbolRefExpr>(Val: Imm.Val)) return true;
if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Imm.Val)) {
int64_t Val = CE->getValue();
int64_t Align = 1LL << scale;
int64_t Max = Align * ((1LL << width) - 1);
return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
}
return false;
}
1105
// checks whether this operand is a signed offset which fits in a field
// of specified width and scaled by a specific number of bits
// (i.e. a multiple of 2^scale in [-(2^(width-1)) << scale,
// (2^(width-1) - 1) << scale]). Symbolic references always match.
template<unsigned width, unsigned scale>
bool isSignedOffset() const {
if (!isImm()) return false;
if (isa<MCSymbolRefExpr>(Val: Imm.Val)) return true;
if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Imm.Val)) {
int64_t Val = CE->getValue();
int64_t Align = 1LL << scale;
int64_t Max = Align * ((1LL << (width-1)) - 1);
int64_t Min = -Align * (1LL << (width-1));
return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
}
return false;
}
1121
// checks whether this operand is an offset suitable for the LE /
// LETP instructions in Arm v8.1M
// A constant must be an even, strictly negative value no smaller than
// -4094 (LE only branches backwards); symbolic references always match.
bool isLEOffset() const {
if (!isImm()) return false;
if (isa<MCSymbolRefExpr>(Val: Imm.Val)) return true;
if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Imm.Val)) {
int64_t Val = CE->getValue();
return Val < 0 && Val >= -4094 && (Val & 1) == 0;
}
return false;
}
1133
// checks whether this operand is a memory operand computed as an offset
// applied to PC. the offset may have 8 bits of magnitude and is represented
// with two bits of shift. textually it may be either [pc, #imm], #imm or
// relocable expression...
bool isThumbMemPC() const {
int64_t Val = 0;
if (isImm()) {
// Symbolic reference: resolved via fixup, always acceptable.
if (isa<MCSymbolRefExpr>(Val: Imm.Val)) return true;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Imm.Val);
if (!CE) return false;
Val = CE->getValue();
}
else if (isGPRMem()) {
// [pc, #imm] form: base must be PC, constant immediate offset only.
if(!Memory.OffsetImm || Memory.OffsetRegNum) return false;
if(Memory.BaseRegNum != ARM::PC) return false;
if (const auto *CE = dyn_cast<MCConstantExpr>(Val: Memory.OffsetImm))
Val = CE->getValue();
else
return false;
}
else return false;
// 8-bit magnitude scaled by 4: a multiple of 4 in [0, 1020].
return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
}
1157
// True iff the operand is a 32-bit constant that can be encoded as a VFP
// 8-bit floating-point immediate (getFP32Imm returns -1 when it cannot).
bool isFPImm() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
if (!CE || !isUInt<32>(x: CE->getValue()))
return false;
int Val = ARM_AM::getFP32Imm(Imm: APInt(32, CE->getValue()));
return Val != -1;
}
1166
// Generic range check: constant immediate in the inclusive range [N, M].
template<int64_t N, int64_t M>
bool isImmediate() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return Value >= N && Value <= M;
}

// As isImmediate, but additionally requires a multiple of 4.
template<int64_t N, int64_t M>
bool isImmediateS4() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return ((Value & 3) == 0) && Value >= N && Value <= M;
}
// As isImmediate, but additionally requires a multiple of 2.
template<int64_t N, int64_t M>
bool isImmediateS2() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return ((Value & 1) == 0) && Value >= N && Value <= M;
}
// Thin wrappers instantiating the range templates above; the names encode
// the bit width / scaling of the instruction field they validate.
bool isFBits16() const {
return isImmediate<0, 17>();
}
bool isFBits32() const {
return isImmediate<1, 33>();
}
bool isImm8s4() const {
return isImmediateS4<-1020, 1020>();
}
bool isImm7s4() const {
return isImmediateS4<-508, 508>();
}
bool isImm7Shift0() const {
return isImmediate<-127, 127>();
}
bool isImm7Shift1() const {
return isImmediateS2<-255, 255>();
}
bool isImm7Shift2() const {
return isImmediateS4<-511, 511>();
}
bool isImm7() const {
return isImmediate<-127, 127>();
}
bool isImm0_1020s4() const {
return isImmediateS4<0, 1020>();
}
bool isImm0_508s4() const {
return isImmediateS4<0, 508>();
}
1222 bool isImm0_508s4Neg() const {
1223 if (!isImm()) return false;
1224 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
1225 if (!CE) return false;
1226 int64_t Value = -CE->getValue();
1227 // explicitly exclude zero. we want that to use the normal 0_508 version.
1228 return ((Value & 3) == 0) && Value > 0 && Value <= 508;
1229 }
1230
// True iff negating the (32-bit) constant yields a value in [1, 4095].
bool isImm0_4095Neg() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
if (!CE) return false;
// isImm0_4095Neg is used with 32-bit immediates only.
// 32-bit immediates are zero extended to 64-bit when parsed,
// thus simple -CE->getValue() results in a big negative number,
// not a small positive number as intended
if ((CE->getValue() >> 32) > 0) return false;
uint32_t Value = -static_cast<uint32_t>(CE->getValue());
// Zero is excluded: -0 should match the non-negated form.
return Value > 0 && Value < 4096;
}
1243
bool isImm0_7() const {
return isImmediate<0, 7>();
}

bool isImm1_16() const {
return isImmediate<1, 16>();
}

bool isImm1_32() const {
return isImmediate<1, 32>();
}

bool isImm8_255() const {
return isImmediate<8, 255>();
}

bool isImm0_255Expr() const {
if (!isImm())
return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
// If it's not a constant expression, it'll generate a fixup and be
// handled later.
if (!CE)
return true;
int64_t Value = CE->getValue();
return isUInt<8>(x: Value);
}

bool isImm256_65535Expr() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
// If it's not a constant expression, it'll generate a fixup and be
// handled later.
if (!CE) return true;
int64_t Value = CE->getValue();
return Value >= 256 && Value < 65536;
}

bool isImm0_65535Expr() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
// If it's not a constant expression, it'll generate a fixup and be
// handled later.
if (!CE) return true;
int64_t Value = CE->getValue();
return Value >= 0 && Value < 65536;
}

// NOTE(review): upper bound as written is 0xffffff + 1, i.e. one past the
// largest 24-bit value.
bool isImm24bit() const {
return isImmediate<0, 0xffffff + 1>();
}

bool isImmThumbSR() const {
return isImmediate<1, 33>();
}

// PKH shift immediates: LSL takes 0-31 (32 encoded specially), ASR 1-32.
bool isPKHLSLImm() const {
return isImmediate<0, 32>();
}

bool isPKHASRImm() const {
return isImmediate<0, 33>();
}
1307
bool isAdrLabel() const {
// If we have an immediate that's not a constant, treat it as a label
// reference needing a fixup.
if (isImm() && !isa<MCConstantExpr>(Val: getImm()))
return true;

// If it is a constant, it must fit into a modified immediate encoding.
// Either the value itself or its negation must be so_imm-encodable.
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return (ARM_AM::getSOImmVal(Arg: Value) != -1 ||
ARM_AM::getSOImmVal(Arg: -Value) != -1);
}
1322
// Thumb2 modified immediate, or a fixup-able expression (except the
// :upper16:/:lower16: forms, reserved for MOVW/MOVT matching).
bool isT2SOImm() const {
// If we have an immediate that's not a constant, treat it as an expression
// needing a fixup.
if (isImm() && !isa<MCConstantExpr>(Val: getImm())) {
// We want to avoid matching :upper16: and :lower16: as we want these
// expressions to match in isImm0_65535Expr()
auto *ARM16Expr = dyn_cast<MCSpecifierExpr>(Val: getImm());
return (!ARM16Expr || (ARM16Expr->getSpecifier() != ARM::S_HI16 &&
ARM16Expr->getSpecifier() != ARM::S_LO16));
}
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return ARM_AM::getT2SOImmVal(Arg: Value) != -1;
}
1339
// The bitwise complement is T2 so_imm-encodable, but the value itself is
// not (so e.g. MVN can be substituted for MOV).
bool isT2SOImmNot() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return ARM_AM::getT2SOImmVal(Arg: Value) == -1 &&
ARM_AM::getT2SOImmVal(Arg: ~Value) != -1;
}

// The negation is T2 so_imm-encodable, but the value itself is not.
bool isT2SOImmNeg() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
// Only use this when not representable as a plain so_imm.
return ARM_AM::getT2SOImmVal(Arg: Value) == -1 &&
ARM_AM::getT2SOImmVal(Arg: -Value) != -1;
}

// SETEND takes a 1-bit endianness selector.
bool isSetEndImm() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return Value == 1 || Value == 0;
}
1366
bool isReg() const override { return Kind == k_Register; }
bool isRegList() const { return Kind == k_RegisterList; }
bool isRegListWithAPSR() const {
return Kind == k_RegisterListWithAPSR || Kind == k_RegisterList;
}
bool isDReg() const {
return isReg() &&
ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg: Reg.RegNum);
}
bool isQReg() const {
return isReg() &&
ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg: Reg.RegNum);
}
bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
bool isFPSRegListWithVPR() const { return Kind == k_FPSRegisterListWithVPR; }
bool isFPDRegListWithVPR() const { return Kind == k_FPDRegisterListWithVPR; }
bool isToken() const override { return Kind == k_Token; }
bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
bool isTraceSyncBarrierOpt() const { return Kind == k_TraceSyncBarrierOpt; }
bool isMem() const override {
return isGPRMem() || isMVEMem();
}
// Memory operand whose base is a GPR or MVE Q register and whose offset
// register (if any) is an MVE Q register.
bool isMVEMem() const {
if (Kind != k_Memory)
return false;
if (Memory.BaseRegNum &&
!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg: Memory.BaseRegNum) &&
!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg: Memory.BaseRegNum))
return false;
if (Memory.OffsetRegNum &&
!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
Reg: Memory.OffsetRegNum))
return false;
return true;
}
// Memory operand whose base and offset registers (when present) are GPRs.
bool isGPRMem() const {
if (Kind != k_Memory)
return false;
if (Memory.BaseRegNum &&
!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg: Memory.BaseRegNum))
return false;
if (Memory.OffsetRegNum &&
!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg: Memory.OffsetRegNum))
return false;
return true;
}
bool isShifterImm() const { return Kind == k_ShifterImmediate; }
// Both the source and the shift-amount registers must be GPRs.
bool isRegShiftedReg() const {
return Kind == k_ShiftedRegister &&
ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
Reg: RegShiftedReg.SrcReg) &&
ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
Reg: RegShiftedReg.ShiftReg);
}
bool isRegShiftedImm() const {
return Kind == k_ShiftedImmediate &&
ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
Reg: RegShiftedImm.SrcReg);
}
bool isRotImm() const { return Kind == k_RotateImmediate; }
1429
1430 template<unsigned Min, unsigned Max>
1431 bool isPowerTwoInRange() const {
1432 if (!isImm()) return false;
1433 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
1434 if (!CE) return false;
1435 int64_t Value = CE->getValue();
1436 return Value > 0 && llvm::popcount(Value: (uint64_t)Value) == 1 && Value >= Min &&
1437 Value <= Max;
1438 }
bool isModImm() const { return Kind == k_ModifiedImmediate; }

// Complement is so_imm-encodable (MVN-substitutable immediate).
bool isModImmNot() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return ARM_AM::getSOImmVal(Arg: ~Value) != -1;
}

// Negation is so_imm-encodable, but the value itself is not.
bool isModImmNeg() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return ARM_AM::getSOImmVal(Arg: Value) == -1 &&
ARM_AM::getSOImmVal(Arg: -Value) != -1;
}
1457
// Negation lies in [1, 7] (Thumb ADD/SUB 3-bit immediate substitution).
bool isThumbModImmNeg1_7() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
if (!CE) return false;
int32_t Value = -(int32_t)CE->getValue();
return 0 < Value && Value < 8;
}

// Negation lies in [8, 255] (Thumb ADD/SUB 8-bit immediate substitution).
bool isThumbModImmNeg8_255() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
if (!CE) return false;
int32_t Value = -(int32_t)CE->getValue();
return 7 < Value && Value < 256;
}

bool isConstantPoolImm() const { return Kind == k_ConstantPoolImmediate; }
bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
bool isPostIdxRegShifted() const {
return Kind == k_PostIndexRegister &&
ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg: PostIdxReg.RegNum);
}
// Plain post-indexed register, i.e. no shift applied.
bool isPostIdxReg() const {
return isPostIdxRegShifted() && PostIdxReg.ShiftTy == ARM_AM::no_shift;
}
// [Rn] with no offset of any kind; optionally requires a specific
// alignment value (or any alignment when alignOK is set). The T2/T2NoSp/T
// variants additionally restrict the base register class.
bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
if (!isGPRMem())
return false;
// No offset of any kind.
return !Memory.OffsetRegNum && Memory.OffsetImm == nullptr &&
(alignOK || Memory.Alignment == Alignment);
}
// As above, but the base must be a GPR other than PC.
bool isMemNoOffsetT2(bool alignOK = false, unsigned Alignment = 0) const {
if (!isGPRMem())
return false;

if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
Reg: Memory.BaseRegNum))
return false;

// No offset of any kind.
return !Memory.OffsetRegNum && Memory.OffsetImm == nullptr &&
(alignOK || Memory.Alignment == Alignment);
}
// As above, but the base must also not be SP (rGPR class).
bool isMemNoOffsetT2NoSp(bool alignOK = false, unsigned Alignment = 0) const {
if (!isGPRMem())
return false;

if (!ARMMCRegisterClasses[ARM::rGPRRegClassID].contains(
Reg: Memory.BaseRegNum))
return false;

// No offset of any kind.
return !Memory.OffsetRegNum && Memory.OffsetImm == nullptr &&
(alignOK || Memory.Alignment == Alignment);
}
// As above, but the base must be a low register (tGPR class).
bool isMemNoOffsetT(bool alignOK = false, unsigned Alignment = 0) const {
if (!isGPRMem())
return false;

if (!ARMMCRegisterClasses[ARM::tGPRRegClassID].contains(
Reg: Memory.BaseRegNum))
return false;

// No offset of any kind.
return !Memory.OffsetRegNum && Memory.OffsetImm == nullptr &&
(alignOK || Memory.Alignment == Alignment);
}
// [pc, #imm] with imm in [-4095, 4095]; INT32_MIN encodes #-0.
bool isMemPCRelImm12() const {
if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
return false;
// Base register must be PC.
if (Memory.BaseRegNum != ARM::PC)
return false;
// Immediate offset in range [-4095, 4095].
if (!Memory.OffsetImm) return true;
if (const auto *CE = dyn_cast<MCConstantExpr>(Val: Memory.OffsetImm)) {
int64_t Val = CE->getValue();
return (Val > -4096 && Val < 4096) ||
(Val == std::numeric_limits<int32_t>::min());
}
return false;
}
1541
// Accepts any alignment qualifier (or none).
bool isAlignedMemory() const {
return isMemNoOffset(alignOK: true);
}

// Accepts only the unqualified (no ":<align>") form.
bool isAlignedMemoryNone() const {
return isMemNoOffset(alignOK: false, Alignment: 0);
}

bool isDupAlignedMemoryNone() const {
return isMemNoOffset(alignOK: false, Alignment: 0);
}
1553
// The isAlignedMemory<N>/isDupAlignedMemory<N> pairs accept either the
// element-sized alignment or no alignment qualifier at all.
bool isAlignedMemory16() const {
if (isMemNoOffset(alignOK: false, Alignment: 2)) // alignment in bytes for 16-bits is 2.
return true;
return isMemNoOffset(alignOK: false, Alignment: 0);
}

bool isDupAlignedMemory16() const {
if (isMemNoOffset(alignOK: false, Alignment: 2)) // alignment in bytes for 16-bits is 2.
return true;
return isMemNoOffset(alignOK: false, Alignment: 0);
}

bool isAlignedMemory32() const {
if (isMemNoOffset(alignOK: false, Alignment: 4)) // alignment in bytes for 32-bits is 4.
return true;
return isMemNoOffset(alignOK: false, Alignment: 0);
}

bool isDupAlignedMemory32() const {
if (isMemNoOffset(alignOK: false, Alignment: 4)) // alignment in bytes for 32-bits is 4.
return true;
return isMemNoOffset(alignOK: false, Alignment: 0);
}

bool isAlignedMemory64() const {
if (isMemNoOffset(alignOK: false, Alignment: 8)) // alignment in bytes for 64-bits is 8.
return true;
return isMemNoOffset(alignOK: false, Alignment: 0);
}

bool isDupAlignedMemory64() const {
if (isMemNoOffset(alignOK: false, Alignment: 8)) // alignment in bytes for 64-bits is 8.
return true;
return isMemNoOffset(alignOK: false, Alignment: 0);
}
1589
1590 bool isAlignedMemory64or128() const {
1591 if (isMemNoOffset(alignOK: false, Alignment: 8)) // alignment in bytes for 64-bits is 8.
1592 return true;
1593 if (isMemNoOffset(alignOK: false, Alignment: 16)) // alignment in bytes for 128-bits is 16.
1594 return true;
1595 return isMemNoOffset(alignOK: false, Alignment: 0);
1596 }
1597
// Accepts 8- or 16-byte alignment, or none.
bool isDupAlignedMemory64or128() const {
if (isMemNoOffset(alignOK: false, Alignment: 8)) // alignment in bytes for 64-bits is 8.
return true;
if (isMemNoOffset(alignOK: false, Alignment: 16)) // alignment in bytes for 128-bits is 16.
return true;
return isMemNoOffset(alignOK: false, Alignment: 0);
}

// Accepts 8-, 16- or 32-byte alignment, or none.
bool isAlignedMemory64or128or256() const {
if (isMemNoOffset(alignOK: false, Alignment: 8)) // alignment in bytes for 64-bits is 8.
return true;
if (isMemNoOffset(alignOK: false, Alignment: 16)) // alignment in bytes for 128-bits is 16.
return true;
if (isMemNoOffset(alignOK: false, Alignment: 32)) // alignment in bytes for 256-bits is 32.
return true;
return isMemNoOffset(alignOK: false, Alignment: 0);
}
1615
// Addressing mode 2: register offset, or immediate in [-4095, 4095].
bool isAddrMode2() const {
if (!isGPRMem() || Memory.Alignment != 0) return false;
// Check for register offset.
if (Memory.OffsetRegNum) return true;
// Immediate offset in range [-4095, 4095].
if (!Memory.OffsetImm) return true;
if (const auto *CE = dyn_cast<MCConstantExpr>(Val: Memory.OffsetImm)) {
int64_t Val = CE->getValue();
return Val > -4096 && Val < 4096;
}
return false;
}

// Stand-alone post-indexed AM2 immediate offset; INT32_MIN encodes #-0.
bool isAM2OffsetImm() const {
if (!isImm()) return false;
// Immediate offset in range [-4095, 4095].
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
if (!CE) return false;
int64_t Val = CE->getValue();
return (Val == std::numeric_limits<int32_t>::min()) ||
(Val > -4096 && Val < 4096);
}
1638
// Addressing mode 3: unshifted register offset, or immediate in
// [-255, 255] (INT32_MIN encodes #-0).
bool isAddrMode3() const {
// If we have an immediate that's not a constant, treat it as a label
// reference needing a fixup. If it is a constant, it's something else
// and we reject it.
if (isImm() && !isa<MCConstantExpr>(Val: getImm()))
return true;
if (!isGPRMem() || Memory.Alignment != 0) return false;
// No shifts are legal for AM3.
if (Memory.ShiftType != ARM_AM::no_shift) return false;
// Check for register offset.
if (Memory.OffsetRegNum) return true;
// Immediate offset in range [-255, 255].
if (!Memory.OffsetImm) return true;
if (const auto *CE = dyn_cast<MCConstantExpr>(Val: Memory.OffsetImm)) {
int64_t Val = CE->getValue();
// The #-0 offset is encoded as std::numeric_limits<int32_t>::min(), and
// we have to check for this too.
return (Val > -256 && Val < 256) ||
Val == std::numeric_limits<int32_t>::min();
}
return false;
}

// Stand-alone post-indexed AM3 offset: plain register, or immediate in
// [-255, 255] (INT32_MIN encodes #-0).
bool isAM3Offset() const {
if (isPostIdxReg())
return true;
if (!isImm())
return false;
// Immediate offset in range [-255, 255].
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
if (!CE) return false;
int64_t Val = CE->getValue();
// Special case, #-0 is std::numeric_limits<int32_t>::min().
return (Val > -256 && Val < 256) ||
Val == std::numeric_limits<int32_t>::min();
}
1675
// Addressing mode 5: immediate-only offset, a multiple of 4 in
// [-1020, 1020] (INT32_MIN encodes #-0); no register offset allowed.
bool isAddrMode5() const {
// If we have an immediate that's not a constant, treat it as a label
// reference needing a fixup. If it is a constant, it's something else
// and we reject it.
if (isImm() && !isa<MCConstantExpr>(Val: getImm()))
return true;
if (!isGPRMem() || Memory.Alignment != 0) return false;
// Check for register offset.
if (Memory.OffsetRegNum) return false;
// Immediate offset in range [-1020, 1020] and a multiple of 4.
if (!Memory.OffsetImm) return true;
if (const auto *CE = dyn_cast<MCConstantExpr>(Val: Memory.OffsetImm)) {
int64_t Val = CE->getValue();
return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
Val == std::numeric_limits<int32_t>::min();
}
return false;
}

// FP16 variant of AM5: a multiple of 2 in [-510, 510].
bool isAddrMode5FP16() const {
// If we have an immediate that's not a constant, treat it as a label
// reference needing a fixup. If it is a constant, it's something else
// and we reject it.
if (isImm() && !isa<MCConstantExpr>(Val: getImm()))
return true;
if (!isGPRMem() || Memory.Alignment != 0) return false;
// Check for register offset.
if (Memory.OffsetRegNum) return false;
// Immediate offset in range [-510, 510] and a multiple of 2.
if (!Memory.OffsetImm) return true;
if (const auto *CE = dyn_cast<MCConstantExpr>(Val: Memory.OffsetImm)) {
int64_t Val = CE->getValue();
return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) ||
Val == std::numeric_limits<int32_t>::min();
}
return false;
}
1713
// TBB: [Rn, Rm] with no shift, no negation, no alignment.
bool isMemTBB() const {
if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
return false;
return true;
}

// TBH: [Rn, Rm, lsl #1] exactly.
bool isMemTBH() const {
if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
Memory.Alignment != 0 )
return false;
return true;
}

// Any register-offset form (shift and negation permitted).
bool isMemRegOffset() const {
if (!isGPRMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
return false;
return true;
}

// Thumb2 register offset: non-negative, base != PC, lsl #0-3 only.
bool isT2MemRegOffset() const {
if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
Memory.Alignment != 0 || Memory.BaseRegNum == ARM::PC)
return false;
// Only lsl #{0, 1, 2, 3} allowed.
if (Memory.ShiftType == ARM_AM::no_shift)
return true;
if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
return false;
return true;
}
1746
bool isMemThumbRR() const {
// Thumb reg+reg addressing is simple. Just two registers, a base and
// an offset. No shifts, negations or any other complicating factors.
if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
return false;
// Both registers must be low registers (r0-r7).
return isARMLowRegister(Reg: Memory.BaseRegNum) &&
(!Memory.OffsetRegNum || isARMLowRegister(Reg: Memory.OffsetRegNum));
}

// Thumb [Rn, #imm] with a low base register and imm a multiple of 4.
bool isMemThumbRIs4() const {
if (!isGPRMem() || Memory.OffsetRegNum ||
!isARMLowRegister(Reg: Memory.BaseRegNum) || Memory.Alignment != 0)
return false;
// Immediate offset, multiple of 4 in range [0, 124].
if (!Memory.OffsetImm) return true;
if (const auto *CE = dyn_cast<MCConstantExpr>(Val: Memory.OffsetImm)) {
int64_t Val = CE->getValue();
return Val >= 0 && Val <= 124 && (Val % 4) == 0;
}
return false;
}
1769
// Thumb [Rn, #imm] with a low base register and imm a multiple of 2.
bool isMemThumbRIs2() const {
if (!isGPRMem() || Memory.OffsetRegNum ||
!isARMLowRegister(Reg: Memory.BaseRegNum) || Memory.Alignment != 0)
return false;
// Immediate offset, multiple of 2 in range [0, 62].
if (!Memory.OffsetImm) return true;
if (const auto *CE = dyn_cast<MCConstantExpr>(Val: Memory.OffsetImm)) {
int64_t Val = CE->getValue();
return Val >= 0 && Val <= 62 && (Val % 2) == 0;
}
return false;
}
1782
// Thumb [Rn, #imm] with a low base register, unscaled imm in [0, 31].
bool isMemThumbRIs1() const {
if (!isGPRMem() || Memory.OffsetRegNum ||
!isARMLowRegister(Reg: Memory.BaseRegNum) || Memory.Alignment != 0)
return false;
// Immediate offset in range [0, 31].
if (!Memory.OffsetImm) return true;
if (const auto *CE = dyn_cast<MCConstantExpr>(Val: Memory.OffsetImm)) {
int64_t Val = CE->getValue();
return Val >= 0 && Val <= 31;
}
return false;
}

// Thumb [sp, #imm] with imm a multiple of 4 in [0, 1020].
bool isMemThumbSPI() const {
if (!isGPRMem() || Memory.OffsetRegNum || Memory.BaseRegNum != ARM::SP ||
Memory.Alignment != 0)
return false;
// Immediate offset, multiple of 4 in range [0, 1020].
if (!Memory.OffsetImm) return true;
if (const auto *CE = dyn_cast<MCConstantExpr>(Val: Memory.OffsetImm)) {
int64_t Val = CE->getValue();
return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
}
return false;
}
1808
// [Rn, #imm] with imm a multiple of 4 in [-1020, 1020].
bool isMemImm8s4Offset() const {
// If we have an immediate that's not a constant, treat it as a label
// reference needing a fixup. If it is a constant, it's something else
// and we reject it.
if (isImm() && !isa<MCConstantExpr>(Val: getImm()))
return true;
if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
return false;
// Immediate offset a multiple of 4 in range [-1020, 1020].
if (!Memory.OffsetImm) return true;
if (const auto *CE = dyn_cast<MCConstantExpr>(Val: Memory.OffsetImm)) {
int64_t Val = CE->getValue();
// Special case, #-0 is std::numeric_limits<int32_t>::min().
return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
Val == std::numeric_limits<int32_t>::min();
}
return false;
}

// [Rn, #imm] with base != PC and imm a multiple of 4 in [-508, 508].
bool isMemImm7s4Offset() const {
// If we have an immediate that's not a constant, treat it as a label
// reference needing a fixup. If it is a constant, it's something else
// and we reject it.
if (isImm() && !isa<MCConstantExpr>(Val: getImm()))
return true;
if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0 ||
!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
Reg: Memory.BaseRegNum))
return false;
// Immediate offset a multiple of 4 in range [-508, 508].
if (!Memory.OffsetImm) return true;
if (const auto *CE = dyn_cast<MCConstantExpr>(Val: Memory.OffsetImm)) {
int64_t Val = CE->getValue();
// Special case, #-0 is INT32_MIN.
return (Val >= -508 && Val <= 508 && (Val & 3) == 0) || Val == INT32_MIN;
}
return false;
}
1847
// [Rn, #imm] with imm a multiple of 4 in [0, 1020].
bool isMemImm0_1020s4Offset() const {
if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
return false;
// Immediate offset a multiple of 4 in range [0, 1020].
if (!Memory.OffsetImm) return true;
if (const auto *CE = dyn_cast<MCConstantExpr>(Val: Memory.OffsetImm)) {
int64_t Val = CE->getValue();
return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
}
return false;
}

// [Rn, #imm] with base != PC and imm in [-255, 255]; INT32_MIN is #-0.
bool isMemImm8Offset() const {
if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
return false;
// Base reg of PC isn't allowed for these encodings.
if (Memory.BaseRegNum == ARM::PC) return false;
// Immediate offset in range [-255, 255].
if (!Memory.OffsetImm) return true;
if (const auto *CE = dyn_cast<MCConstantExpr>(Val: Memory.OffsetImm)) {
int64_t Val = CE->getValue();
return (Val == std::numeric_limits<int32_t>::min()) ||
(Val > -256 && Val < 256);
}
return false;
}
1874
/// True for a memory operand whose immediate offset, once divided by
/// (1 << Bits), fits in the signed 7-bit range [-127, 127]. The base
/// register must belong to RegClassID. INT32_MIN is the sentinel for a
/// zero offset with the subtract bit set (#-0).
template<unsigned Bits, unsigned RegClassID>
bool isMemImm7ShiftedOffset() const {
  if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0 ||
      !ARMMCRegisterClasses[RegClassID].contains(Reg: Memory.BaseRegNum))
    return false;

  // Expect an immediate offset equal to an element of the range
  // [-127, 127], shifted left by Bits.

  if (!Memory.OffsetImm) return true;
  if (const auto *CE = dyn_cast<MCConstantExpr>(Val: Memory.OffsetImm)) {
    int64_t Val = CE->getValue();

    // INT32_MIN is a special-case value (indicating the encoding with
    // zero offset and the subtract bit set)
    if (Val == INT32_MIN)
      return true;

    unsigned Divisor = 1U << Bits;

    // Check that the low bits are zero
    if (Val % Divisor != 0)
      return false;

    // Check that the remaining offset is within range.
    Val /= Divisor;
    return (Val >= -127 && Val <= 127);
  }
  // Non-constant offsets cannot be validated here.
  return false;
}
1905
1906 template <int shift> bool isMemRegRQOffset() const {
1907 if (!isMVEMem() || Memory.OffsetImm != nullptr || Memory.Alignment != 0)
1908 return false;
1909
1910 if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
1911 Reg: Memory.BaseRegNum))
1912 return false;
1913 if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
1914 Reg: Memory.OffsetRegNum))
1915 return false;
1916
1917 if (shift == 0 && Memory.ShiftType != ARM_AM::no_shift)
1918 return false;
1919
1920 if (shift > 0 &&
1921 (Memory.ShiftType != ARM_AM::uxtw || Memory.ShiftImm != shift))
1922 return false;
1923
1924 return true;
1925 }
1926
/// True for an MVE memory operand with a Q-register base ([Qn, #imm]):
/// the immediate must be a multiple of (1 << shift) and, once scaled,
/// fit in a signed 7-bit field; INT32_MIN is the #-0 sentinel.
template <int shift> bool isMemRegQOffset() const {
  if (!isMVEMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
    return false;

  if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
          Reg: Memory.BaseRegNum))
    return false;

  // Absent offset is an implicit #0.
  if (!Memory.OffsetImm)
    return true;
  static_assert(shift < 56,
                "Such that we dont shift by a value higher than 62");
  if (const auto *CE = dyn_cast<MCConstantExpr>(Val: Memory.OffsetImm)) {
    int64_t Val = CE->getValue();

    // The value must be a multiple of (1 << shift)
    if ((Val & ((1U << shift) - 1)) != 0)
      return false;

    // And be in the right range, depending on the amount that it is shifted
    // by.  Shift 0, is equal to 7 unsigned bits, the sign bit is set
    // separately.
    int64_t Range = (1U << (7 + shift)) - 1;
    return (Val == INT32_MIN) || (Val > -Range && Val < Range);
  }
  return false;
}
1954
1955 bool isMemPosImm8Offset() const {
1956 if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
1957 return false;
1958 // Immediate offset in range [0, 255].
1959 if (!Memory.OffsetImm) return true;
1960 if (const auto *CE = dyn_cast<MCConstantExpr>(Val: Memory.OffsetImm)) {
1961 int64_t Val = CE->getValue();
1962 return Val >= 0 && Val < 256;
1963 }
1964 return false;
1965 }
1966
1967 bool isMemNegImm8Offset() const {
1968 if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
1969 return false;
1970 // Base reg of PC isn't allowed for these encodings.
1971 if (Memory.BaseRegNum == ARM::PC) return false;
1972 // Immediate offset in range [-255, -1].
1973 if (!Memory.OffsetImm) return false;
1974 if (const auto *CE = dyn_cast<MCConstantExpr>(Val: Memory.OffsetImm)) {
1975 int64_t Val = CE->getValue();
1976 return (Val == std::numeric_limits<int32_t>::min()) ||
1977 (Val > -256 && Val < 0);
1978 }
1979 return false;
1980 }
1981
1982 bool isMemUImm12Offset() const {
1983 if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
1984 return false;
1985 // Immediate offset in range [0, 4095].
1986 if (!Memory.OffsetImm) return true;
1987 if (const auto *CE = dyn_cast<MCConstantExpr>(Val: Memory.OffsetImm)) {
1988 int64_t Val = CE->getValue();
1989 return (Val >= 0 && Val < 4096);
1990 }
1991 return false;
1992 }
1993
/// True for a memory operand with a signed 12-bit immediate offset in
/// [-4095, 4095] (INT32_MIN encodes #-0). Non-constant immediates are
/// accepted in two places: as a bare label reference up front, and as a
/// symbolic offset expression at the end — both become fixups.
bool isMemImm12Offset() const {
  // If we have an immediate that's not a constant, treat it as a label
  // reference needing a fixup. If it is a constant, it's something else
  // and we reject it.

  if (isImm() && !isa<MCConstantExpr>(Val: getImm()))
    return true;

  if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
    return false;
  // Immediate offset in range [-4095, 4095].
  if (!Memory.OffsetImm) return true;
  if (const auto *CE = dyn_cast<MCConstantExpr>(Val: Memory.OffsetImm)) {
    int64_t Val = CE->getValue();
    return (Val > -4096 && Val < 4096) ||
           (Val == std::numeric_limits<int32_t>::min());
  }
  // If we have an immediate that's not a constant, treat it as a
  // symbolic expression needing a fixup.
  return true;
}
2015
2016 bool isConstPoolAsmImm() const {
2017 // Delay processing of Constant Pool Immediate, this will turn into
2018 // a constant. Match no other operand
2019 return (isConstantPoolImm());
2020 }
2021
2022 bool isPostIdxImm8() const {
2023 if (!isImm()) return false;
2024 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
2025 if (!CE) return false;
2026 int64_t Val = CE->getValue();
2027 return (Val > -256 && Val < 256) ||
2028 (Val == std::numeric_limits<int32_t>::min());
2029 }
2030
2031 bool isPostIdxImm8s4() const {
2032 if (!isImm()) return false;
2033 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
2034 if (!CE) return false;
2035 int64_t Val = CE->getValue();
2036 return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
2037 (Val == std::numeric_limits<int32_t>::min());
2038 }
2039
// Simple operand-kind predicates.
bool isMSRMask() const { return Kind == k_MSRMask; }
bool isBankedReg() const { return Kind == k_BankedReg; }
bool isProcIFlags() const { return Kind == k_ProcIFlags; }

// NEON operands.
// True for any of the three vector-list flavors: plain list, all-lanes
// broadcast list, or lane-indexed list.
bool isAnyVectorList() const {
  return Kind == k_VectorList || Kind == k_VectorListAllLanes ||
         Kind == k_VectorListIndexed;
}

bool isVectorList() const { return Kind == k_VectorList; }

// Single-spaced lists use consecutive D registers; double-spaced lists
// use every other D register.
bool isSingleSpacedVectorList() const {
  return Kind == k_VectorList && !VectorList.isDoubleSpaced;
}

bool isDoubleSpacedVectorList() const {
  return Kind == k_VectorList && VectorList.isDoubleSpaced;
}
2059
// Predicates for specific vector-list shapes: they check spacing, element
// count, and (where relevant) the register class of the first register.
bool isVecListOneD() const {
  // We convert a single D reg to a list containing a D reg
  if (isDReg() && !Parser->hasMVE())
    return true;
  if (!isSingleSpacedVectorList()) return false;
  return VectorList.Count == 1;
}

// Two consecutive MVE Q registers.
bool isVecListTwoMQ() const {
  return isSingleSpacedVectorList() && VectorList.Count == 2 &&
         ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
             Reg: VectorList.RegNum);
}

bool isVecListDPair() const {
  // We convert a single Q reg to a list with the two corresponding D
  // registers
  if (isQReg() && !Parser->hasMVE())
    return true;
  if (!isSingleSpacedVectorList()) return false;
  return (ARMMCRegisterClasses[ARM::DPairRegClassID]
          .contains(Reg: VectorList.RegNum));
}

bool isVecListThreeD() const {
  if (!isSingleSpacedVectorList()) return false;
  return VectorList.Count == 3;
}

bool isVecListFourD() const {
  if (!isSingleSpacedVectorList()) return false;
  return VectorList.Count == 4;
}

// A pair of D registers with double spacing (e.g. {d0, d2}).
bool isVecListDPairSpaced() const {
  if (Kind != k_VectorList) return false;
  if (isSingleSpacedVectorList()) return false;
  return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
          .contains(Reg: VectorList.RegNum));
}

bool isVecListThreeQ() const {
  if (!isDoubleSpacedVectorList()) return false;
  return VectorList.Count == 3;
}

bool isVecListFourQ() const {
  if (!isDoubleSpacedVectorList()) return false;
  return VectorList.Count == 4;
}

// Four consecutive MVE Q registers.
bool isVecListFourMQ() const {
  return isSingleSpacedVectorList() && VectorList.Count == 4 &&
         ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
             Reg: VectorList.RegNum);
}
2116
// All-lanes broadcast vector lists (the "{d0[]}" syntax used by VLDn to
// all lanes), again distinguished by spacing and element count.
bool isSingleSpacedVectorAllLanes() const {
  return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
}

bool isDoubleSpacedVectorAllLanes() const {
  return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
}

bool isVecListOneDAllLanes() const {
  if (!isSingleSpacedVectorAllLanes()) return false;
  return VectorList.Count == 1;
}

bool isVecListDPairAllLanes() const {
  if (!isSingleSpacedVectorAllLanes()) return false;
  return (ARMMCRegisterClasses[ARM::DPairRegClassID]
          .contains(Reg: VectorList.RegNum));
}

bool isVecListDPairSpacedAllLanes() const {
  if (!isDoubleSpacedVectorAllLanes()) return false;
  return VectorList.Count == 2;
}

bool isVecListThreeDAllLanes() const {
  if (!isSingleSpacedVectorAllLanes()) return false;
  return VectorList.Count == 3;
}

bool isVecListThreeQAllLanes() const {
  if (!isDoubleSpacedVectorAllLanes()) return false;
  return VectorList.Count == 3;
}

bool isVecListFourDAllLanes() const {
  if (!isSingleSpacedVectorAllLanes()) return false;
  return VectorList.Count == 4;
}

bool isVecListFourQAllLanes() const {
  if (!isDoubleSpacedVectorAllLanes()) return false;
  return VectorList.Count == 4;
}
2160
// Lane-indexed vector lists (the "{d0[1]}" syntax). Each predicate checks
// spacing, element count, and that the lane index fits the element width:
// byte lanes are 0-7, halfword lanes 0-3, word lanes 0-1.
bool isSingleSpacedVectorIndexed() const {
  return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
}

bool isDoubleSpacedVectorIndexed() const {
  return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
}

bool isVecListOneDByteIndexed() const {
  if (!isSingleSpacedVectorIndexed()) return false;
  return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
}

bool isVecListOneDHWordIndexed() const {
  if (!isSingleSpacedVectorIndexed()) return false;
  return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
}

bool isVecListOneDWordIndexed() const {
  if (!isSingleSpacedVectorIndexed()) return false;
  return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
}

bool isVecListTwoDByteIndexed() const {
  if (!isSingleSpacedVectorIndexed()) return false;
  return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
}

bool isVecListTwoDHWordIndexed() const {
  if (!isSingleSpacedVectorIndexed()) return false;
  return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
}

bool isVecListTwoQWordIndexed() const {
  if (!isDoubleSpacedVectorIndexed()) return false;
  return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
}

bool isVecListTwoQHWordIndexed() const {
  if (!isDoubleSpacedVectorIndexed()) return false;
  return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
}

bool isVecListTwoDWordIndexed() const {
  if (!isSingleSpacedVectorIndexed()) return false;
  return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
}

bool isVecListThreeDByteIndexed() const {
  if (!isSingleSpacedVectorIndexed()) return false;
  return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
}

bool isVecListThreeDHWordIndexed() const {
  if (!isSingleSpacedVectorIndexed()) return false;
  return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
}

bool isVecListThreeQWordIndexed() const {
  if (!isDoubleSpacedVectorIndexed()) return false;
  return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
}

bool isVecListThreeQHWordIndexed() const {
  if (!isDoubleSpacedVectorIndexed()) return false;
  return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
}

bool isVecListThreeDWordIndexed() const {
  if (!isSingleSpacedVectorIndexed()) return false;
  return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
}

bool isVecListFourDByteIndexed() const {
  if (!isSingleSpacedVectorIndexed()) return false;
  return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
}

bool isVecListFourDHWordIndexed() const {
  if (!isSingleSpacedVectorIndexed()) return false;
  return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
}

bool isVecListFourQWordIndexed() const {
  if (!isDoubleSpacedVectorIndexed()) return false;
  return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
}

bool isVecListFourQHWordIndexed() const {
  if (!isDoubleSpacedVectorIndexed()) return false;
  return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
}

bool isVecListFourDWordIndexed() const {
  if (!isSingleSpacedVectorIndexed()) return false;
  return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
}
2258
bool isVectorIndex() const { return Kind == k_VectorIndex; }

// True when the lane index is valid for a vector with NumLanes lanes.
template <unsigned NumLanes>
bool isVectorIndexInRange() const {
  if (Kind != k_VectorIndex) return false;
  return VectorIndex.Val < NumLanes;
}

// Lane counts per element size for a 64-bit vector: 8 x i8, 4 x i16,
// 2 x i32, 1 x i64.
bool isVectorIndex8()  const { return isVectorIndexInRange<8>(); }
bool isVectorIndex16() const { return isVectorIndexInRange<4>(); }
bool isVectorIndex32() const { return isVectorIndexInRange<2>(); }
bool isVectorIndex64() const { return isVectorIndexInRange<1>(); }

// True when the index is exactly one of the two values an MVE paired
// operation accepts (e.g. 0/2 or 1/3).
template<int PermittedValue, int OtherPermittedValue>
bool isMVEPairVectorIndex() const {
  if (Kind != k_VectorIndex) return false;
  return VectorIndex.Val == PermittedValue ||
         VectorIndex.Val == OtherPermittedValue;
}
2278
// NEON VMOV-immediate splat predicates. The *Not variants test whether
// the bitwise complement of the value is encodable (used for VMVN/VBIC
// aliases).
bool isNEONi8splat() const {
  if (!isImm()) return false;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
  // Must be a constant.
  if (!CE) return false;
  int64_t Value = CE->getValue();
  // i8 value splatted across 8 bytes. The immediate is just the 8 byte
  // value.
  return Value >= 0 && Value < 256;
}

bool isNEONi16splat() const {
  if (isNEONByteReplicate(NumBytes: 2))
    return false; // Leave that for bytes replication and forbid by default.
  if (!isImm())
    return false;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
  // Must be a constant.
  if (!CE) return false;
  unsigned Value = CE->getValue();
  return ARM_AM::isNEONi16splat(Value);
}

bool isNEONi16splatNot() const {
  if (!isImm())
    return false;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
  // Must be a constant.
  if (!CE) return false;
  unsigned Value = CE->getValue();
  // Complement within the 16-bit lane before testing encodability.
  return ARM_AM::isNEONi16splat(Value: ~Value & 0xffff);
}

bool isNEONi32splat() const {
  if (isNEONByteReplicate(NumBytes: 4))
    return false; // Leave that for bytes replication and forbid by default.
  if (!isImm())
    return false;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
  // Must be a constant.
  if (!CE) return false;
  unsigned Value = CE->getValue();
  return ARM_AM::isNEONi32splat(Value);
}

bool isNEONi32splatNot() const {
  if (!isImm())
    return false;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
  // Must be a constant.
  if (!CE) return false;
  unsigned Value = CE->getValue();
  return ARM_AM::isNEONi32splat(Value: ~Value);
}
2333
/// True if Value is encodable as a NEON VMOV/VMVN i32 immediate: all set
/// bits confined to a single byte lane of the low 32 bits (000X, 00X0,
/// 0X00, X000), or one of the "X followed by ones" forms 00Xf / 0Xff.
static bool isValidNEONi32vmovImm(int64_t Value) {
  const uint64_t V = static_cast<uint64_t>(Value);
  // Single byte lane forms.
  if ((V & ~UINT64_C(0x000000ff)) == 0)
    return true;
  if ((V & ~UINT64_C(0x0000ff00)) == 0)
    return true;
  if ((V & ~UINT64_C(0x00ff0000)) == 0)
    return true;
  if ((V & ~UINT64_C(0xff000000)) == 0)
    return true;
  // "00Xf": arbitrary byte 1, low byte all ones.
  if ((V & ~UINT64_C(0x0000ff00)) == 0xff)
    return true;
  // "0Xff": arbitrary byte 2, low two bytes all ones.
  if ((V & ~UINT64_C(0x00ff0000)) == 0xffff)
    return true;
  return false;
}
2344
/// True when the immediate is NumElems copies of the same Width-bit
/// element (optionally after complementing, for the Inv variants). The
/// element itself must additionally be encodable for 16- and 32-bit
/// widths.
bool isNEONReplicate(unsigned Width, unsigned NumElems, bool Inv) const {
  assert((Width == 8 || Width == 16 || Width == 32) &&
         "Invalid element width");
  assert(NumElems * Width <= 64 && "Invalid result width");

  if (!isImm())
    return false;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
  // Must be a constant.
  if (!CE)
    return false;
  int64_t Value = CE->getValue();
  if (!Value)
    return false; // Don't bother with zero.
  if (Inv)
    Value = ~Value;

  uint64_t Mask = (1ull << Width) - 1;
  uint64_t Elem = Value & Mask;
  // A 16-bit element may only have bits in one of its two bytes.
  if (Width == 16 && (Elem & 0x00ff) != 0 && (Elem & 0xff00) != 0)
    return false;
  if (Width == 32 && !isValidNEONi32vmovImm(Val: Elem))
    return false;

  // Every subsequent element must equal the first.
  for (unsigned i = 1; i < NumElems; ++i) {
    Value >>= Width;
    if ((Value & Mask) != Elem)
      return false;
  }
  return true;
}

bool isNEONByteReplicate(unsigned NumBytes) const {
  return isNEONReplicate(Width: 8, NumElems: NumBytes, Inv: false);
}

// Sanity-check the template arguments of the two replicate predicates.
static void checkNeonReplicateArgs(unsigned FromW, unsigned ToW) {
  assert((FromW == 8 || FromW == 16 || FromW == 32) &&
         "Invalid source width");
  assert((ToW == 16 || ToW == 32 || ToW == 64) &&
         "Invalid destination width");
  assert(FromW < ToW && "ToW is not less than FromW");
}

template<unsigned FromW, unsigned ToW>
bool isNEONmovReplicate() const {
  checkNeonReplicateArgs(FromW, ToW);
  // A 64-bit splat is handled by its own operand class.
  if (ToW == 64 && isNEONi64splat())
    return false;
  return isNEONReplicate(Width: FromW, NumElems: ToW / FromW, Inv: false);
}

template<unsigned FromW, unsigned ToW>
bool isNEONinvReplicate() const {
  checkNeonReplicateArgs(FromW, ToW);
  return isNEONReplicate(Width: FromW, NumElems: ToW / FromW, Inv: true);
}
2402
2403 bool isNEONi32vmov() const {
2404 if (isNEONByteReplicate(NumBytes: 4))
2405 return false; // Let it to be classified as byte-replicate case.
2406 if (!isImm())
2407 return false;
2408 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
2409 // Must be a constant.
2410 if (!CE)
2411 return false;
2412 return isValidNEONi32vmovImm(Value: CE->getValue());
2413 }
2414
2415 bool isNEONi32vmovNeg() const {
2416 if (!isImm()) return false;
2417 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
2418 // Must be a constant.
2419 if (!CE) return false;
2420 return isValidNEONi32vmovImm(Value: ~CE->getValue());
2421 }
2422
2423 bool isNEONi64splat() const {
2424 if (!isImm()) return false;
2425 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
2426 // Must be a constant.
2427 if (!CE) return false;
2428 uint64_t Value = CE->getValue();
2429 // i64 value with each byte being either 0 or 0xff.
2430 for (unsigned i = 0; i < 8; ++i, Value >>= 8)
2431 if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
2432 return true;
2433 }
2434
2435 template<int64_t Angle, int64_t Remainder>
2436 bool isComplexRotation() const {
2437 if (!isImm()) return false;
2438
2439 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
2440 if (!CE) return false;
2441 uint64_t Value = CE->getValue();
2442
2443 return (Value % Angle == Remainder && Value <= 270);
2444 }
2445
2446 bool isMVELongShift() const {
2447 if (!isImm()) return false;
2448 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
2449 // Must be a constant.
2450 if (!CE) return false;
2451 uint64_t Value = CE->getValue();
2452 return Value >= 1 && Value <= 32;
2453 }
2454
2455 bool isMveSaturateOp() const {
2456 if (!isImm()) return false;
2457 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
2458 if (!CE) return false;
2459 uint64_t Value = CE->getValue();
2460 return Value == 48 || Value == 64;
2461 }
2462
// Condition-code predicates for IT/VPT-style restricted condition sets.
// Any condition except AL (always).
bool isITCondCodeNoAL() const {
  if (!isITCondCode()) return false;
  ARMCC::CondCodes CC = getCondCode();
  return CC != ARMCC::AL;
}

// Equality conditions only.
bool isITCondCodeRestrictedI() const {
  if (!isITCondCode())
    return false;
  ARMCC::CondCodes CC = getCondCode();
  return CC == ARMCC::EQ || CC == ARMCC::NE;
}

// Signed comparison conditions only.
bool isITCondCodeRestrictedS() const {
  if (!isITCondCode())
    return false;
  ARMCC::CondCodes CC = getCondCode();
  return CC == ARMCC::LT || CC == ARMCC::GT || CC == ARMCC::LE ||
         CC == ARMCC::GE;
}

// Unsigned comparison conditions only.
bool isITCondCodeRestrictedU() const {
  if (!isITCondCode())
    return false;
  ARMCC::CondCodes CC = getCondCode();
  return CC == ARMCC::HS || CC == ARMCC::HI;
}

// Conditions valid for floating-point comparisons.
bool isITCondCodeRestrictedFP() const {
  if (!isITCondCode())
    return false;
  ARMCC::CondCodes CC = getCondCode();
  return CC == ARMCC::EQ || CC == ARMCC::NE || CC == ARMCC::LT ||
         CC == ARMCC::GT || CC == ARMCC::LE || CC == ARMCC::GE;
}
2498
// Re-kind this operand in place as a single-spaced vector list holding a
// D-register pair (used when a Q reg is parsed where a list is expected).
void setVecListDPair(unsigned int DPair) {
  Kind = k_VectorList;
  VectorList.RegNum = DPair;
  VectorList.Count = 2;
  VectorList.isDoubleSpaced = false;
}

// Re-kind this operand in place as a one-element vector list (used when a
// bare D reg is parsed where a list is expected).
void setVecListOneD(unsigned int DReg) {
  Kind = k_VectorList;
  VectorList.RegNum = DReg;
  VectorList.Count = 1;
  VectorList.isDoubleSpaced = false;
}
2512
2513 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
2514 // Add as immediates when possible. Null MCExpr = 0.
2515 if (!Expr)
2516 Inst.addOperand(Op: MCOperand::createImm(Val: 0));
2517 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Expr))
2518 Inst.addOperand(Op: MCOperand::createImm(Val: CE->getValue()));
2519 else
2520 Inst.addOperand(Op: MCOperand::createExpr(Val: Expr));
2521 }
2522
// The add*Operands methods append this operand's value(s) to an MCInst in
// the form the instruction encoding expects; N is the number of MCInst
// operands the caller expects this operand to contribute.
void addARMBranchTargetOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  addExpr(Inst, Expr: getImm());
}

void addThumbBranchTargetOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  addExpr(Inst, Expr: getImm());
}

// Condition code plus the implicit CPSR use (NoRegister when AL, since an
// always-executed instruction doesn't read the flags).
void addCondCodeOperands(MCInst &Inst, unsigned N) const {
  assert(N == 2 && "Invalid number of operands!");
  Inst.addOperand(Op: MCOperand::createImm(Val: unsigned(getCondCode())));
  unsigned RegNum = getCondCode() == ARMCC::AL ? ARM::NoRegister : ARM::CPSR;
  Inst.addOperand(Op: MCOperand::createReg(Reg: RegNum));
}

// VPT predication: predicate kind, the P0 predicate register (or none),
// and a placeholder register operand.
void addVPTPredNOperands(MCInst &Inst, unsigned N) const {
  assert(N == 3 && "Invalid number of operands!");
  Inst.addOperand(Op: MCOperand::createImm(Val: unsigned(getVPTPred())));
  unsigned RegNum = getVPTPred() == ARMVCC::None ? ARM::NoRegister : ARM::P0;
  Inst.addOperand(Op: MCOperand::createReg(Reg: RegNum));
  Inst.addOperand(Op: MCOperand::createReg(Reg: 0));
}

// vpred_r: like vpred_n plus the "inactive lanes" source register, which
// must be the register tied to the instruction's output.
void addVPTPredROperands(MCInst &Inst, unsigned N) const {
  assert(N == 4 && "Invalid number of operands!");
  addVPTPredNOperands(Inst, N: N-1);
  MCRegister RegNum;
  if (getVPTPred() == ARMVCC::None) {
    RegNum = ARM::NoRegister;
  } else {
    unsigned NextOpIndex = Inst.getNumOperands();
    auto &MCID = Parser->getInstrDesc(Opcode: Inst.getOpcode());
    int TiedOp = MCID.getOperandConstraint(OpNum: NextOpIndex, Constraint: MCOI::TIED_TO);
    assert(TiedOp >= 0 &&
           "Inactive register in vpred_r is not tied to an output!");
    RegNum = Inst.getOperand(i: TiedOp).getReg();
  }
  Inst.addOperand(Op: MCOperand::createReg(Reg: RegNum));
}

void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(Op: MCOperand::createImm(Val: getCoproc()));
}

void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(Op: MCOperand::createImm(Val: getCoproc()));
}

void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(Op: MCOperand::createImm(Val: CoprocOption.Val));
}

void addITMaskOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(Op: MCOperand::createImm(Val: ITMask.Mask));
}

void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(Op: MCOperand::createImm(Val: unsigned(getCondCode())));
}

// Emits the logically-inverted condition (used by the 'else' lanes of IT).
void addITCondCodeInvOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(Op: MCOperand::createImm(Val: unsigned(ARMCC::getOppositeCondition(CC: getCondCode()))));
}
2594
void addCCOutOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(Op: MCOperand::createReg(Reg: getReg()));
}

void addRegOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(Op: MCOperand::createReg(Reg: getReg()));
}

// Register shifted by register: source reg, shift reg, and the combined
// shift-opcode/amount immediate.
void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
  assert(N == 3 && "Invalid number of operands!");
  assert(isRegShiftedReg() &&
         "addRegShiftedRegOperands() on non-RegShiftedReg!");
  Inst.addOperand(Op: MCOperand::createReg(Reg: RegShiftedReg.SrcReg));
  Inst.addOperand(Op: MCOperand::createReg(Reg: RegShiftedReg.ShiftReg));
  Inst.addOperand(Op: MCOperand::createImm(
      Val: ARM_AM::getSORegOpc(ShOp: RegShiftedReg.ShiftTy, Imm: RegShiftedReg.ShiftImm)));
}

// Register shifted by immediate: source reg plus the combined
// shift-opcode/amount immediate.
void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
  assert(N == 2 && "Invalid number of operands!");
  assert(isRegShiftedImm() &&
         "addRegShiftedImmOperands() on non-RegShiftedImm!");
  Inst.addOperand(Op: MCOperand::createReg(Reg: RegShiftedImm.SrcReg));
  // Shift of #32 is encoded as 0 where permitted
  unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
  Inst.addOperand(Op: MCOperand::createImm(
      Val: ARM_AM::getSORegOpc(ShOp: RegShiftedImm.ShiftTy, Imm)));
}

// Bit 5 selects ASR vs LSL; the low bits hold the shift amount.
void addShifterImmOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(Op: MCOperand::createImm(Val: (ShifterImm.isASR << 5) |
                                         ShifterImm.Imm));
}
2631
// Register lists are expanded into one register operand per member.
void addRegListOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const SmallVectorImpl<MCRegister> &RegList = getRegList();
  for (MCRegister Reg : RegList)
    Inst.addOperand(Op: MCOperand::createReg(Reg));
}

void addRegListWithAPSROperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const SmallVectorImpl<MCRegister> &RegList = getRegList();
  for (MCRegister Reg : RegList)
    Inst.addOperand(Op: MCOperand::createReg(Reg));
}

// The typed register-list variants all share the generic expansion.
void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
  addRegListOperands(Inst, N);
}

void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
  addRegListOperands(Inst, N);
}

void addFPSRegListWithVPROperands(MCInst &Inst, unsigned N) const {
  addRegListOperands(Inst, N);
}

void addFPDRegListWithVPROperands(MCInst &Inst, unsigned N) const {
  addRegListOperands(Inst, N);
}
2661
void addRotImmOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  // Encoded as val>>3. The printer handles display as 8, 16, 24.
  Inst.addOperand(Op: MCOperand::createImm(Val: RotImm.Imm >> 3));
}

// Modified-immediate (so_imm): 8-bit value plus 4-bit rotation packed
// into one immediate.
void addModImmOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");

  // Support for fixups (MCFixup)
  if (isImm())
    return addImmOperands(Inst, N);

  Inst.addOperand(Op: MCOperand::createImm(Val: ModImm.Bits | (ModImm.Rot << 7)));
}

// Encode the modified-immediate form of the complemented value (MVN
// aliases).
void addModImmNotOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
  uint32_t Enc = ARM_AM::getSOImmVal(Arg: ~CE->getValue());
  Inst.addOperand(Op: MCOperand::createImm(Val: Enc));
}

// Encode the modified-immediate form of the negated value (SUB/ADD
// aliases).
void addModImmNegOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
  uint32_t Enc = ARM_AM::getSOImmVal(Arg: -CE->getValue());
  Inst.addOperand(Op: MCOperand::createImm(Val: Enc));
}

void addThumbModImmNeg8_255Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
  uint32_t Val = -CE->getValue();
  Inst.addOperand(Op: MCOperand::createImm(Val));
}

void addThumbModImmNeg1_7Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
  uint32_t Val = -CE->getValue();
  Inst.addOperand(Op: MCOperand::createImm(Val));
}

void addBitfieldOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  // Munge the lsb/width into a bitfield mask.
  unsigned lsb = Bitfield.LSB;
  unsigned width = Bitfield.Width;
  // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
  uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
                    (32 - (lsb + width)));
  Inst.addOperand(Op: MCOperand::createImm(Val: Mask));
}
2716
void addImmOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  addExpr(Inst, Expr: getImm());
}

// Fixed-point conversion operand: the encoding stores 16 minus the
// fractional-bit count.
void addFBits16Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
  Inst.addOperand(Op: MCOperand::createImm(Val: 16 - CE->getValue()));
}

// As above, but for 32-bit fixed-point conversions.
void addFBits32Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
  Inst.addOperand(Op: MCOperand::createImm(Val: 32 - CE->getValue()));
}

// VFP immediate: re-encode the FP bit pattern into the 8-bit VFP form.
void addFPImmOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
  int Val = ARM_AM::getFP32Imm(Imm: APInt(32, CE->getValue()));
  Inst.addOperand(Op: MCOperand::createImm(Val));
}

void addImm8s4Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  // FIXME: We really want to scale the value here, but the LDRD/STRD
  // instruction don't encode operands that way yet.
  const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
  Inst.addOperand(Op: MCOperand::createImm(Val: CE->getValue()));
}

void addImm7s4Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  // FIXME: We really want to scale the value here, but the VSTR/VLDR_VSYSR
  // instruction don't encode operands that way yet.
  const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
  Inst.addOperand(Op: MCOperand::createImm(Val: CE->getValue()));
}

// The imm7 shift variants are stored unscaled in the MCInst.
void addImm7Shift0Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
  Inst.addOperand(Op: MCOperand::createImm(Val: CE->getValue()));
}

void addImm7Shift1Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
  Inst.addOperand(Op: MCOperand::createImm(Val: CE->getValue()));
}

void addImm7Shift2Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
  Inst.addOperand(Op: MCOperand::createImm(Val: CE->getValue()));
}

void addImm7Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
  Inst.addOperand(Op: MCOperand::createImm(Val: CE->getValue()));
}

void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  // The immediate is scaled by four in the encoding and is stored
  // in the MCInst as such. Lop off the low two bits here.
  const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
  Inst.addOperand(Op: MCOperand::createImm(Val: CE->getValue() / 4));
}

void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  // The immediate is scaled by four in the encoding and is stored
  // in the MCInst as such. Lop off the low two bits here.
  const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
  Inst.addOperand(Op: MCOperand::createImm(Val: -(CE->getValue() / 4)));
}
2796
2797 void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
2798 assert(N == 1 && "Invalid number of operands!");
2799 // The immediate is scaled by four in the encoding and is stored
2800 // in the MCInst as such. Lop off the low two bits here.
2801 const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
2802 Inst.addOperand(Op: MCOperand::createImm(Val: CE->getValue() / 4));
2803 }
2804
2805 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
2806 assert(N == 1 && "Invalid number of operands!");
2807 // The constant encodes as the immediate-1, and we store in the instruction
2808 // the bits as encoded, so subtract off one here.
2809 const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
2810 Inst.addOperand(Op: MCOperand::createImm(Val: CE->getValue() - 1));
2811 }
2812
2813 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
2814 assert(N == 1 && "Invalid number of operands!");
2815 // The constant encodes as the immediate-1, and we store in the instruction
2816 // the bits as encoded, so subtract off one here.
2817 const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
2818 Inst.addOperand(Op: MCOperand::createImm(Val: CE->getValue() - 1));
2819 }
2820
2821 void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
2822 assert(N == 1 && "Invalid number of operands!");
2823 // The constant encodes as the immediate, except for 32, which encodes as
2824 // zero.
2825 const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
2826 unsigned Imm = CE->getValue();
2827 Inst.addOperand(Op: MCOperand::createImm(Val: (Imm == 32 ? 0 : Imm)));
2828 }
2829
2830 void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
2831 assert(N == 1 && "Invalid number of operands!");
2832 // An ASR value of 32 encodes as 0, so that's how we want to add it to
2833 // the instruction as well.
2834 const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
2835 int Val = CE->getValue();
2836 Inst.addOperand(Op: MCOperand::createImm(Val: Val == 32 ? 0 : Val));
2837 }
2838
2839 void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
2840 assert(N == 1 && "Invalid number of operands!");
2841 // The operand is actually a t2_so_imm, but we have its bitwise
2842 // negation in the assembly source, so twiddle it here.
2843 const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
2844 Inst.addOperand(Op: MCOperand::createImm(Val: ~(uint32_t)CE->getValue()));
2845 }
2846
2847 void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
2848 assert(N == 1 && "Invalid number of operands!");
2849 // The operand is actually a t2_so_imm, but we have its
2850 // negation in the assembly source, so twiddle it here.
2851 const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
2852 Inst.addOperand(Op: MCOperand::createImm(Val: -(uint32_t)CE->getValue()));
2853 }
2854
2855 void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
2856 assert(N == 1 && "Invalid number of operands!");
2857 // The operand is actually an imm0_4095, but we have its
2858 // negation in the assembly source, so twiddle it here.
2859 const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
2860 Inst.addOperand(Op: MCOperand::createImm(Val: -(uint32_t)CE->getValue()));
2861 }
2862
2863 void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
2864 if(const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm())) {
2865 Inst.addOperand(Op: MCOperand::createImm(Val: CE->getValue() >> 2));
2866 return;
2867 }
2868 const MCSymbolRefExpr *SR = cast<MCSymbolRefExpr>(Val: Imm.Val);
2869 Inst.addOperand(Op: MCOperand::createExpr(Val: SR));
2870 }
2871
  // Add a Thumb PC-relative memory operand. The parsed operand may be a
  // plain immediate (constant or symbolic) or a full memory operand; in
  // all cases exactly one MCOperand is emitted.
  void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    if (isImm()) {
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
      if (CE) {
        Inst.addOperand(Op: MCOperand::createImm(Val: CE->getValue()));
        return;
      }
      // Non-constant immediate: must be a symbol reference, emitted as an
      // expression so a fixup can resolve it later.
      const MCSymbolRefExpr *SR = cast<MCSymbolRefExpr>(Val: Imm.Val);
      Inst.addOperand(Op: MCOperand::createExpr(Val: SR));
      return;
    }

    // Otherwise it must be a GPR memory operand whose offset is a constant.
    assert(isGPRMem() && "Unknown value type!");
    assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!");
    if (const auto *CE = dyn_cast<MCConstantExpr>(Val: Memory.OffsetImm))
      Inst.addOperand(Op: MCOperand::createImm(Val: CE->getValue()));
    else
      Inst.addOperand(Op: MCOperand::createExpr(Val: Memory.OffsetImm));
  }
2892
2893 void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
2894 assert(N == 1 && "Invalid number of operands!");
2895 Inst.addOperand(Op: MCOperand::createImm(Val: unsigned(getMemBarrierOpt())));
2896 }
2897
2898 void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
2899 assert(N == 1 && "Invalid number of operands!");
2900 Inst.addOperand(Op: MCOperand::createImm(Val: unsigned(getInstSyncBarrierOpt())));
2901 }
2902
2903 void addTraceSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
2904 assert(N == 1 && "Invalid number of operands!");
2905 Inst.addOperand(Op: MCOperand::createImm(Val: unsigned(getTraceSyncBarrierOpt())));
2906 }
2907
2908 void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
2909 assert(N == 1 && "Invalid number of operands!");
2910 Inst.addOperand(Op: MCOperand::createReg(Reg: Memory.BaseRegNum));
2911 }
2912
2913 void addMemNoOffsetT2Operands(MCInst &Inst, unsigned N) const {
2914 assert(N == 1 && "Invalid number of operands!");
2915 Inst.addOperand(Op: MCOperand::createReg(Reg: Memory.BaseRegNum));
2916 }
2917
2918 void addMemNoOffsetT2NoSpOperands(MCInst &Inst, unsigned N) const {
2919 assert(N == 1 && "Invalid number of operands!");
2920 Inst.addOperand(Op: MCOperand::createReg(Reg: Memory.BaseRegNum));
2921 }
2922
2923 void addMemNoOffsetTOperands(MCInst &Inst, unsigned N) const {
2924 assert(N == 1 && "Invalid number of operands!");
2925 Inst.addOperand(Op: MCOperand::createReg(Reg: Memory.BaseRegNum));
2926 }
2927
2928 void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
2929 assert(N == 1 && "Invalid number of operands!");
2930 if (const auto *CE = dyn_cast<MCConstantExpr>(Val: Memory.OffsetImm))
2931 Inst.addOperand(Op: MCOperand::createImm(Val: CE->getValue()));
2932 else
2933 Inst.addOperand(Op: MCOperand::createExpr(Val: Memory.OffsetImm));
2934 }
2935
2936 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2937 assert(N == 1 && "Invalid number of operands!");
2938 assert(isImm() && "Not an immediate!");
2939
2940 // If we have an immediate that's not a constant, treat it as a label
2941 // reference needing a fixup.
2942 if (!isa<MCConstantExpr>(Val: getImm())) {
2943 Inst.addOperand(Op: MCOperand::createExpr(Val: getImm()));
2944 return;
2945 }
2946
2947 const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
2948 int Val = CE->getValue();
2949 Inst.addOperand(Op: MCOperand::createImm(Val));
2950 }
2951
  // Shared worker for all aligned-memory operand classes: emits the base
  // register followed by the parsed alignment value.
  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createReg(Reg: Memory.BaseRegNum));
    Inst.addOperand(Op: MCOperand::createImm(Val: Memory.Alignment));
  }
2957
  // Forwards to addAlignedMemoryOperands; the operand class differs only
  // in its match predicate.
  void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2961
  // Forwards to addAlignedMemoryOperands; the operand class differs only
  // in its match predicate.
  void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2965
  // Forwards to addAlignedMemoryOperands; the operand class differs only
  // in its match predicate.
  void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2969
  // Forwards to addAlignedMemoryOperands; the operand class differs only
  // in its match predicate.
  void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2973
  // Forwards to addAlignedMemoryOperands; the operand class differs only
  // in its match predicate.
  void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2977
  // Forwards to addAlignedMemoryOperands; the operand class differs only
  // in its match predicate.
  void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2981
  // Forwards to addAlignedMemoryOperands; the operand class differs only
  // in its match predicate.
  void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2985
  // Forwards to addAlignedMemoryOperands; the operand class differs only
  // in its match predicate.
  void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2989
  // Forwards to addAlignedMemoryOperands; the operand class differs only
  // in its match predicate.
  void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2993
  // Forwards to addAlignedMemoryOperands; the operand class differs only
  // in its match predicate.
  void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2997
  // Forwards to addAlignedMemoryOperands; the operand class differs only
  // in its match predicate.
  void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
3001
  // Emit the three MCInst operands of an ARM addrmode2 memory reference:
  // base register, offset register (0 if none), and a packed AM2 opcode
  // word carrying add/sub, immediate magnitude, and shift information.
  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createReg(Reg: Memory.BaseRegNum));
    Inst.addOperand(Op: MCOperand::createReg(Reg: Memory.OffsetRegNum));
    if (!Memory.OffsetRegNum) {
      if (!Memory.OffsetImm)
        // No offset at all: encode as an add of zero.
        Inst.addOperand(Op: MCOperand::createImm(Val: 0));
      else if (const auto *CE = dyn_cast<MCConstantExpr>(Val: Memory.OffsetImm)) {
        int32_t Val = CE->getValue();
        ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
        // Special case for #-0 (INT32_MIN is the sentinel: AddSub is
        // already 'sub', so the magnitude becomes 0).
        if (Val == std::numeric_limits<int32_t>::min())
          Val = 0;
        if (Val < 0)
          Val = -Val;
        Val = ARM_AM::getAM2Opc(Opc: AddSub, Imm12: Val, SO: ARM_AM::no_shift);
        Inst.addOperand(Op: MCOperand::createImm(Val));
      } else
        // Non-constant offset: emit as an expression for a later fixup.
        Inst.addOperand(Op: MCOperand::createExpr(Val: Memory.OffsetImm));
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      int32_t Val =
          ARM_AM::getAM2Opc(Opc: Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                            Imm12: Memory.ShiftImm, SO: Memory.ShiftType);
      Inst.addOperand(Op: MCOperand::createImm(Val));
    }
  }
3030
  // Emit an AM2 immediate-only offset: a zero register placeholder plus
  // the packed AM2 opcode word (add/sub flag, |imm|, no shift).
  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
    assert(CE && "non-constant AM2OffsetImm operand!");
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0 (INT32_MIN sentinel: keeps 'sub', magnitude 0).
    if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM2Opc(Opc: AddSub, Imm12: Val, SO: ARM_AM::no_shift);
    Inst.addOperand(Op: MCOperand::createReg(Reg: 0));
    Inst.addOperand(Op: MCOperand::createImm(Val));
  }
3044
  // Emit the three MCInst operands of an ARM addrmode3 memory reference:
  // base register, offset register (0 if none), and a packed AM3 opcode
  // word carrying add/sub and the immediate magnitude.
  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    assert(N == 3 && "Invalid number of operands!");
    if (isImm()) {
      Inst.addOperand(Op: MCOperand::createExpr(Val: getImm()));
      Inst.addOperand(Op: MCOperand::createReg(Reg: 0));
      Inst.addOperand(Op: MCOperand::createImm(Val: 0));
      return;
    }

    Inst.addOperand(Op: MCOperand::createReg(Reg: Memory.BaseRegNum));
    Inst.addOperand(Op: MCOperand::createReg(Reg: Memory.OffsetRegNum));
    if (!Memory.OffsetRegNum) {
      if (!Memory.OffsetImm)
        // No offset at all: encode as an add of zero.
        Inst.addOperand(Op: MCOperand::createImm(Val: 0));
      else if (const auto *CE = dyn_cast<MCConstantExpr>(Val: Memory.OffsetImm)) {
        int32_t Val = CE->getValue();
        ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
        // Special case for #-0 (INT32_MIN sentinel: keeps 'sub',
        // magnitude 0).
        if (Val == std::numeric_limits<int32_t>::min())
          Val = 0;
        if (Val < 0)
          Val = -Val;
        Val = ARM_AM::getAM3Opc(Opc: AddSub, Offset: Val);
        Inst.addOperand(Op: MCOperand::createImm(Val));
      } else
        // Non-constant offset: emit as an expression for a later fixup.
        Inst.addOperand(Op: MCOperand::createExpr(Val: Memory.OffsetImm));
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      int32_t Val =
          ARM_AM::getAM3Opc(Opc: Memory.isNegative ? ARM_AM::sub : ARM_AM::add, Offset: 0);
      Inst.addOperand(Op: MCOperand::createImm(Val));
    }
  }
3082
3083 void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
3084 assert(N == 2 && "Invalid number of operands!");
3085 if (Kind == k_PostIndexRegister) {
3086 int32_t Val =
3087 ARM_AM::getAM3Opc(Opc: PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, Offset: 0);
3088 Inst.addOperand(Op: MCOperand::createReg(Reg: PostIdxReg.RegNum));
3089 Inst.addOperand(Op: MCOperand::createImm(Val));
3090 return;
3091 }
3092
3093 // Constant offset.
3094 const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
3095 int32_t Val = CE->getValue();
3096 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
3097 // Special case for #-0
3098 if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
3099 if (Val < 0) Val = -Val;
3100 Val = ARM_AM::getAM3Opc(Opc: AddSub, Offset: Val);
3101 Inst.addOperand(Op: MCOperand::createReg(Reg: 0));
3102 Inst.addOperand(Op: MCOperand::createImm(Val));
3103 }
3104
  // Emit the two MCInst operands of an addrmode5 (VFP load/store) memory
  // reference: base register plus a packed AM5 opcode word.
  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(Op: MCOperand::createExpr(Val: getImm()));
      Inst.addOperand(Op: MCOperand::createImm(Val: 0));
      return;
    }

    Inst.addOperand(Op: MCOperand::createReg(Reg: Memory.BaseRegNum));
    if (!Memory.OffsetImm)
      Inst.addOperand(Op: MCOperand::createImm(Val: 0));
    else if (const auto *CE = dyn_cast<MCConstantExpr>(Val: Memory.OffsetImm)) {
      // The lower two bits are always zero and as such are not encoded.
      int32_t Val = CE->getValue() / 4;
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0 (INT32_MIN sentinel: keeps 'sub',
      // magnitude 0).
      if (Val == std::numeric_limits<int32_t>::min())
        Val = 0;
      if (Val < 0)
        Val = -Val;
      Val = ARM_AM::getAM5Opc(Opc: AddSub, Offset: Val);
      Inst.addOperand(Op: MCOperand::createImm(Val));
    } else
      // Non-constant offset: emit as an expression for a later fixup.
      Inst.addOperand(Op: MCOperand::createExpr(Val: Memory.OffsetImm));
  }
3133
  // Emit the two MCInst operands of an addrmode5 FP16 memory reference:
  // base register plus a packed AM5FP16 opcode word (offset scaled by 2).
  void addAddrMode5FP16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(Op: MCOperand::createExpr(Val: getImm()));
      Inst.addOperand(Op: MCOperand::createImm(Val: 0));
      return;
    }

    Inst.addOperand(Op: MCOperand::createReg(Reg: Memory.BaseRegNum));
    // The lower bit is always zero and as such is not encoded.
    if (!Memory.OffsetImm)
      Inst.addOperand(Op: MCOperand::createImm(Val: 0));
    else if (const auto *CE = dyn_cast<MCConstantExpr>(Val: Memory.OffsetImm)) {
      int32_t Val = CE->getValue() / 2;
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0 (INT32_MIN sentinel: keeps 'sub',
      // magnitude 0).
      if (Val == std::numeric_limits<int32_t>::min())
        Val = 0;
      if (Val < 0)
        Val = -Val;
      Val = ARM_AM::getAM5FP16Opc(Opc: AddSub, Offset: Val);
      Inst.addOperand(Op: MCOperand::createImm(Val));
    } else
      // Non-constant offset: emit as an expression for a later fixup.
      Inst.addOperand(Op: MCOperand::createExpr(Val: Memory.OffsetImm));
  }
3162
  // Base register plus 8-bit, 4-byte-scaled immediate offset.
  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(Op: MCOperand::createExpr(Val: getImm()));
      Inst.addOperand(Op: MCOperand::createImm(Val: 0));
      return;
    }

    Inst.addOperand(Op: MCOperand::createReg(Reg: Memory.BaseRegNum));
    addExpr(Inst, Expr: Memory.OffsetImm);
  }
3177
  // Base register plus 7-bit, 4-byte-scaled immediate offset.
  void addMemImm7s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(Op: MCOperand::createExpr(Val: getImm()));
      Inst.addOperand(Op: MCOperand::createImm(Val: 0));
      return;
    }

    Inst.addOperand(Op: MCOperand::createReg(Reg: Memory.BaseRegNum));
    addExpr(Inst, Expr: Memory.OffsetImm);
  }
3192
3193 void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
3194 assert(N == 2 && "Invalid number of operands!");
3195 Inst.addOperand(Op: MCOperand::createReg(Reg: Memory.BaseRegNum));
3196 if (!Memory.OffsetImm)
3197 Inst.addOperand(Op: MCOperand::createImm(Val: 0));
3198 else if (const auto *CE = dyn_cast<MCConstantExpr>(Val: Memory.OffsetImm))
3199 // The lower two bits are always zero and as such are not encoded.
3200 Inst.addOperand(Op: MCOperand::createImm(Val: CE->getValue() / 4));
3201 else
3202 Inst.addOperand(Op: MCOperand::createExpr(Val: Memory.OffsetImm));
3203 }
3204
3205 void addMemImmOffsetOperands(MCInst &Inst, unsigned N) const {
3206 assert(N == 2 && "Invalid number of operands!");
3207 Inst.addOperand(Op: MCOperand::createReg(Reg: Memory.BaseRegNum));
3208 addExpr(Inst, Expr: Memory.OffsetImm);
3209 }
3210
3211 void addMemRegRQOffsetOperands(MCInst &Inst, unsigned N) const {
3212 assert(N == 2 && "Invalid number of operands!");
3213 Inst.addOperand(Op: MCOperand::createReg(Reg: Memory.BaseRegNum));
3214 Inst.addOperand(Op: MCOperand::createReg(Reg: Memory.OffsetRegNum));
3215 }
3216
  // Base register plus unsigned 12-bit immediate offset.
  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (isImm()) {
      addExpr(Inst, Expr: getImm());
      Inst.addOperand(Op: MCOperand::createImm(Val: 0));
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    Inst.addOperand(Op: MCOperand::createReg(Reg: Memory.BaseRegNum));
    addExpr(Inst, Expr: Memory.OffsetImm);
  }
3230
  // Base register plus signed 12-bit immediate offset.
  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (isImm()) {
      addExpr(Inst, Expr: getImm());
      Inst.addOperand(Op: MCOperand::createImm(Val: 0));
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    Inst.addOperand(Op: MCOperand::createReg(Reg: Memory.BaseRegNum));
    addExpr(Inst, Expr: Memory.OffsetImm);
  }
3244
  void addConstPoolAsmImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // This is container for the immediate that we will create the constant
    // pool from
    addExpr(Inst, Expr: getConstantPoolImm());
  }
3251
  // TBB table-branch operand: base register plus byte-index register.
  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createReg(Reg: Memory.BaseRegNum));
    Inst.addOperand(Op: MCOperand::createReg(Reg: Memory.OffsetRegNum));
  }
3257
  // TBH table-branch operand: base register plus halfword-index register.
  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createReg(Reg: Memory.BaseRegNum));
    Inst.addOperand(Op: MCOperand::createReg(Reg: Memory.OffsetRegNum));
  }
3263
3264 void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
3265 assert(N == 3 && "Invalid number of operands!");
3266 unsigned Val =
3267 ARM_AM::getAM2Opc(Opc: Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
3268 Imm12: Memory.ShiftImm, SO: Memory.ShiftType);
3269 Inst.addOperand(Op: MCOperand::createReg(Reg: Memory.BaseRegNum));
3270 Inst.addOperand(Op: MCOperand::createReg(Reg: Memory.OffsetRegNum));
3271 Inst.addOperand(Op: MCOperand::createImm(Val));
3272 }
3273
  // Thumb-2 register-offset form: base, offset register, and the shift
  // amount as a plain immediate.
  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createReg(Reg: Memory.BaseRegNum));
    Inst.addOperand(Op: MCOperand::createReg(Reg: Memory.OffsetRegNum));
    Inst.addOperand(Op: MCOperand::createImm(Val: Memory.ShiftImm));
  }
3280
  // Thumb reg+reg memory form: base register plus offset register.
  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createReg(Reg: Memory.BaseRegNum));
    Inst.addOperand(Op: MCOperand::createReg(Reg: Memory.OffsetRegNum));
  }
3286
3287 void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
3288 assert(N == 2 && "Invalid number of operands!");
3289 Inst.addOperand(Op: MCOperand::createReg(Reg: Memory.BaseRegNum));
3290 if (!Memory.OffsetImm)
3291 Inst.addOperand(Op: MCOperand::createImm(Val: 0));
3292 else if (const auto *CE = dyn_cast<MCConstantExpr>(Val: Memory.OffsetImm))
3293 // The lower two bits are always zero and as such are not encoded.
3294 Inst.addOperand(Op: MCOperand::createImm(Val: CE->getValue() / 4));
3295 else
3296 Inst.addOperand(Op: MCOperand::createExpr(Val: Memory.OffsetImm));
3297 }
3298
3299 void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
3300 assert(N == 2 && "Invalid number of operands!");
3301 Inst.addOperand(Op: MCOperand::createReg(Reg: Memory.BaseRegNum));
3302 if (!Memory.OffsetImm)
3303 Inst.addOperand(Op: MCOperand::createImm(Val: 0));
3304 else if (const auto *CE = dyn_cast<MCConstantExpr>(Val: Memory.OffsetImm))
3305 Inst.addOperand(Op: MCOperand::createImm(Val: CE->getValue() / 2));
3306 else
3307 Inst.addOperand(Op: MCOperand::createExpr(Val: Memory.OffsetImm));
3308 }
3309
  // Thumb reg + unscaled byte immediate: offset is emitted as-is.
  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createReg(Reg: Memory.BaseRegNum));
    addExpr(Inst, Expr: Memory.OffsetImm);
  }
3315
3316 void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
3317 assert(N == 2 && "Invalid number of operands!");
3318 Inst.addOperand(Op: MCOperand::createReg(Reg: Memory.BaseRegNum));
3319 if (!Memory.OffsetImm)
3320 Inst.addOperand(Op: MCOperand::createImm(Val: 0));
3321 else if (const auto *CE = dyn_cast<MCConstantExpr>(Val: Memory.OffsetImm))
3322 // The lower two bits are always zero and as such are not encoded.
3323 Inst.addOperand(Op: MCOperand::createImm(Val: CE->getValue() / 4));
3324 else
3325 Inst.addOperand(Op: MCOperand::createExpr(Val: Memory.OffsetImm));
3326 }
3327
  // Post-indexed imm8: encoded as the magnitude in bits 0-7 with the
  // add/sub flag in bit 8 (1 = add).
  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
    assert(CE && "non-constant post-idx-imm8 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    // Special case for #-0: INT32_MIN is the sentinel; isAdd is already
    // false, so this becomes a subtract of magnitude zero.
    if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
    Inst.addOperand(Op: MCOperand::createImm(Val: Imm));
  }
3338
  // Post-indexed imm8, 4-byte-scaled: magnitude/4 in bits 0-7 with the
  // add/sub flag in bit 8 (1 = add).
  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: getImm());
    assert(CE && "non-constant post-idx-imm8s4 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    // Special case for #-0: INT32_MIN is the sentinel; isAdd is already
    // false, so this becomes a subtract of magnitude zero.
    if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
    // Immediate is scaled by 4.
    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
    Inst.addOperand(Op: MCOperand::createImm(Val: Imm));
  }
3350
3351 void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
3352 assert(N == 2 && "Invalid number of operands!");
3353 Inst.addOperand(Op: MCOperand::createReg(Reg: PostIdxReg.RegNum));
3354 Inst.addOperand(Op: MCOperand::createImm(Val: PostIdxReg.isAdd));
3355 }
3356
3357 void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
3358 assert(N == 2 && "Invalid number of operands!");
3359 Inst.addOperand(Op: MCOperand::createReg(Reg: PostIdxReg.RegNum));
3360 // The sign, shift type, and shift amount are encoded in a single operand
3361 // using the AM2 encoding helpers.
3362 ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
3363 unsigned Imm = ARM_AM::getAM2Opc(Opc: opc, Imm12: PostIdxReg.ShiftImm,
3364 SO: PostIdxReg.ShiftTy);
3365 Inst.addOperand(Op: MCOperand::createImm(Val: Imm));
3366 }
3367
3368 void addPowerTwoOperands(MCInst &Inst, unsigned N) const {
3369 assert(N == 1 && "Invalid number of operands!");
3370 const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
3371 Inst.addOperand(Op: MCOperand::createImm(Val: CE->getValue()));
3372 }
3373
3374 void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
3375 assert(N == 1 && "Invalid number of operands!");
3376 Inst.addOperand(Op: MCOperand::createImm(Val: unsigned(getMSRMask())));
3377 }
3378
3379 void addBankedRegOperands(MCInst &Inst, unsigned N) const {
3380 assert(N == 1 && "Invalid number of operands!");
3381 Inst.addOperand(Op: MCOperand::createImm(Val: unsigned(getBankedReg())));
3382 }
3383
3384 void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
3385 assert(N == 1 && "Invalid number of operands!");
3386 Inst.addOperand(Op: MCOperand::createImm(Val: unsigned(getProcIFlags())));
3387 }
3388
  // Add a vector-list register operand. Accepts a real vector list, a lone
  // D register, or a Q register (translated to the corresponding D-pair);
  // the single-register forms are only valid when MVE is not in use.
  void addVecListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    if (isAnyVectorList())
      Inst.addOperand(Op: MCOperand::createReg(Reg: VectorList.RegNum));
    else if (isDReg() && !Parser->hasMVE()) {
      Inst.addOperand(Op: MCOperand::createReg(Reg: Reg.RegNum));
    } else if (isQReg() && !Parser->hasMVE()) {
      // A Q register names the same storage as a D-register pair; find the
      // DPair super-register that covers it.
      MCRegister DPair = Parser->getDRegFromQReg(QReg: Reg.RegNum);
      DPair = Parser->getMRI()->getMatchingSuperReg(
          Reg: DPair, SubIdx: ARM::dsub_0, RC: &ARMMCRegisterClasses[ARM::DPairRegClassID]);
      Inst.addOperand(Op: MCOperand::createReg(Reg: DPair));
    } else {
      LLVM_DEBUG(dbgs() << "TYPE: " << Kind << "\n");
      llvm_unreachable(
          "attempted to add a vector list register with wrong type!");
    }
  }
3407
  void addMVEVecListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    // When we come here, the VectorList field will identify a range
    // of q-registers by its base register and length, and it will
    // have already been error-checked to be the expected length of
    // range and contain only q-regs in the range q0-q7. So we can
    // count on the base register being in the range q0-q6 (for 2
    // regs) or q0-q4 (for 4)
    //
    // The MVE instructions taking a register range of this kind will
    // need an operand in the MQQPR or MQQQQPR class, representing the
    // entire range as a unit. So we must translate into that class,
    // by finding the index of the base register in the MQPR reg
    // class, and returning the super-register at the corresponding
    // index in the target class.

    const MCRegisterClass *RC_in = &ARMMCRegisterClasses[ARM::MQPRRegClassID];
    const MCRegisterClass *RC_out =
        (VectorList.Count == 2) ? &ARMMCRegisterClasses[ARM::MQQPRRegClassID]
                                : &ARMMCRegisterClasses[ARM::MQQQQPRRegClassID];

    // Linear search for the base register's index within the MQPR class.
    unsigned I, E = RC_out->getNumRegs();
    for (I = 0; I < E; I++)
      if (RC_in->getRegister(i: I) == VectorList.RegNum)
        break;
    assert(I < E && "Invalid vector list start register!");

    Inst.addOperand(Op: MCOperand::createReg(Reg: RC_out->getRegister(i: I)));
  }
3438
  // Vector list with a lane index: the list register then the lane number.
  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(Op: MCOperand::createReg(Reg: VectorList.RegNum));
    Inst.addOperand(Op: MCOperand::createImm(Val: VectorList.LaneIndex));
  }
3444
3445 void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
3446 assert(N == 1 && "Invalid number of operands!");
3447 Inst.addOperand(Op: MCOperand::createImm(Val: getVectorIndex()));
3448 }
3449
3450 void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
3451 assert(N == 1 && "Invalid number of operands!");
3452 Inst.addOperand(Op: MCOperand::createImm(Val: getVectorIndex()));
3453 }
3454
3455 void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
3456 assert(N == 1 && "Invalid number of operands!");
3457 Inst.addOperand(Op: MCOperand::createImm(Val: getVectorIndex()));
3458 }
3459
3460 void addVectorIndex64Operands(MCInst &Inst, unsigned N) const {
3461 assert(N == 1 && "Invalid number of operands!");
3462 Inst.addOperand(Op: MCOperand::createImm(Val: getVectorIndex()));
3463 }
3464
3465 void addMVEVectorIndexOperands(MCInst &Inst, unsigned N) const {
3466 assert(N == 1 && "Invalid number of operands!");
3467 Inst.addOperand(Op: MCOperand::createImm(Val: getVectorIndex()));
3468 }
3469
3470 void addMVEPairVectorIndexOperands(MCInst &Inst, unsigned N) const {
3471 assert(N == 1 && "Invalid number of operands!");
3472 Inst.addOperand(Op: MCOperand::createImm(Val: getVectorIndex()));
3473 }
3474
3475 void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
3476 assert(N == 1 && "Invalid number of operands!");
3477 // The immediate encodes the type of constant as well as the value.
3478 // Mask in that this is an i8 splat.
3479 const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
3480 Inst.addOperand(Op: MCOperand::createImm(Val: CE->getValue() | 0xe00));
3481 }
3482
3483 void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
3484 assert(N == 1 && "Invalid number of operands!");
3485 // The immediate encodes the type of constant as well as the value.
3486 const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
3487 unsigned Value = CE->getValue();
3488 Value = ARM_AM::encodeNEONi16splat(Value);
3489 Inst.addOperand(Op: MCOperand::createImm(Val: Value));
3490 }
3491
3492 void addNEONi16splatNotOperands(MCInst &Inst, unsigned N) const {
3493 assert(N == 1 && "Invalid number of operands!");
3494 // The immediate encodes the type of constant as well as the value.
3495 const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
3496 unsigned Value = CE->getValue();
3497 Value = ARM_AM::encodeNEONi16splat(Value: ~Value & 0xffff);
3498 Inst.addOperand(Op: MCOperand::createImm(Val: Value));
3499 }
3500
3501 void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
3502 assert(N == 1 && "Invalid number of operands!");
3503 // The immediate encodes the type of constant as well as the value.
3504 const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
3505 unsigned Value = CE->getValue();
3506 Value = ARM_AM::encodeNEONi32splat(Value);
3507 Inst.addOperand(Op: MCOperand::createImm(Val: Value));
3508 }
3509
3510 void addNEONi32splatNotOperands(MCInst &Inst, unsigned N) const {
3511 assert(N == 1 && "Invalid number of operands!");
3512 // The immediate encodes the type of constant as well as the value.
3513 const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
3514 unsigned Value = CE->getValue();
3515 Value = ARM_AM::encodeNEONi32splat(Value: ~Value);
3516 Inst.addOperand(Op: MCOperand::createImm(Val: Value));
3517 }
3518
  // Shared helper for the byte-replicate VMOV forms: emits the low byte of
  // the constant (optionally inverted when Inv is set) with the cmode
  // 0b1110 marker merged into the immediate.
  void addNEONi8ReplicateOperands(MCInst &Inst, bool Inv) const {
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
    assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
            Inst.getOpcode() == ARM::VMOVv16i8) &&
           "All instructions that wants to replicate non-zero byte "
           "always must be replaced with VMOVv8i8 or VMOVv16i8.");
    unsigned Value = CE->getValue();
    if (Inv)
      Value = ~Value;
    unsigned B = Value & 0xff;
    B |= 0xe00; // cmode = 0b1110
    Inst.addOperand(Op: MCOperand::createImm(Val: B));
  }
3533
  // Inverted byte replicate: delegates to the shared helper with Inv = true.
  void addNEONinvi8ReplicateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addNEONi8ReplicateOperands(Inst, Inv: true);
  }
3538
3539 static unsigned encodeNeonVMOVImmediate(unsigned Value) {
3540 if (Value >= 256 && Value <= 0xffff)
3541 Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
3542 else if (Value > 0xffff && Value <= 0xffffff)
3543 Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
3544 else if (Value > 0xffffff)
3545 Value = (Value >> 24) | 0x600;
3546 return Value;
3547 }
3548
3549 void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
3550 assert(N == 1 && "Invalid number of operands!");
3551 // The immediate encodes the type of constant as well as the value.
3552 const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
3553 unsigned Value = encodeNeonVMOVImmediate(Value: CE->getValue());
3554 Inst.addOperand(Op: MCOperand::createImm(Val: Value));
3555 }
3556
  // Plain (non-inverted) byte replicate: delegates with Inv = false.
  void addNEONvmovi8ReplicateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addNEONi8ReplicateOperands(Inst, Inv: false);
  }
3561
3562 void addNEONvmovi16ReplicateOperands(MCInst &Inst, unsigned N) const {
3563 assert(N == 1 && "Invalid number of operands!");
3564 const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
3565 assert((Inst.getOpcode() == ARM::VMOVv4i16 ||
3566 Inst.getOpcode() == ARM::VMOVv8i16 ||
3567 Inst.getOpcode() == ARM::VMVNv4i16 ||
3568 Inst.getOpcode() == ARM::VMVNv8i16) &&
3569 "All instructions that want to replicate non-zero half-word "
3570 "always must be replaced with V{MOV,MVN}v{4,8}i16.");
3571 uint64_t Value = CE->getValue();
3572 unsigned Elem = Value & 0xffff;
3573 if (Elem >= 256)
3574 Elem = (Elem >> 8) | 0x200;
3575 Inst.addOperand(Op: MCOperand::createImm(Val: Elem));
3576 }
3577
3578 void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
3579 assert(N == 1 && "Invalid number of operands!");
3580 // The immediate encodes the type of constant as well as the value.
3581 const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
3582 unsigned Value = encodeNeonVMOVImmediate(Value: ~CE->getValue());
3583 Inst.addOperand(Op: MCOperand::createImm(Val: Value));
3584 }
3585
3586 void addNEONvmovi32ReplicateOperands(MCInst &Inst, unsigned N) const {
3587 assert(N == 1 && "Invalid number of operands!");
3588 const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
3589 assert((Inst.getOpcode() == ARM::VMOVv2i32 ||
3590 Inst.getOpcode() == ARM::VMOVv4i32 ||
3591 Inst.getOpcode() == ARM::VMVNv2i32 ||
3592 Inst.getOpcode() == ARM::VMVNv4i32) &&
3593 "All instructions that want to replicate non-zero word "
3594 "always must be replaced with V{MOV,MVN}v{2,4}i32.");
3595 uint64_t Value = CE->getValue();
3596 unsigned Elem = encodeNeonVMOVImmediate(Value: Value & 0xffffffff);
3597 Inst.addOperand(Op: MCOperand::createImm(Val: Elem));
3598 }
3599
3600 void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
3601 assert(N == 1 && "Invalid number of operands!");
3602 // The immediate encodes the type of constant as well as the value.
3603 const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
3604 uint64_t Value = CE->getValue();
3605 unsigned Imm = 0;
3606 for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
3607 Imm |= (Value & 1) << i;
3608 }
3609 Inst.addOperand(Op: MCOperand::createImm(Val: Imm | 0x1e00));
3610 }
3611
  // Encode a complex rotation that is a multiple of 90 degrees as
  // degrees / 90 (0 -> 0, 90 -> 1, 180 -> 2, 270 -> 3).
  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
    Inst.addOperand(Op: MCOperand::createImm(Val: CE->getValue() / 90));
  }
3617
  // Encode an "odd" complex rotation as (degrees - 90) / 180
  // (90 -> 0, 270 -> 1).
  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
    Inst.addOperand(Op: MCOperand::createImm(Val: (CE->getValue() - 90) / 180));
  }
3623
  // Encode the MVE saturate-position operand: 48 maps to 1, 64 maps to 0.
  void addMveSaturateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(Val: getImm());
    unsigned Imm = CE->getValue();
    assert((Imm == 48 || Imm == 64) && "Invalid saturate operand");
    Inst.addOperand(Op: MCOperand::createImm(Val: Imm == 48 ? 1 : 0));
  }
3631
3632 void print(raw_ostream &OS, const MCAsmInfo &MAI) const override;
3633
3634 static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S,
3635 ARMAsmParser &Parser) {
3636 auto Op = std::make_unique<ARMOperand>(args: k_ITCondMask, args&: Parser);
3637 Op->ITMask.Mask = Mask;
3638 Op->StartLoc = S;
3639 Op->EndLoc = S;
3640 return Op;
3641 }
3642
3643 static std::unique_ptr<ARMOperand>
3644 CreateCondCode(ARMCC::CondCodes CC, SMLoc S, ARMAsmParser &Parser) {
3645 auto Op = std::make_unique<ARMOperand>(args: k_CondCode, args&: Parser);
3646 Op->CC.Val = CC;
3647 Op->StartLoc = S;
3648 Op->EndLoc = S;
3649 return Op;
3650 }
3651
3652 static std::unique_ptr<ARMOperand> CreateVPTPred(ARMVCC::VPTCodes CC, SMLoc S,
3653 ARMAsmParser &Parser) {
3654 auto Op = std::make_unique<ARMOperand>(args: k_VPTPred, args&: Parser);
3655 Op->VCC.Val = CC;
3656 Op->StartLoc = S;
3657 Op->EndLoc = S;
3658 return Op;
3659 }
3660
3661 static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S,
3662 ARMAsmParser &Parser) {
3663 auto Op = std::make_unique<ARMOperand>(args: k_CoprocNum, args&: Parser);
3664 Op->Cop.Val = CopVal;
3665 Op->StartLoc = S;
3666 Op->EndLoc = S;
3667 return Op;
3668 }
3669
3670 static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S,
3671 ARMAsmParser &Parser) {
3672 auto Op = std::make_unique<ARMOperand>(args: k_CoprocReg, args&: Parser);
3673 Op->Cop.Val = CopVal;
3674 Op->StartLoc = S;
3675 Op->EndLoc = S;
3676 return Op;
3677 }
3678
3679 static std::unique_ptr<ARMOperand>
3680 CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E, ARMAsmParser &Parser) {
3681 auto Op = std::make_unique<ARMOperand>(args: k_CoprocOption, args&: Parser);
3682 Op->Cop.Val = Val;
3683 Op->StartLoc = S;
3684 Op->EndLoc = E;
3685 return Op;
3686 }
3687
3688 static std::unique_ptr<ARMOperand> CreateCCOut(MCRegister Reg, SMLoc S,
3689 ARMAsmParser &Parser) {
3690 auto Op = std::make_unique<ARMOperand>(args: k_CCOut, args&: Parser);
3691 Op->Reg.RegNum = Reg;
3692 Op->StartLoc = S;
3693 Op->EndLoc = S;
3694 return Op;
3695 }
3696
3697 static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S,
3698 ARMAsmParser &Parser) {
3699 auto Op = std::make_unique<ARMOperand>(args: k_Token, args&: Parser);
3700 Op->Tok.Data = Str.data();
3701 Op->Tok.Length = Str.size();
3702 Op->StartLoc = S;
3703 Op->EndLoc = S;
3704 return Op;
3705 }
3706
3707 static std::unique_ptr<ARMOperand> CreateReg(MCRegister Reg, SMLoc S, SMLoc E,
3708 ARMAsmParser &Parser) {
3709 auto Op = std::make_unique<ARMOperand>(args: k_Register, args&: Parser);
3710 Op->Reg.RegNum = Reg;
3711 Op->StartLoc = S;
3712 Op->EndLoc = E;
3713 return Op;
3714 }
3715
3716 static std::unique_ptr<ARMOperand>
3717 CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, MCRegister SrcReg,
3718 MCRegister ShiftReg, unsigned ShiftImm, SMLoc S,
3719 SMLoc E, ARMAsmParser &Parser) {
3720 auto Op = std::make_unique<ARMOperand>(args: k_ShiftedRegister, args&: Parser);
3721 Op->RegShiftedReg.ShiftTy = ShTy;
3722 Op->RegShiftedReg.SrcReg = SrcReg;
3723 Op->RegShiftedReg.ShiftReg = ShiftReg;
3724 Op->RegShiftedReg.ShiftImm = ShiftImm;
3725 Op->StartLoc = S;
3726 Op->EndLoc = E;
3727 return Op;
3728 }
3729
3730 static std::unique_ptr<ARMOperand>
3731 CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, MCRegister SrcReg,
3732 unsigned ShiftImm, SMLoc S, SMLoc E,
3733 ARMAsmParser &Parser) {
3734 auto Op = std::make_unique<ARMOperand>(args: k_ShiftedImmediate, args&: Parser);
3735 Op->RegShiftedImm.ShiftTy = ShTy;
3736 Op->RegShiftedImm.SrcReg = SrcReg;
3737 Op->RegShiftedImm.ShiftImm = ShiftImm;
3738 Op->StartLoc = S;
3739 Op->EndLoc = E;
3740 return Op;
3741 }
3742
3743 static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm,
3744 SMLoc S, SMLoc E,
3745 ARMAsmParser &Parser) {
3746 auto Op = std::make_unique<ARMOperand>(args: k_ShifterImmediate, args&: Parser);
3747 Op->ShifterImm.isASR = isASR;
3748 Op->ShifterImm.Imm = Imm;
3749 Op->StartLoc = S;
3750 Op->EndLoc = E;
3751 return Op;
3752 }
3753
3754 static std::unique_ptr<ARMOperand>
3755 CreateRotImm(unsigned Imm, SMLoc S, SMLoc E, ARMAsmParser &Parser) {
3756 auto Op = std::make_unique<ARMOperand>(args: k_RotateImmediate, args&: Parser);
3757 Op->RotImm.Imm = Imm;
3758 Op->StartLoc = S;
3759 Op->EndLoc = E;
3760 return Op;
3761 }
3762
3763 static std::unique_ptr<ARMOperand> CreateModImm(unsigned Bits, unsigned Rot,
3764 SMLoc S, SMLoc E,
3765 ARMAsmParser &Parser) {
3766 auto Op = std::make_unique<ARMOperand>(args: k_ModifiedImmediate, args&: Parser);
3767 Op->ModImm.Bits = Bits;
3768 Op->ModImm.Rot = Rot;
3769 Op->StartLoc = S;
3770 Op->EndLoc = E;
3771 return Op;
3772 }
3773
3774 static std::unique_ptr<ARMOperand>
3775 CreateConstantPoolImm(const MCExpr *Val, SMLoc S, SMLoc E,
3776 ARMAsmParser &Parser) {
3777 auto Op = std::make_unique<ARMOperand>(args: k_ConstantPoolImmediate, args&: Parser);
3778 Op->Imm.Val = Val;
3779 Op->StartLoc = S;
3780 Op->EndLoc = E;
3781 return Op;
3782 }
3783
3784 static std::unique_ptr<ARMOperand> CreateBitfield(unsigned LSB,
3785 unsigned Width, SMLoc S,
3786 SMLoc E,
3787 ARMAsmParser &Parser) {
3788 auto Op = std::make_unique<ARMOperand>(args: k_BitfieldDescriptor, args&: Parser);
3789 Op->Bitfield.LSB = LSB;
3790 Op->Bitfield.Width = Width;
3791 Op->StartLoc = S;
3792 Op->EndLoc = E;
3793 return Op;
3794 }
3795
  /// Build a register-list operand from (encoding, register) pairs, choosing
  /// the list kind (GPR/DPR/SPR, with optional trailing APSR or VPR) from
  /// the first and last registers. \p Regs must be non-empty and sorted by
  /// encoding.
  static std::unique_ptr<ARMOperand>
  CreateRegList(SmallVectorImpl<std::pair<unsigned, MCRegister>> &Regs,
                SMLoc StartLoc, SMLoc EndLoc, ARMAsmParser &Parser) {
    assert(Regs.size() > 0 && "RegList contains no registers?");
    KindTy Kind = k_RegisterList;

    // Classify by the first register; a trailing VPR upgrades the FP list
    // kinds to their "with VPR" variants.
    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
            Reg: Regs.front().second)) {
      if (Regs.back().second == ARM::VPR)
        Kind = k_FPDRegisterListWithVPR;
      else
        Kind = k_DPRRegisterList;
    } else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
                   Reg: Regs.front().second)) {
      if (Regs.back().second == ARM::VPR)
        Kind = k_FPSRegisterListWithVPR;
      else
        Kind = k_SPRRegisterList;
    } else if (Regs.front().second == ARM::VPR) {
      assert(Regs.size() == 1 &&
             "Register list starting with VPR expected to only contain VPR");
      Kind = k_FPSRegisterListWithVPR;
    }

    // A GPR list may carry APSR as its last element.
    if (Kind == k_RegisterList && Regs.back().second == ARM::APSR)
      Kind = k_RegisterListWithAPSR;

    assert(llvm::is_sorted(Regs) && "Register list must be sorted by encoding");

    // Only the registers are kept; the encodings were needed just for the
    // sortedness check above.
    auto Op = std::make_unique<ARMOperand>(args&: Kind, args&: Parser);
    for (const auto &P : Regs)
      Op->Registers.push_back(Elt: P.second);

    Op->StartLoc = StartLoc;
    Op->EndLoc = EndLoc;
    return Op;
  }
3833
3834 static std::unique_ptr<ARMOperand>
3835 CreateVectorList(MCRegister Reg, unsigned Count, bool isDoubleSpaced, SMLoc S,
3836 SMLoc E, ARMAsmParser &Parser) {
3837 auto Op = std::make_unique<ARMOperand>(args: k_VectorList, args&: Parser);
3838 Op->VectorList.RegNum = Reg;
3839 Op->VectorList.Count = Count;
3840 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3841 Op->StartLoc = S;
3842 Op->EndLoc = E;
3843 return Op;
3844 }
3845
3846 static std::unique_ptr<ARMOperand>
3847 CreateVectorListAllLanes(MCRegister Reg, unsigned Count, bool isDoubleSpaced,
3848 SMLoc S, SMLoc E, ARMAsmParser &Parser) {
3849 auto Op = std::make_unique<ARMOperand>(args: k_VectorListAllLanes, args&: Parser);
3850 Op->VectorList.RegNum = Reg;
3851 Op->VectorList.Count = Count;
3852 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3853 Op->StartLoc = S;
3854 Op->EndLoc = E;
3855 return Op;
3856 }
3857
3858 static std::unique_ptr<ARMOperand>
3859 CreateVectorListIndexed(MCRegister Reg, unsigned Count, unsigned Index,
3860 bool isDoubleSpaced, SMLoc S, SMLoc E,
3861 ARMAsmParser &Parser) {
3862 auto Op = std::make_unique<ARMOperand>(args: k_VectorListIndexed, args&: Parser);
3863 Op->VectorList.RegNum = Reg;
3864 Op->VectorList.Count = Count;
3865 Op->VectorList.LaneIndex = Index;
3866 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3867 Op->StartLoc = S;
3868 Op->EndLoc = E;
3869 return Op;
3870 }
3871
  /// Build a vector lane-index operand ("[n]").
  /// NOTE(review): the MCContext parameter is unused in this body; kept to
  /// avoid changing the call sites.
  static std::unique_ptr<ARMOperand> CreateVectorIndex(unsigned Idx, SMLoc S,
                                                       SMLoc E, MCContext &Ctx,
                                                       ARMAsmParser &Parser) {
    auto Op = std::make_unique<ARMOperand>(args: k_VectorIndex, args&: Parser);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
3881
3882 static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S,
3883 SMLoc E, ARMAsmParser &Parser) {
3884 auto Op = std::make_unique<ARMOperand>(args: k_Immediate, args&: Parser);
3885 Op->Imm.Val = Val;
3886 Op->StartLoc = S;
3887 Op->EndLoc = E;
3888 return Op;
3889 }
3890
3891 static std::unique_ptr<ARMOperand>
3892 CreateMem(MCRegister BaseReg, const MCExpr *OffsetImm, MCRegister OffsetReg,
3893 ARM_AM::ShiftOpc ShiftType, unsigned ShiftImm, unsigned Alignment,
3894 bool isNegative, SMLoc S, SMLoc E, ARMAsmParser &Parser,
3895 SMLoc AlignmentLoc = SMLoc()) {
3896 auto Op = std::make_unique<ARMOperand>(args: k_Memory, args&: Parser);
3897 Op->Memory.BaseRegNum = BaseReg;
3898 Op->Memory.OffsetImm = OffsetImm;
3899 Op->Memory.OffsetRegNum = OffsetReg;
3900 Op->Memory.ShiftType = ShiftType;
3901 Op->Memory.ShiftImm = ShiftImm;
3902 Op->Memory.Alignment = Alignment;
3903 Op->Memory.isNegative = isNegative;
3904 Op->StartLoc = S;
3905 Op->EndLoc = E;
3906 Op->AlignmentLoc = AlignmentLoc;
3907 return Op;
3908 }
3909
3910 static std::unique_ptr<ARMOperand>
3911 CreatePostIdxReg(MCRegister Reg, bool isAdd, ARM_AM::ShiftOpc ShiftTy,
3912 unsigned ShiftImm, SMLoc S, SMLoc E, ARMAsmParser &Parser) {
3913 auto Op = std::make_unique<ARMOperand>(args: k_PostIndexRegister, args&: Parser);
3914 Op->PostIdxReg.RegNum = Reg;
3915 Op->PostIdxReg.isAdd = isAdd;
3916 Op->PostIdxReg.ShiftTy = ShiftTy;
3917 Op->PostIdxReg.ShiftImm = ShiftImm;
3918 Op->StartLoc = S;
3919 Op->EndLoc = E;
3920 return Op;
3921 }
3922
3923 static std::unique_ptr<ARMOperand>
3924 CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S, ARMAsmParser &Parser) {
3925 auto Op = std::make_unique<ARMOperand>(args: k_MemBarrierOpt, args&: Parser);
3926 Op->MBOpt.Val = Opt;
3927 Op->StartLoc = S;
3928 Op->EndLoc = S;
3929 return Op;
3930 }
3931
3932 static std::unique_ptr<ARMOperand>
3933 CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S,
3934 ARMAsmParser &Parser) {
3935 auto Op = std::make_unique<ARMOperand>(args: k_InstSyncBarrierOpt, args&: Parser);
3936 Op->ISBOpt.Val = Opt;
3937 Op->StartLoc = S;
3938 Op->EndLoc = S;
3939 return Op;
3940 }
3941
3942 static std::unique_ptr<ARMOperand>
3943 CreateTraceSyncBarrierOpt(ARM_TSB::TraceSyncBOpt Opt, SMLoc S,
3944 ARMAsmParser &Parser) {
3945 auto Op = std::make_unique<ARMOperand>(args: k_TraceSyncBarrierOpt, args&: Parser);
3946 Op->TSBOpt.Val = Opt;
3947 Op->StartLoc = S;
3948 Op->EndLoc = S;
3949 return Op;
3950 }
3951
3952 static std::unique_ptr<ARMOperand>
3953 CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S, ARMAsmParser &Parser) {
3954 auto Op = std::make_unique<ARMOperand>(args: k_ProcIFlags, args&: Parser);
3955 Op->IFlags.Val = IFlags;
3956 Op->StartLoc = S;
3957 Op->EndLoc = S;
3958 return Op;
3959 }
3960
3961 static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S,
3962 ARMAsmParser &Parser) {
3963 auto Op = std::make_unique<ARMOperand>(args: k_MSRMask, args&: Parser);
3964 Op->MMask.Val = MMask;
3965 Op->StartLoc = S;
3966 Op->EndLoc = S;
3967 return Op;
3968 }
3969
3970 static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S,
3971 ARMAsmParser &Parser) {
3972 auto Op = std::make_unique<ARMOperand>(args: k_BankedReg, args&: Parser);
3973 Op->BankedReg.Val = Reg;
3974 Op->StartLoc = S;
3975 Op->EndLoc = S;
3976 return Op;
3977 }
3978};
3979
3980} // end anonymous namespace.
3981
/// Print a human-readable "<kind ...>" description of this operand to \p OS,
/// for debugging.
void ARMOperand::print(raw_ostream &OS, const MCAsmInfo &MAI) const {
  // Helper: printable name for a register, tolerating the invalid register.
  auto RegName = [](MCRegister Reg) {
    if (Reg)
      return ARMInstPrinter::getRegisterName(Reg);
    else
      return "noreg";
  };

  switch (Kind) {
  case k_CondCode:
    OS << "<ARMCC::" << ARMCondCodeToString(CC: getCondCode()) << ">";
    break;
  case k_VPTPred:
    OS << "<ARMVCC::" << ARMVPTPredToString(CC: getVPTPred()) << ">";
    break;
  case k_CCOut:
    OS << "<ccout " << RegName(getReg()) << ">";
    break;
  case k_ITCondMask: {
    // Table indexed by the 4-bit IT mask; each entry spells out the
    // then/else pattern of the IT block.
    static const char *const MaskStr[] = {
        "(invalid)", "(tttt)", "(ttt)", "(ttte)",
        "(tt)", "(ttet)", "(tte)", "(ttee)",
        "(t)", "(tett)", "(tet)", "(tete)",
        "(te)", "(teet)", "(tee)", "(teee)",
    };
    assert((ITMask.Mask & 0xf) == ITMask.Mask);
    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
    break;
  }
  case k_CoprocNum:
    OS << "<coprocessor number: " << getCoproc() << ">";
    break;
  case k_CoprocReg:
    OS << "<coprocessor register: " << getCoproc() << ">";
    break;
  case k_CoprocOption:
    OS << "<coprocessor option: " << CoprocOption.Val << ">";
    break;
  case k_MSRMask:
    OS << "<mask: " << getMSRMask() << ">";
    break;
  case k_BankedReg:
    OS << "<banked reg: " << getBankedReg() << ">";
    break;
  case k_Immediate:
    MAI.printExpr(OS, *getImm());
    break;
  case k_MemBarrierOpt:
    OS << "<ARM_MB::" << MemBOptToString(val: getMemBarrierOpt(), HasV8: false) << ">";
    break;
  case k_InstSyncBarrierOpt:
    OS << "<ARM_ISB::" << InstSyncBOptToString(val: getInstSyncBarrierOpt()) << ">";
    break;
  case k_TraceSyncBarrierOpt:
    OS << "<ARM_TSB::" << TraceSyncBOptToString(val: getTraceSyncBarrierOpt()) << ">";
    break;
  case k_Memory:
    // Only the memory sub-fields that are actually set get printed.
    OS << "<memory";
    if (Memory.BaseRegNum)
      OS << " base:" << RegName(Memory.BaseRegNum);
    if (Memory.OffsetImm) {
      OS << " offset-imm:";
      MAI.printExpr(OS, *Memory.OffsetImm);
    }
    if (Memory.OffsetRegNum)
      OS << " offset-reg:" << (Memory.isNegative ? "-" : "")
         << RegName(Memory.OffsetRegNum);
    if (Memory.ShiftType != ARM_AM::no_shift) {
      OS << " shift-type:" << ARM_AM::getShiftOpcStr(Op: Memory.ShiftType);
      OS << " shift-imm:" << Memory.ShiftImm;
    }
    if (Memory.Alignment)
      OS << " alignment:" << Memory.Alignment;
    OS << ">";
    break;
  case k_PostIndexRegister:
    // NOTE(review): this case never emits the opening '<' that the other
    // cases pair with the trailing '>'.
    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
       << RegName(PostIdxReg.RegNum);
    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
      OS << ARM_AM::getShiftOpcStr(Op: PostIdxReg.ShiftTy) << " "
         << PostIdxReg.ShiftImm;
    OS << ">";
    break;
  case k_ProcIFlags: {
    // Print each set flag bit (A/I/F) by testing bits 2..0 individually.
    OS << "<ARM_PROC::";
    unsigned IFlags = getProcIFlags();
    for (int i=2; i >= 0; --i)
      if (IFlags & (1 << i))
        OS << ARM_PROC::IFlagsToString(val: 1 << i);
    OS << ">";
    break;
  }
  case k_Register:
    OS << "<register " << RegName(getReg()) << ">";
    break;
  case k_ShifterImmediate:
    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
       << " #" << ShifterImm.Imm << ">";
    break;
  case k_ShiftedRegister:
    OS << "<so_reg_reg " << RegName(RegShiftedReg.SrcReg) << " "
       << ARM_AM::getShiftOpcStr(Op: RegShiftedReg.ShiftTy) << " "
       << RegName(RegShiftedReg.ShiftReg) << ">";
    break;
  case k_ShiftedImmediate:
    OS << "<so_reg_imm " << RegName(RegShiftedImm.SrcReg) << " "
       << ARM_AM::getShiftOpcStr(Op: RegShiftedImm.ShiftTy) << " #"
       << RegShiftedImm.ShiftImm << ">";
    break;
  case k_RotateImmediate:
    // RotImm.Imm is stored in units of 8 bits, hence the * 8.
    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
    break;
  case k_ModifiedImmediate:
    // NOTE(review): the closing ")>"' does not match any opening '('.
    OS << "<mod_imm #" << ModImm.Bits << ", #"
       << ModImm.Rot << ")>";
    break;
  case k_ConstantPoolImmediate:
    // NOTE(review): no closing '>' is emitted for this case.
    OS << "<constant_pool_imm #";
    MAI.printExpr(OS, *getConstantPoolImm());
    break;
  case k_BitfieldDescriptor:
    OS << "<bitfield " << "lsb: " << Bitfield.LSB
       << ", width: " << Bitfield.Width << ">";
    break;
  case k_RegisterList:
  case k_RegisterListWithAPSR:
  case k_DPRRegisterList:
  case k_SPRRegisterList:
  case k_FPSRegisterListWithVPR:
  case k_FPDRegisterListWithVPR: {
    // All register-list flavors share one comma-separated rendering.
    OS << "<register_list ";

    const SmallVectorImpl<MCRegister> &RegList = getRegList();
    for (auto I = RegList.begin(), E = RegList.end(); I != E;) {
      OS << RegName(*I);
      if (++I < E) OS << ", ";
    }

    OS << ">";
    break;
  }
  case k_VectorList:
    OS << "<vector_list " << VectorList.Count << " * "
       << RegName(VectorList.RegNum) << ">";
    break;
  case k_VectorListAllLanes:
    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
       << RegName(VectorList.RegNum) << ">";
    break;
  case k_VectorListIndexed:
    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
       << VectorList.Count << " * " << RegName(VectorList.RegNum) << ">";
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  }
}
4143
4144/// @name Auto-generated Match Functions
4145/// {
4146
4147static MCRegister MatchRegisterName(StringRef Name);
4148
4149/// }
4150
4151static bool isDataTypeToken(StringRef Tok) {
4152 static const DenseSet<StringRef> DataTypes{
4153 ".8", ".16", ".32", ".64", ".i8", ".i16", ".i32", ".i64",
4154 ".u8", ".u16", ".u32", ".u64", ".s8", ".s16", ".s32", ".s64",
4155 ".p8", ".p16", ".f32", ".f64", ".f", ".d"};
4156 return DataTypes.contains(V: Tok);
4157}
4158
/// Return the index of the first operand that is not part of the mnemonic
/// cluster (condition codes, cc_out, VPT predicates, data-type suffix
/// tokens, IT masks). Operands[0] is always the mnemonic token itself.
static unsigned getMnemonicOpsEndInd(const OperandVector &Operands) {
  unsigned MnemonicOpsEndInd = 1;
  // Special case for CPS which has a Mnemonic side token for possibly storing
  // ie/id variant
  if (Operands[0]->isToken() &&
      static_cast<ARMOperand &>(*Operands[0]).getToken() == "cps") {
    if (Operands.size() > 1 && Operands[1]->isImm() &&
        static_cast<ARMOperand &>(*Operands[1]).getImm()->getKind() ==
            llvm::MCExpr::Constant &&
        (dyn_cast<MCConstantExpr>(
             Val: static_cast<ARMOperand &>(*Operands[1]).getImm())
                 ->getValue() == ARM_PROC::IE ||
         dyn_cast<MCConstantExpr>(
             Val: static_cast<ARMOperand &>(*Operands[1]).getImm())
                 ->getValue() == ARM_PROC::ID))
      ++MnemonicOpsEndInd;
  }

  // In some circumstances the condition code moves to the right
  bool RHSCondCode = false;
  while (MnemonicOpsEndInd < Operands.size()) {
    auto Op = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]);
    // Special case for it instructions which have a condition code on the RHS
    if (Op.isITMask()) {
      RHSCondCode = true;
      MnemonicOpsEndInd++;
    } else if (Op.isToken() &&
               (
                   // There are several special cases not covered by
                   // isDataTypeToken
                   Op.getToken() == ".w" || Op.getToken() == ".bf16" ||
                   Op.getToken() == ".p64" || Op.getToken() == ".f16" ||
                   isDataTypeToken(Tok: Op.getToken()))) {
      // In the mnemonic operators the cond code must always precede the data
      // type. So we can now safely assume any subsequent cond code is on the
      // RHS. As is the case for VCMP and VPT.
      RHSCondCode = true;
      MnemonicOpsEndInd++;
    }
    // Skip all mnemonic operator types
    // NOTE(review): the ".w" check below is unreachable -- any ".w" token is
    // already consumed by the data-type branch above.
    else if (Op.isCCOut() || (Op.isCondCode() && !RHSCondCode) ||
             Op.isVPTPred() || (Op.isToken() && Op.getToken() == ".w"))
      MnemonicOpsEndInd++;
    else
      break;
  }
  return MnemonicOpsEndInd;
}
4207
4208bool ARMAsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
4209 SMLoc &EndLoc) {
4210 const AsmToken &Tok = getParser().getTok();
4211 StartLoc = Tok.getLoc();
4212 EndLoc = Tok.getEndLoc();
4213 Reg = tryParseRegister();
4214
4215 return !Reg;
4216}
4217
4218ParseStatus ARMAsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
4219 SMLoc &EndLoc) {
4220 if (parseRegister(Reg, StartLoc, EndLoc))
4221 return ParseStatus::NoMatch;
4222 return ParseStatus::Success;
4223}
4224
4225/// Try to parse a register name. The token must be an Identifier when called,
4226/// and if it is a register name the token is eaten and the register is
4227/// returned. Otherwise return an invalid MCRegister.
/// Try to parse a register name. The token must be an Identifier when called,
/// and if it is a register name the token is eaten and the register is
/// returned. Otherwise return an invalid MCRegister.
MCRegister ARMAsmParser::tryParseRegister(bool AllowOutOfBoundReg) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(K: AsmToken::Identifier))
    return MCRegister();

  // Register names are case insensitive; match on the lower-cased name.
  std::string lowerCase = Tok.getString().lower();
  MCRegister Reg = MatchRegisterName(Name: lowerCase);
  if (!Reg) {
    // Fall back to the architectural / gas-compatible aliases that the
    // generated matcher does not know about.
    Reg = StringSwitch<MCRegister>(lowerCase)
              .Case(S: "r13", Value: ARM::SP)
              .Case(S: "r14", Value: ARM::LR)
              .Case(S: "r15", Value: ARM::PC)
              .Case(S: "ip", Value: ARM::R12)
              // Additional register name aliases for 'gas' compatibility.
              .Case(S: "a1", Value: ARM::R0)
              .Case(S: "a2", Value: ARM::R1)
              .Case(S: "a3", Value: ARM::R2)
              .Case(S: "a4", Value: ARM::R3)
              .Case(S: "v1", Value: ARM::R4)
              .Case(S: "v2", Value: ARM::R5)
              .Case(S: "v3", Value: ARM::R6)
              .Case(S: "v4", Value: ARM::R7)
              .Case(S: "v5", Value: ARM::R8)
              .Case(S: "v6", Value: ARM::R9)
              .Case(S: "v7", Value: ARM::R10)
              .Case(S: "v8", Value: ARM::R11)
              .Case(S: "sb", Value: ARM::R9)
              .Case(S: "sl", Value: ARM::R10)
              .Case(S: "fp", Value: ARM::R11)
              .Default(Value: MCRegister());
  }
  if (!Reg) {
    // Check for aliases registered via .req. Canonicalize to lower case.
    // That's more consistent since register names are case insensitive, and
    // it's how the original entry was passed in from MC/MCParser/AsmParser.
    auto Entry = RegisterReqs.find(Key: lowerCase);
    // If no match, return failure.
    if (Entry == RegisterReqs.end())
      return MCRegister();
    Parser.Lex(); // Eat identifier token.
    return Entry->getValue();
  }

  // Some FPUs only have 16 D registers, so D16-D31 are invalid
  if (!AllowOutOfBoundReg && !hasD32() && Reg >= ARM::D16 && Reg <= ARM::D31)
    return MCRegister();

  Parser.Lex(); // Eat identifier token.

  return Reg;
}
4280
/// Map the current identifier token to a shift opcode, or std::nullopt when
/// it is not a shift mnemonic. Does not consume the token. "asl" is accepted
/// as an alias of "lsl".
std::optional<ARM_AM::ShiftOpc> ARMAsmParser::tryParseShiftToken() {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(K: AsmToken::Identifier))
    return std::nullopt;

  // Shift mnemonics are case insensitive.
  std::string lowerCase = Tok.getString().lower();
  return StringSwitch<std::optional<ARM_AM::ShiftOpc>>(lowerCase)
      .Case(S: "asl", Value: ARM_AM::lsl)
      .Case(S: "lsl", Value: ARM_AM::lsl)
      .Case(S: "lsr", Value: ARM_AM::lsr)
      .Case(S: "asr", Value: ARM_AM::asr)
      .Case(S: "ror", Value: ARM_AM::ror)
      .Case(S: "rrx", Value: ARM_AM::rrx)
      .Default(Value: std::nullopt);
}
4297
4298// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
4299// If a recoverable error occurs, return 1. If an irrecoverable error
4300// occurs, return -1. An irrecoverable error is one where tokens have been
4301// consumed in the process of trying to parse the shifter (i.e., when it is
4302// indeed a shifter operand, but malformed).
/// Parse a shifter applied to the register operand most recently pushed onto
/// \p Operands, replacing that operand with a shifted-register or
/// shifted-immediate operand. Returns 0 on success, 1 on a recoverable
/// no-match (no tokens consumed), -1 on a malformed shifter (tokens already
/// consumed, diagnostic emitted).
int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();

  auto ShiftTyOpt = tryParseShiftToken();
  if (ShiftTyOpt == std::nullopt)
    return 1;
  auto ShiftTy = ShiftTyOpt.value();

  Parser.Lex(); // Eat the operator.

  // The source register for the shift has already been added to the
  // operand list, so we need to pop it off and combine it into the shifted
  // register operand instead.
  std::unique_ptr<ARMOperand> PrevOp(
      (ARMOperand *)Operands.pop_back_val().release());
  if (!PrevOp->isReg())
    return Error(L: PrevOp->getStartLoc(), Msg: "shift must be of a register");
  MCRegister SrcReg = PrevOp->getReg();

  SMLoc EndLoc;
  int64_t Imm = 0;
  MCRegister ShiftReg;
  if (ShiftTy == ARM_AM::rrx) {
    // RRX Doesn't have an explicit shift amount. The encoder expects
    // the shift register to be the same as the source register. Seems odd,
    // but OK.
    ShiftReg = SrcReg;
  } else {
    // Figure out if this is shifted by a constant or a register (for non-RRX).
    if (Parser.getTok().is(K: AsmToken::Hash) ||
        Parser.getTok().is(K: AsmToken::Dollar)) {
      Parser.Lex(); // Eat hash.
      SMLoc ImmLoc = Parser.getTok().getLoc();
      const MCExpr *ShiftExpr = nullptr;
      if (getParser().parseExpression(Res&: ShiftExpr, EndLoc)) {
        Error(L: ImmLoc, Msg: "invalid immediate shift value");
        return -1;
      }
      // The expression must be evaluatable as an immediate.
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: ShiftExpr);
      if (!CE) {
        Error(L: ImmLoc, Msg: "invalid immediate shift value");
        return -1;
      }
      // Range check the immediate.
      // lsl, ror: 0 <= imm <= 31
      // lsr, asr: 0 <= imm <= 32
      Imm = CE->getValue();
      if (Imm < 0 ||
          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
        Error(L: ImmLoc, Msg: "immediate shift value out of range");
        return -1;
      }
      // shift by zero is a nop. Always send it through as lsl.
      // ('as' compatibility)
      if (Imm == 0)
        ShiftTy = ARM_AM::lsl;
    } else if (Parser.getTok().is(K: AsmToken::Identifier)) {
      SMLoc L = Parser.getTok().getLoc();
      EndLoc = Parser.getTok().getEndLoc();
      ShiftReg = tryParseRegister();
      if (!ShiftReg) {
        Error(L, Msg: "expected immediate or register in shift operand");
        return -1;
      }
    } else {
      Error(L: Parser.getTok().getLoc(),
            Msg: "expected immediate or register in shift operand");
      return -1;
    }
  }

  // A register shift amount produces a shifted-register operand; an
  // immediate amount (including RRX, handled above by aliasing ShiftReg)
  // produces a shifted-immediate operand.
  if (ShiftReg && ShiftTy != ARM_AM::rrx)
    Operands.push_back(Elt: ARMOperand::CreateShiftedRegister(
        ShTy: ShiftTy, SrcReg, ShiftReg, ShiftImm: Imm, S, E: EndLoc, Parser&: *this));
  else
    Operands.push_back(Elt: ARMOperand::CreateShiftedImmediate(ShTy: ShiftTy, SrcReg, ShiftImm: Imm,
                                                            S, E: EndLoc, Parser&: *this));

  return 0;
}
4386
4387/// Try to parse a register name. The token must be an Identifier when called.
4388/// If it's a register, an AsmOperand is created. Another AsmOperand is created
4389/// if there is a "writeback". 'true' if it's not a register.
4390///
4391/// TODO this is likely to change to allow different register types and or to
4392/// parse for a specific register type.
4393bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
4394 MCAsmParser &Parser = getParser();
4395 SMLoc RegStartLoc = Parser.getTok().getLoc();
4396 SMLoc RegEndLoc = Parser.getTok().getEndLoc();
4397 MCRegister Reg = tryParseRegister();
4398 if (!Reg)
4399 return true;
4400
4401 Operands.push_back(Elt: ARMOperand::CreateReg(Reg, S: RegStartLoc, E: RegEndLoc, Parser&: *this));
4402
4403 const AsmToken &ExclaimTok = Parser.getTok();
4404 if (ExclaimTok.is(K: AsmToken::Exclaim)) {
4405 Operands.push_back(Elt: ARMOperand::CreateToken(Str: ExclaimTok.getString(),
4406 S: ExclaimTok.getLoc(), Parser&: *this));
4407 Parser.Lex(); // Eat exclaim token
4408 return false;
4409 }
4410
4411 // Also check for an index operand. This is only legal for vector registers,
4412 // but that'll get caught OK in operand matching, so we don't need to
4413 // explicitly filter everything else out here.
4414 if (Parser.getTok().is(K: AsmToken::LBrac)) {
4415 SMLoc SIdx = Parser.getTok().getLoc();
4416 Parser.Lex(); // Eat left bracket token.
4417
4418 const MCExpr *ImmVal;
4419 if (getParser().parseExpression(Res&: ImmVal))
4420 return true;
4421 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Val: ImmVal);
4422 if (!MCE)
4423 return TokError(Msg: "immediate value expected for vector index");
4424
4425 if (Parser.getTok().isNot(K: AsmToken::RBrac))
4426 return Error(L: Parser.getTok().getLoc(), Msg: "']' expected");
4427
4428 SMLoc E = Parser.getTok().getEndLoc();
4429 Parser.Lex(); // Eat right bracket token.
4430
4431 Operands.push_back(Elt: ARMOperand::CreateVectorIndex(Idx: MCE->getValue(), S: SIdx, E,
4432 Ctx&: getContext(), Parser&: *this));
4433 }
4434
4435 return false;
4436}
4437
4438/// MatchCoprocessorOperandName - Try to parse an coprocessor related
4439/// instruction with a symbolic operand name.
4440/// We accept "crN" syntax for GAS compatibility.
4441/// <operand-name> ::= <prefix><number>
4442/// If CoprocOp is 'c', then:
4443/// <prefix> ::= c | cr
4444/// If CoprocOp is 'p', then :
4445/// <prefix> ::= p
4446/// <number> ::= integer in range [0, 15]
4447static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
4448 // Use the same layout as the tablegen'erated register name matcher. Ugly,
4449 // but efficient.
4450 if (Name.size() < 2 || Name[0] != CoprocOp)
4451 return -1;
4452 Name = (Name[1] == 'r') ? Name.drop_front(N: 2) : Name.drop_front();
4453
4454 switch (Name.size()) {
4455 default: return -1;
4456 case 1:
4457 switch (Name[0]) {
4458 default: return -1;
4459 case '0': return 0;
4460 case '1': return 1;
4461 case '2': return 2;
4462 case '3': return 3;
4463 case '4': return 4;
4464 case '5': return 5;
4465 case '6': return 6;
4466 case '7': return 7;
4467 case '8': return 8;
4468 case '9': return 9;
4469 }
4470 case 2:
4471 if (Name[0] != '1')
4472 return -1;
4473 switch (Name[1]) {
4474 default: return -1;
4475 // CP10 and CP11 are VFP/NEON and so vector instructions should be used.
4476 // However, old cores (v5/v6) did use them in that way.
4477 case '0': return 10;
4478 case '1': return 11;
4479 case '2': return 12;
4480 case '3': return 13;
4481 case '4': return 14;
4482 case '5': return 15;
4483 }
4484 }
4485}
4486
4487/// parseITCondCode - Try to parse a condition code for an IT instruction.
4488ParseStatus ARMAsmParser::parseITCondCode(OperandVector &Operands) {
4489 MCAsmParser &Parser = getParser();
4490 SMLoc S = Parser.getTok().getLoc();
4491 const AsmToken &Tok = Parser.getTok();
4492 if (!Tok.is(K: AsmToken::Identifier))
4493 return ParseStatus::NoMatch;
4494 unsigned CC = ARMCondCodeFromString(CC: Tok.getString());
4495 if (CC == ~0U)
4496 return ParseStatus::NoMatch;
4497 Parser.Lex(); // Eat the token.
4498
4499 Operands.push_back(
4500 Elt: ARMOperand::CreateCondCode(CC: ARMCC::CondCodes(CC), S, Parser&: *this));
4501
4502 return ParseStatus::Success;
4503}
4504
4505/// parseCoprocNumOperand - Try to parse an coprocessor number operand. The
4506/// token must be an Identifier when called, and if it is a coprocessor
4507/// number, the token is eaten and the operand is added to the operand list.
4508ParseStatus ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) {
4509 MCAsmParser &Parser = getParser();
4510 SMLoc S = Parser.getTok().getLoc();
4511 const AsmToken &Tok = Parser.getTok();
4512 if (Tok.isNot(K: AsmToken::Identifier))
4513 return ParseStatus::NoMatch;
4514
4515 int Num = MatchCoprocessorOperandName(Name: Tok.getString().lower(), CoprocOp: 'p');
4516 if (Num == -1)
4517 return ParseStatus::NoMatch;
4518 if (!isValidCoprocessorNumber(Num, featureBits: getSTI().getFeatureBits()))
4519 return ParseStatus::NoMatch;
4520
4521 Parser.Lex(); // Eat identifier token.
4522 Operands.push_back(Elt: ARMOperand::CreateCoprocNum(CopVal: Num, S, Parser&: *this));
4523 return ParseStatus::Success;
4524}
4525
4526/// parseCoprocRegOperand - Try to parse an coprocessor register operand. The
4527/// token must be an Identifier when called, and if it is a coprocessor
4528/// number, the token is eaten and the operand is added to the operand list.
4529ParseStatus ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) {
4530 MCAsmParser &Parser = getParser();
4531 SMLoc S = Parser.getTok().getLoc();
4532 const AsmToken &Tok = Parser.getTok();
4533 if (Tok.isNot(K: AsmToken::Identifier))
4534 return ParseStatus::NoMatch;
4535
4536 int Reg = MatchCoprocessorOperandName(Name: Tok.getString().lower(), CoprocOp: 'c');
4537 if (Reg == -1)
4538 return ParseStatus::NoMatch;
4539
4540 Parser.Lex(); // Eat identifier token.
4541 Operands.push_back(Elt: ARMOperand::CreateCoprocReg(CopVal: Reg, S, Parser&: *this));
4542 return ParseStatus::Success;
4543}
4544
4545/// parseCoprocOptionOperand - Try to parse an coprocessor option operand.
4546/// coproc_option : '{' imm0_255 '}'
4547ParseStatus ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) {
4548 MCAsmParser &Parser = getParser();
4549 SMLoc S = Parser.getTok().getLoc();
4550
4551 // If this isn't a '{', this isn't a coprocessor immediate operand.
4552 if (Parser.getTok().isNot(K: AsmToken::LCurly))
4553 return ParseStatus::NoMatch;
4554 Parser.Lex(); // Eat the '{'
4555
4556 const MCExpr *Expr;
4557 SMLoc Loc = Parser.getTok().getLoc();
4558 if (getParser().parseExpression(Res&: Expr))
4559 return Error(L: Loc, Msg: "illegal expression");
4560 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Expr);
4561 if (!CE || CE->getValue() < 0 || CE->getValue() > 255)
4562 return Error(L: Loc,
4563 Msg: "coprocessor option must be an immediate in range [0, 255]");
4564 int Val = CE->getValue();
4565
4566 // Check for and consume the closing '}'
4567 if (Parser.getTok().isNot(K: AsmToken::RCurly))
4568 return ParseStatus::Failure;
4569 SMLoc E = Parser.getTok().getEndLoc();
4570 Parser.Lex(); // Eat the '}'
4571
4572 Operands.push_back(Elt: ARMOperand::CreateCoprocOption(Val, S, E, Parser&: *this));
4573 return ParseStatus::Success;
4574}
4575
4576// For register list parsing, we need to map from raw GPR register numbering
4577// to the enumeration values. The enumeration values aren't sorted by
4578// register number due to our using "sp", "lr" and "pc" as canonical names.
4579static MCRegister getNextRegister(MCRegister Reg) {
4580 // If this is a GPR, we need to do it manually, otherwise we can rely
4581 // on the sort ordering of the enumeration since the other reg-classes
4582 // are sane.
4583 if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4584 return Reg + 1;
4585 switch (Reg.id()) {
4586 default: llvm_unreachable("Invalid GPR number!");
4587 case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2;
4588 case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4;
4589 case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6;
4590 case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8;
4591 case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10;
4592 case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
4593 case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR;
4594 case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0;
4595 }
4596}
4597
4598// Insert an <Encoding, Register> pair in an ordered vector. Return true on
4599// success, or false, if duplicate encoding found.
4600static bool
4601insertNoDuplicates(SmallVectorImpl<std::pair<unsigned, MCRegister>> &Regs,
4602 unsigned Enc, MCRegister Reg) {
4603 Regs.emplace_back(Args&: Enc, Args&: Reg);
4604 for (auto I = Regs.rbegin(), J = I + 1, E = Regs.rend(); J != E; ++I, ++J) {
4605 if (J->first == Enc) {
4606 Regs.erase(CI: J.base());
4607 return false;
4608 }
4609 if (J->first < Enc)
4610 break;
4611 std::swap(x&: *I, y&: *J);
4612 }
4613 return true;
4614}
4615
/// Parse a register list.
///
/// Grammar: '{' reg ((',' reg) | ('-' reg))* '}', optionally followed by a
/// '^' (the ARM system LDM/STM suffix), which is pushed as a separate token
/// operand. Q registers are accepted and expanded to their two D
/// sub-registers.
///
/// \param EnforceOrder if true, a GPR list out of ascending encoding order
///        gets a warning and any other list an error.
/// \param AllowRAAC permit the RA_AUTH_CODE pseudo-register in the list.
/// \param IsLazyLoadStore, \param IsVSCCLRM allow registers outside the
///        usual bounds; VSCCLRM additionally allows a single-precision list
///        to switch to double-precision at the S31/D16 boundary.
bool ARMAsmParser::parseRegisterList(OperandVector &Operands, bool EnforceOrder,
                                     bool AllowRAAC, bool IsLazyLoadStore,
                                     bool IsVSCCLRM) {
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(K: AsmToken::LCurly))
    return TokError(Msg: "Token is not a Left Curly Brace");
  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  // Check the first register in the list to see what register class
  // this is a list of.
  bool AllowOutOfBoundReg = IsLazyLoadStore || IsVSCCLRM;
  MCRegister Reg = tryParseRegister(AllowOutOfBoundReg);
  if (!Reg)
    return Error(L: RegLoc, Msg: "register expected");
  if (!AllowRAAC && Reg == ARM::RA_AUTH_CODE)
    return Error(L: RegLoc, Msg: "pseudo-register not allowed");
  // The reglist instructions have at most 32 registers, so reserve
  // space for that many.
  int EReg = 0;
  SmallVector<std::pair<unsigned, MCRegister>, 32> Registers;

  // Single-precision VSCCLRM can have double-precision registers in the
  // register list. When VSCCLRMAdjustEncoding is true then we've switched from
  // single-precision to double-precision and we pretend that these registers
  // are encoded as S32 onwards, which we can do by adding 16 to the encoding
  // value.
  bool VSCCLRMAdjustEncoding = false;

  // Allow Q regs and just interpret them as the two D sub-registers.
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    Reg = getDRegFromQReg(QReg: Reg);
    EReg = MRI->getEncodingValue(Reg);
    Registers.emplace_back(Args&: EReg, Args&: Reg);
    Reg = Reg + 1;
  }
  // The first register determines the class of the whole list; every later
  // entry must come from (or be compatible with) this class.
  const MCRegisterClass *RC;
  if (Reg == ARM::RA_AUTH_CODE ||
      ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
  else if (Reg == ARM::VPR)
    RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
  else
    return Error(L: RegLoc, Msg: "invalid register in register list");

  // Store the register.
  EReg = MRI->getEncodingValue(Reg);
  Registers.emplace_back(Args&: EReg, Args&: Reg);

  // This starts immediately after the first register token in the list,
  // so we can see either a comma or a minus (range separator) as a legal
  // next token.
  while (Parser.getTok().is(K: AsmToken::Comma) ||
         Parser.getTok().is(K: AsmToken::Minus)) {
    if (Parser.getTok().is(K: AsmToken::Minus)) {
      // Range form: <first>-<last>. Expand every register in between.
      if (Reg == ARM::RA_AUTH_CODE)
        return Error(L: RegLoc, Msg: "pseudo-register not allowed");
      Parser.Lex(); // Eat the minus.
      SMLoc AfterMinusLoc = Parser.getTok().getLoc();
      MCRegister EndReg = tryParseRegister(AllowOutOfBoundReg);
      if (!EndReg)
        return Error(L: AfterMinusLoc, Msg: "register expected");
      if (EndReg == ARM::RA_AUTH_CODE)
        return Error(L: AfterMinusLoc, Msg: "pseudo-register not allowed");
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg: EndReg))
        EndReg = getDRegFromQReg(QReg: EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!RC->contains(Reg))
        return Error(L: AfterMinusLoc, Msg: "invalid register in register list");
      // Ranges must go from low to high.
      if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(Reg: EndReg))
        return Error(L: AfterMinusLoc, Msg: "bad range in register list");

      // Add all the registers in the range to the register list.
      while (Reg != EndReg) {
        Reg = getNextRegister(Reg);
        EReg = MRI->getEncodingValue(Reg);
        if (VSCCLRMAdjustEncoding)
          EReg += 16;
        if (!insertNoDuplicates(Regs&: Registers, Enc: EReg, Reg)) {
          Warning(L: AfterMinusLoc, Msg: StringRef("duplicated register (") +
                                       ARMInstPrinter::getRegisterName(Reg) +
                                       ") in register list");
        }
      }
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    // Remember the previous register and its encoding so we can check
    // ordering and contiguity of the next one.
    MCRegister OldReg = Reg;
    int EOldReg = EReg;
    const AsmToken RegTok = Parser.getTok();
    Reg = tryParseRegister(AllowOutOfBoundReg);
    if (!Reg)
      return Error(L: RegLoc, Msg: "register expected");
    if (!AllowRAAC && Reg == ARM::RA_AUTH_CODE)
      return Error(L: RegLoc, Msg: "pseudo-register not allowed");
    // Allow Q regs and just interpret them as the two D sub-registers.
    bool isQReg = false;
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      Reg = getDRegFromQReg(QReg: Reg);
      isQReg = true;
    }
    if (Reg != ARM::RA_AUTH_CODE && !RC->contains(Reg) &&
        RC->getID() == ARMMCRegisterClasses[ARM::GPRRegClassID].getID() &&
        ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg)) {
      // switch the register classes, as GPRwithAPSRnospRegClassID is a partial
      // subset of GPRRegClassId except it contains APSR as well.
      RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
    }
    // VPR may trail an S/D register list; widen the class to FPWithVPR,
    // record it, and skip the ordering/contiguity checks below.
    if (Reg == ARM::VPR &&
        (RC == &ARMMCRegisterClasses[ARM::SPRRegClassID] ||
         RC == &ARMMCRegisterClasses[ARM::DPRRegClassID] ||
         RC == &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID])) {
      RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
      EReg = MRI->getEncodingValue(Reg);
      if (!insertNoDuplicates(Regs&: Registers, Enc: EReg, Reg)) {
        Warning(L: RegLoc, Msg: "duplicated register (" + RegTok.getString() +
                    ") in register list");
      }
      continue;
    }
    // VSCCLRM can switch from single-precision to double-precision only when
    // S31 is followed by D16.
    if (IsVSCCLRM && OldReg == ARM::S31 && Reg == ARM::D16) {
      VSCCLRMAdjustEncoding = true;
      RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
    }
    // The register must be in the same register class as the first.
    if ((Reg == ARM::RA_AUTH_CODE &&
         RC != &ARMMCRegisterClasses[ARM::GPRRegClassID]) ||
        (Reg != ARM::RA_AUTH_CODE && !RC->contains(Reg)))
      return Error(L: RegLoc, Msg: "invalid register in register list");
    // In most cases, the list must be monotonically increasing. An
    // exception is CLRM, which is order-independent anyway, so
    // there's no potential for confusion if you write clrm {r2,r1}
    // instead of clrm {r1,r2}.
    EReg = MRI->getEncodingValue(Reg);
    if (VSCCLRMAdjustEncoding)
      EReg += 16;
    if (EnforceOrder && EReg < EOldReg) {
      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
        Warning(L: RegLoc, Msg: "register list not in ascending order");
      else if (!ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
        return Error(L: RegLoc, Msg: "register list not in ascending order");
    }
    // VFP register lists must also be contiguous.
    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
        RC != &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID] &&
        EReg != EOldReg + 1)
      return Error(L: RegLoc, Msg: "non-contiguous register range");

    if (!insertNoDuplicates(Regs&: Registers, Enc: EReg, Reg)) {
      Warning(L: RegLoc, Msg: "duplicated register (" + RegTok.getString() +
                  ") in register list");
    }
    // A Q register contributes its second D sub-register as well.
    if (isQReg) {
      Reg = Reg + 1;
      EReg = MRI->getEncodingValue(Reg);
      Registers.emplace_back(Args&: EReg, Args&: Reg);
    }
  }

  if (Parser.getTok().isNot(K: AsmToken::RCurly))
    return Error(L: Parser.getTok().getLoc(), Msg: "'}' expected");
  SMLoc E = Parser.getTok().getEndLoc();
  Parser.Lex(); // Eat '}' token.

  // Push the register list operand.
  Operands.push_back(Elt: ARMOperand::CreateRegList(Regs&: Registers, StartLoc: S, EndLoc: E, Parser&: *this));

  // The ARM system instruction variants for LDM/STM have a '^' token here.
  if (Parser.getTok().is(K: AsmToken::Caret)) {
    Operands.push_back(
        Elt: ARMOperand::CreateToken(Str: "^", S: Parser.getTok().getLoc(), Parser&: *this));
    Parser.Lex(); // Eat '^' token.
  }

  return false;
}
4809
4810// Helper function to parse the lane index for vector lists.
4811ParseStatus ARMAsmParser::parseVectorLane(VectorLaneTy &LaneKind,
4812 unsigned &Index, SMLoc &EndLoc) {
4813 MCAsmParser &Parser = getParser();
4814 Index = 0; // Always return a defined index value.
4815 if (Parser.getTok().is(K: AsmToken::LBrac)) {
4816 Parser.Lex(); // Eat the '['.
4817 if (Parser.getTok().is(K: AsmToken::RBrac)) {
4818 // "Dn[]" is the 'all lanes' syntax.
4819 LaneKind = AllLanes;
4820 EndLoc = Parser.getTok().getEndLoc();
4821 Parser.Lex(); // Eat the ']'.
4822 return ParseStatus::Success;
4823 }
4824
4825 // There's an optional '#' token here. Normally there wouldn't be, but
4826 // inline assemble puts one in, and it's friendly to accept that.
4827 if (Parser.getTok().is(K: AsmToken::Hash))
4828 Parser.Lex(); // Eat '#' or '$'.
4829
4830 const MCExpr *LaneIndex;
4831 SMLoc Loc = Parser.getTok().getLoc();
4832 if (getParser().parseExpression(Res&: LaneIndex))
4833 return Error(L: Loc, Msg: "illegal expression");
4834 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: LaneIndex);
4835 if (!CE)
4836 return Error(L: Loc, Msg: "lane index must be empty or an integer");
4837 if (Parser.getTok().isNot(K: AsmToken::RBrac))
4838 return Error(L: Parser.getTok().getLoc(), Msg: "']' expected");
4839 EndLoc = Parser.getTok().getEndLoc();
4840 Parser.Lex(); // Eat the ']'.
4841 int64_t Val = CE->getValue();
4842
4843 // FIXME: Make this range check context sensitive for .8, .16, .32.
4844 if (Val < 0 || Val > 7)
4845 return Error(L: Parser.getTok().getLoc(), Msg: "lane index out of range");
4846 Index = Val;
4847 LaneKind = IndexedLane;
4848 return ParseStatus::Success;
4849 }
4850 LaneKind = NoLanes;
4851 return ParseStatus::Success;
4852}
4853
// parse a vector register list
//
// Accepts either a bare D or Q register (gas extension; treated as a one- or
// two-entry list respectively), or a curly-brace list of D registers, where
// Q registers are interpreted as their two D sub-registers. Each entry may
// carry lane syntax ("[]" for all lanes, "[i]" for one lane); all entries
// must agree on the lane kind and index. With MVE, the list must consist of
// Q registers in Q0-Q7 instead.
ParseStatus ARMAsmParser::parseVectorList(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  VectorLaneTy LaneKind;
  unsigned LaneIndex;
  SMLoc S = Parser.getTok().getLoc();
  // As an extension (to match gas), support a plain D register or Q register
  // (without encosing curly braces) as a single or double entry list,
  // respectively.
  // If there is no lane supplied, just parse as a register and
  // use the custom matcher to convert to list if necessary
  if (!hasMVE() && Parser.getTok().is(K: AsmToken::Identifier)) {
    SMLoc E = Parser.getTok().getEndLoc();
    MCRegister Reg = tryParseRegister();
    if (!Reg)
      return ParseStatus::NoMatch;
    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
      ParseStatus Res = parseVectorLane(LaneKind, Index&: LaneIndex, EndLoc&: E);
      if (!Res.isSuccess())
        return Res;
      switch (LaneKind) {
      case NoLanes:
        // Plain register; the matcher converts to a list if needed.
        Operands.push_back(Elt: ARMOperand::CreateReg(Reg, S, E, Parser&: *this));
        break;
      case AllLanes:
        Operands.push_back(
            Elt: ARMOperand::CreateVectorListAllLanes(Reg, Count: 1, isDoubleSpaced: false, S, E, Parser&: *this));
        break;
      case IndexedLane:
        Operands.push_back(Elt: ARMOperand::CreateVectorListIndexed(
            Reg, Count: 1, Index: LaneIndex, isDoubleSpaced: false, S, E, Parser&: *this));
        break;
      }
      return ParseStatus::Success;
    }
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      // A bare Q register becomes a two-entry D list.
      Reg = getDRegFromQReg(QReg: Reg);
      ParseStatus Res = parseVectorLane(LaneKind, Index&: LaneIndex, EndLoc&: E);
      if (!Res.isSuccess())
        return Res;
      switch (LaneKind) {
      case NoLanes:
        Operands.push_back(Elt: ARMOperand::CreateReg(Reg, S, E, Parser&: *this));
        break;
      case AllLanes:
        // Use the D-pair super-register for the all-lanes form.
        Reg = MRI->getMatchingSuperReg(Reg, SubIdx: ARM::dsub_0,
                                       RC: &ARMMCRegisterClasses[ARM::DPairRegClassID]);
        Operands.push_back(
            Elt: ARMOperand::CreateVectorListAllLanes(Reg, Count: 2, isDoubleSpaced: false, S, E, Parser&: *this));
        break;
      case IndexedLane:
        Operands.push_back(Elt: ARMOperand::CreateVectorListIndexed(
            Reg, Count: 2, Index: LaneIndex, isDoubleSpaced: false, S, E, Parser&: *this));
        break;
      }
      return ParseStatus::Success;
    }
    Operands.push_back(Elt: ARMOperand::CreateReg(Reg, S, E, Parser&: *this));
    return ParseStatus::Success;
  }

  if (Parser.getTok().isNot(K: AsmToken::LCurly))
    return ParseStatus::NoMatch;

  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  MCRegister Reg = tryParseRegister();
  if (!Reg)
    return Error(L: RegLoc, Msg: "register expected");
  unsigned Count = 1;
  // Spacing: 0 = not yet known, 1 = single-spaced, 2 = double-spaced.
  int Spacing = 0;
  MCRegister FirstReg = Reg;

  if (hasMVE() && !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg))
    return Error(L: Parser.getTok().getLoc(),
                 Msg: "vector register in range Q0-Q7 expected");
  // The list is of D registers, but we also allow Q regs and just interpret
  // them as the two D sub-registers.
  else if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    FirstReg = Reg = getDRegFromQReg(QReg: Reg);
    Spacing = 1; // double-spacing requires explicit D registers, otherwise
                 // it's ambiguous with four-register single spaced.
    Reg = Reg + 1;
    ++Count;
  }

  SMLoc E;
  if (!parseVectorLane(LaneKind, Index&: LaneIndex, EndLoc&: E).isSuccess())
    return ParseStatus::Failure;

  while (Parser.getTok().is(K: AsmToken::Comma) ||
         Parser.getTok().is(K: AsmToken::Minus)) {
    if (Parser.getTok().is(K: AsmToken::Minus)) {
      // Range form: <first>-<last>.
      if (!Spacing)
        Spacing = 1; // Register range implies a single spaced list.
      else if (Spacing == 2)
        return Error(L: Parser.getTok().getLoc(),
                     Msg: "sequential registers in double spaced list");
      Parser.Lex(); // Eat the minus.
      SMLoc AfterMinusLoc = Parser.getTok().getLoc();
      MCRegister EndReg = tryParseRegister();
      if (!EndReg)
        return Error(L: AfterMinusLoc, Msg: "register expected");
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg: EndReg))
        EndReg = getDRegFromQReg(QReg: EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if ((hasMVE() &&
           !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg: EndReg)) ||
          (!hasMVE() &&
           !ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg: EndReg)))
        return Error(L: AfterMinusLoc, Msg: "invalid register in register list");
      // Ranges must go from low to high.
      if (Reg > EndReg)
        return Error(L: AfterMinusLoc, Msg: "bad range in register list");
      // Parse the lane specifier if present.
      VectorLaneTy NextLaneKind;
      unsigned NextLaneIndex;
      if (!parseVectorLane(LaneKind&: NextLaneKind, Index&: NextLaneIndex, EndLoc&: E).isSuccess())
        return ParseStatus::Failure;
      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
        return Error(L: AfterMinusLoc, Msg: "mismatched lane index in register list");

      // Add all the registers in the range to the register list.
      Count += EndReg - Reg;
      Reg = EndReg;
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    MCRegister OldReg = Reg;
    Reg = tryParseRegister();
    if (!Reg)
      return Error(L: RegLoc, Msg: "register expected");

    if (hasMVE()) {
      if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg))
        return Error(L: RegLoc, Msg: "vector register in range Q0-Q7 expected");
      Spacing = 1;
    }
    // vector register lists must be contiguous.
    // It's OK to use the enumeration values directly here rather, as the
    // VFP register classes have the enum sorted properly.
    //
    // The list is of D registers, but we also allow Q regs and just interpret
    // them as the two D sub-registers.
    else if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      if (!Spacing)
        Spacing = 1; // Register range implies a single spaced list.
      else if (Spacing == 2)
        return Error(
            L: RegLoc,
            Msg: "invalid register in double-spaced list (must be 'D' register')");
      Reg = getDRegFromQReg(QReg: Reg);
      if (Reg != OldReg + 1)
        return Error(L: RegLoc, Msg: "non-contiguous register range");
      Reg = Reg + 1;
      Count += 2;
      // Parse the lane specifier if present.
      VectorLaneTy NextLaneKind;
      unsigned NextLaneIndex;
      SMLoc LaneLoc = Parser.getTok().getLoc();
      if (!parseVectorLane(LaneKind&: NextLaneKind, Index&: NextLaneIndex, EndLoc&: E).isSuccess())
        return ParseStatus::Failure;
      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
        return Error(L: LaneLoc, Msg: "mismatched lane index in register list");
      continue;
    }
    // Normal D register.
    // Figure out the register spacing (single or double) of the list if
    // we don't know it already.
    if (!Spacing)
      Spacing = 1 + (Reg == OldReg + 2);

    // Just check that it's contiguous and keep going.
    if (Reg != OldReg + Spacing)
      return Error(L: RegLoc, Msg: "non-contiguous register range");
    ++Count;
    // Parse the lane specifier if present.
    VectorLaneTy NextLaneKind;
    unsigned NextLaneIndex;
    SMLoc EndLoc = Parser.getTok().getLoc();
    if (!parseVectorLane(LaneKind&: NextLaneKind, Index&: NextLaneIndex, EndLoc&: E).isSuccess())
      return ParseStatus::Failure;
    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
      return Error(L: EndLoc, Msg: "mismatched lane index in register list");
  }

  if (Parser.getTok().isNot(K: AsmToken::RCurly))
    return Error(L: Parser.getTok().getLoc(), Msg: "'}' expected");
  E = Parser.getTok().getEndLoc();
  Parser.Lex(); // Eat '}' token.

  switch (LaneKind) {
  case NoLanes:
  case AllLanes: {
    // Two-register operands have been converted to the
    // composite register classes.
    if (Count == 2 && !hasMVE()) {
      const MCRegisterClass *RC = (Spacing == 1) ?
        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
      FirstReg = MRI->getMatchingSuperReg(Reg: FirstReg, SubIdx: ARM::dsub_0, RC);
    }
    auto Create = (LaneKind == NoLanes ? ARMOperand::CreateVectorList :
                   ARMOperand::CreateVectorListAllLanes);
    Operands.push_back(Elt: Create(FirstReg, Count, (Spacing == 2), S, E, *this));
    break;
  }
  case IndexedLane:
    Operands.push_back(Elt: ARMOperand::CreateVectorListIndexed(
        Reg: FirstReg, Count, Index: LaneIndex, isDoubleSpaced: (Spacing == 2), S, E, Parser&: *this));
    break;
  }
  return ParseStatus::Success;
}
5075
/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
///
/// Accepts either a symbolic option name (with several gas-compatible
/// aliases, e.g. "sh"/"ish", "un"/"nsh", "unst"/"nshst") or a raw 4-bit
/// immediate, optionally prefixed with '#' or '$'. The load-only variants
/// (ld, ishld, nshld, oshld) require ARMv8.
ParseStatus ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  unsigned Opt;

  if (Tok.is(K: AsmToken::Identifier)) {
    StringRef OptStr = Tok.getString();

    // Map the (case-insensitive) option name to its encoding.
    Opt = StringSwitch<unsigned>(OptStr.lower())
      .Case(S: "sy", Value: ARM_MB::SY)
      .Case(S: "st", Value: ARM_MB::ST)
      .Case(S: "ld", Value: ARM_MB::LD)
      .Case(S: "sh", Value: ARM_MB::ISH)
      .Case(S: "ish", Value: ARM_MB::ISH)
      .Case(S: "shst", Value: ARM_MB::ISHST)
      .Case(S: "ishst", Value: ARM_MB::ISHST)
      .Case(S: "ishld", Value: ARM_MB::ISHLD)
      .Case(S: "nsh", Value: ARM_MB::NSH)
      .Case(S: "un", Value: ARM_MB::NSH)
      .Case(S: "nshst", Value: ARM_MB::NSHST)
      .Case(S: "nshld", Value: ARM_MB::NSHLD)
      .Case(S: "unst", Value: ARM_MB::NSHST)
      .Case(S: "osh", Value: ARM_MB::OSH)
      .Case(S: "oshst", Value: ARM_MB::OSHST)
      .Case(S: "oshld", Value: ARM_MB::OSHLD)
      .Default(Value: ~0U);

    // ishld, oshld, nshld and ld are only available from ARMv8.
    if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD ||
                        Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD))
      Opt = ~0U;

    if (Opt == ~0U)
      return ParseStatus::NoMatch;

    Parser.Lex(); // Eat identifier token.
  } else if (Tok.is(K: AsmToken::Hash) ||
             Tok.is(K: AsmToken::Dollar) ||
             Tok.is(K: AsmToken::Integer)) {
    // Immediate form, optionally prefixed with '#' or '$'.
    if (Parser.getTok().isNot(K: AsmToken::Integer))
      Parser.Lex(); // Eat '#' or '$'.
    SMLoc Loc = Parser.getTok().getLoc();

    const MCExpr *MemBarrierID;
    if (getParser().parseExpression(Res&: MemBarrierID))
      return Error(L: Loc, Msg: "illegal expression");

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: MemBarrierID);
    if (!CE)
      return Error(L: Loc, Msg: "constant expression expected");

    // Only a 4-bit option field is encodable.
    int Val = CE->getValue();
    if (Val & ~0xf)
      return Error(L: Loc, Msg: "immediate value out of range");

    Opt = ARM_MB::RESERVED_0 + Val;
  } else
    return Error(L: Parser.getTok().getLoc(),
                 Msg: "expected an immediate or barrier type");

  Operands.push_back(
      Elt: ARMOperand::CreateMemBarrierOpt(Opt: (ARM_MB::MemBOpt)Opt, S, Parser&: *this));
  return ParseStatus::Success;
}
5142
5143ParseStatus
5144ARMAsmParser::parseTraceSyncBarrierOptOperand(OperandVector &Operands) {
5145 MCAsmParser &Parser = getParser();
5146 SMLoc S = Parser.getTok().getLoc();
5147 const AsmToken &Tok = Parser.getTok();
5148
5149 if (Tok.isNot(K: AsmToken::Identifier))
5150 return ParseStatus::NoMatch;
5151
5152 if (!Tok.getString().equals_insensitive(RHS: "csync"))
5153 return ParseStatus::NoMatch;
5154
5155 Parser.Lex(); // Eat identifier token.
5156
5157 Operands.push_back(
5158 Elt: ARMOperand::CreateTraceSyncBarrierOpt(Opt: ARM_TSB::CSYNC, S, Parser&: *this));
5159 return ParseStatus::Success;
5160}
5161
/// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options.
/// Accepts either the symbolic form ("sy") or a numeric form: an optional
/// '#'/'$' followed by a constant expression in the 4-bit range [0, 15].
ParseStatus
ARMAsmParser::parseInstSyncBarrierOptOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  unsigned Opt;

  if (Tok.is(K: AsmToken::Identifier)) {
    // Symbolic form: "sy" is the only named ISB option.
    StringRef OptStr = Tok.getString();

    if (OptStr.equals_insensitive(RHS: "sy"))
      Opt = ARM_ISB::SY;
    else
      return ParseStatus::NoMatch;

    Parser.Lex(); // Eat identifier token.
  } else if (Tok.is(K: AsmToken::Hash) ||
             Tok.is(K: AsmToken::Dollar) ||
             Tok.is(K: AsmToken::Integer)) {
    // Numeric form: the '#'/'$' prefix is optional.
    if (Parser.getTok().isNot(K: AsmToken::Integer))
      Parser.Lex(); // Eat '#' or '$'.
    SMLoc Loc = Parser.getTok().getLoc();

    const MCExpr *ISBarrierID;
    if (getParser().parseExpression(Res&: ISBarrierID))
      return Error(L: Loc, Msg: "illegal expression");

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: ISBarrierID);
    if (!CE)
      return Error(L: Loc, Msg: "constant expression expected");

    // The encoded option is a 4-bit field.
    int Val = CE->getValue();
    if (Val & ~0xf)
      return Error(L: Loc, Msg: "immediate value out of range");

    Opt = ARM_ISB::RESERVED_0 + Val;
  } else
    return Error(L: Parser.getTok().getLoc(),
                 Msg: "expected an immediate or barrier type");

  Operands.push_back(Elt: ARMOperand::CreateInstSyncBarrierOpt(
      Opt: (ARM_ISB::InstSyncBOpt)Opt, S, Parser&: *this));
  return ParseStatus::Success;
}
5207
5208/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
5209ParseStatus ARMAsmParser::parseProcIFlagsOperand(OperandVector &Operands) {
5210 MCAsmParser &Parser = getParser();
5211 SMLoc S = Parser.getTok().getLoc();
5212 const AsmToken &Tok = Parser.getTok();
5213 if (!Tok.is(K: AsmToken::Identifier))
5214 return ParseStatus::NoMatch;
5215 StringRef IFlagsStr = Tok.getString();
5216
5217 // An iflags string of "none" is interpreted to mean that none of the AIF
5218 // bits are set. Not a terribly useful instruction, but a valid encoding.
5219 unsigned IFlags = 0;
5220 if (IFlagsStr != "none") {
5221 for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
5222 unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(Start: i, N: 1).lower())
5223 .Case(S: "a", Value: ARM_PROC::A)
5224 .Case(S: "i", Value: ARM_PROC::I)
5225 .Case(S: "f", Value: ARM_PROC::F)
5226 .Default(Value: ~0U);
5227
5228 // If some specific iflag is already set, it means that some letter is
5229 // present more than once, this is not acceptable.
5230 if (Flag == ~0U || (IFlags & Flag))
5231 return ParseStatus::NoMatch;
5232
5233 IFlags |= Flag;
5234 }
5235 }
5236
5237 Parser.Lex(); // Eat identifier token.
5238 Operands.push_back(
5239 Elt: ARMOperand::CreateProcIFlags(IFlags: (ARM_PROC::IFlags)IFlags, S, Parser&: *this));
5240 return ParseStatus::Success;
5241}
5242
/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
/// Accepts a raw integer SYSm value, an M-class named system register, or an
/// A/R-class spec-reg-plus-flags form such as "cpsr_fc" or "apsr_nzcvq".
ParseStatus ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) {
  // Don't parse two MSR registers in a row
  if (static_cast<ARMOperand &>(*Operands.back()).isMSRMask() ||
      static_cast<ARMOperand &>(*Operands.back()).isBankedReg())
    return ParseStatus::NoMatch;
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();

  // A bare integer is accepted as a raw SYSm value; it must be in [0, 255].
  if (Tok.is(K: AsmToken::Integer)) {
    int64_t Val = Tok.getIntVal();
    if (Val > 255 || Val < 0) {
      return ParseStatus::NoMatch;
    }
    unsigned SYSmvalue = Val & 0xFF;
    Parser.Lex();
    Operands.push_back(Elt: ARMOperand::CreateMSRMask(MMask: SYSmvalue, S, Parser&: *this));
    return ParseStatus::Success;
  }

  if (!Tok.is(K: AsmToken::Identifier))
    return ParseStatus::NoMatch;
  StringRef Mask = Tok.getString();

  // On M-class cores the operand is a named system register; it must also be
  // supported by the current subtarget's feature set.
  if (isMClass()) {
    auto TheReg = ARMSysReg::lookupMClassSysRegByName(Name: Mask.lower());
    if (!TheReg || !TheReg->hasRequiredFeatures(ActiveFeatures: getSTI().getFeatureBits()))
      return ParseStatus::NoMatch;

    unsigned SYSmvalue = TheReg->Encoding & 0xFFF;

    Parser.Lex(); // Eat identifier token.
    Operands.push_back(Elt: ARMOperand::CreateMSRMask(MMask: SYSmvalue, S, Parser&: *this));
    return ParseStatus::Success;
  }

  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
  size_t Start = 0, Next = Mask.find(C: '_');
  StringRef Flags = "";
  std::string SpecReg = Mask.slice(Start, End: Next).lower();
  if (Next != StringRef::npos)
    Flags = Mask.substr(Start: Next + 1);

  // FlagsVal contains the complete mask:
  // 3-0: Mask
  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
  unsigned FlagsVal = 0;

  if (SpecReg == "apsr") {
    // APSR only accepts the fixed flag groups below.
    FlagsVal = StringSwitch<unsigned>(Flags)
               .Case(S: "nzcvq", Value: 0x8) // same as CPSR_f
               .Case(S: "g", Value: 0x4)     // same as CPSR_s
               .Case(S: "nzcvqg", Value: 0xc) // same as CPSR_fs
               .Default(Value: ~0U);

    if (FlagsVal == ~0U) {
      // Bare "apsr" (no flags) is allowed; any unrecognized suffix is not.
      if (!Flags.empty())
        return ParseStatus::NoMatch;
      else
        FlagsVal = 8; // No flag
    }
  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
    // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
    if (Flags == "all" || Flags == "")
      Flags = "fc";
    // Each flag letter sets one mask bit: c=1, x=2, s=4, f=8.
    for (int i = 0, e = Flags.size(); i != e; ++i) {
      unsigned Flag = StringSwitch<unsigned>(Flags.substr(Start: i, N: 1))
      .Case(S: "c", Value: 1)
      .Case(S: "x", Value: 2)
      .Case(S: "s", Value: 4)
      .Case(S: "f", Value: 8)
      .Default(Value: ~0U);

      // If some specific flag is already set, it means that some letter is
      // present more than once, this is not acceptable.
      if (Flag == ~0U || (FlagsVal & Flag))
        return ParseStatus::NoMatch;
      FlagsVal |= Flag;
    }
  } else // No match for special register.
    return ParseStatus::NoMatch;

  // Special register without flags is NOT equivalent to "fc" flags.
  // NOTE: This is a divergence from gas' behavior. Uncommenting the following
  // two lines would enable gas compatibility at the expense of breaking
  // round-tripping.
  //
  // if (!FlagsVal)
  //    FlagsVal = 0x9;

  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
  if (SpecReg == "spsr")
    FlagsVal |= 16;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(Elt: ARMOperand::CreateMSRMask(MMask: FlagsVal, S, Parser&: *this));
  return ParseStatus::Success;
}
5342
5343/// parseBankedRegOperand - Try to parse a banked register (e.g. "lr_irq") for
5344/// use in the MRS/MSR instructions added to support virtualization.
5345ParseStatus ARMAsmParser::parseBankedRegOperand(OperandVector &Operands) {
5346 // Don't parse two Banked registers in a row
5347 if (static_cast<ARMOperand &>(*Operands.back()).isBankedReg() ||
5348 static_cast<ARMOperand &>(*Operands.back()).isMSRMask())
5349 return ParseStatus::NoMatch;
5350 MCAsmParser &Parser = getParser();
5351 SMLoc S = Parser.getTok().getLoc();
5352 const AsmToken &Tok = Parser.getTok();
5353 if (!Tok.is(K: AsmToken::Identifier))
5354 return ParseStatus::NoMatch;
5355 StringRef RegName = Tok.getString();
5356
5357 auto TheReg = ARMBankedReg::lookupBankedRegByName(Name: RegName.lower());
5358 if (!TheReg)
5359 return ParseStatus::NoMatch;
5360 unsigned Encoding = TheReg->Encoding;
5361
5362 Parser.Lex(); // Eat identifier token.
5363 Operands.push_back(Elt: ARMOperand::CreateBankedReg(Reg: Encoding, S, Parser&: *this));
5364 return ParseStatus::Success;
5365}
5366
// FIXME: Unify the different methods for handling shift operators
// and use TableGen matching mechanisms to do the validation rather than
// separate parsing paths.
/// parsePKHImm - Parse a shifted-immediate operand of the form
/// "<shift-op> #<imm>", where the shift opcode must equal \p Op and the
/// immediate must lie in [\p Low, \p High]. Used for PKHBT/PKHTB operands.
ParseStatus ARMAsmParser::parsePKHImm(OperandVector &Operands,
                                      ARM_AM::ShiftOpc Op, int Low, int High) {
  MCAsmParser &Parser = getParser();
  auto ShiftCodeOpt = tryParseShiftToken();

  if (!ShiftCodeOpt.has_value())
    return ParseStatus::NoMatch;
  auto ShiftCode = ShiftCodeOpt.value();

  // The wrong shift code has been provided. Can error here as has matched the
  // correct operand in this case.
  if (ShiftCode != Op)
    return Error(L: Parser.getTok().getLoc(),
                 Msg: ARM_AM::getShiftOpcStr(Op) + " operand expected.");

  Parser.Lex(); // Eat shift type token.

  // There must be a '#' and a shift amount.
  // NOTE(review): the shift-type token has already been consumed at this
  // point, so NoMatch here does not leave the stream untouched -- confirm
  // callers tolerate this.
  if (Parser.getTok().isNot(K: AsmToken::Hash) &&
      Parser.getTok().isNot(K: AsmToken::Dollar))
    return ParseStatus::NoMatch;
  Parser.Lex(); // Eat hash token.

  const MCExpr *ShiftAmount;
  SMLoc Loc = Parser.getTok().getLoc();
  SMLoc EndLoc;
  if (getParser().parseExpression(Res&: ShiftAmount, EndLoc))
    return Error(L: Loc, Msg: "illegal expression");
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: ShiftAmount);
  if (!CE)
    return Error(L: Loc, Msg: "constant expression expected");
  // The amount must be a compile-time constant within the caller's bounds.
  int Val = CE->getValue();
  if (Val < Low || Val > High)
    return Error(L: Loc, Msg: "immediate value out of range");

  Operands.push_back(Elt: ARMOperand::CreateImm(Val: CE, S: Loc, E: EndLoc, Parser&: *this));

  return ParseStatus::Success;
}
5409
5410ParseStatus ARMAsmParser::parseSetEndImm(OperandVector &Operands) {
5411 MCAsmParser &Parser = getParser();
5412 const AsmToken &Tok = Parser.getTok();
5413 SMLoc S = Tok.getLoc();
5414 if (Tok.isNot(K: AsmToken::Identifier))
5415 return Error(L: S, Msg: "'be' or 'le' operand expected");
5416 int Val = StringSwitch<int>(Tok.getString().lower())
5417 .Case(S: "be", Value: 1)
5418 .Case(S: "le", Value: 0)
5419 .Default(Value: -1);
5420 Parser.Lex(); // Eat the token.
5421
5422 if (Val == -1)
5423 return Error(L: S, Msg: "'be' or 'le' operand expected");
5424 Operands.push_back(Elt: ARMOperand::CreateImm(
5425 Val: MCConstantExpr::create(Value: Val, Ctx&: getContext()), S, E: Tok.getEndLoc(), Parser&: *this));
5426 return ParseStatus::Success;
5427}
5428
5429/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
5430/// instructions. Legal values are:
5431/// lsl #n 'n' in [0,31]
5432/// asr #n 'n' in [1,32]
5433/// n == 32 encoded as n == 0.
5434ParseStatus ARMAsmParser::parseShifterImm(OperandVector &Operands) {
5435 MCAsmParser &Parser = getParser();
5436 const AsmToken &Tok = Parser.getTok();
5437 SMLoc S = Tok.getLoc();
5438 if (Tok.isNot(K: AsmToken::Identifier))
5439 return ParseStatus::NoMatch;
5440 StringRef ShiftName = Tok.getString();
5441 bool isASR;
5442 if (ShiftName == "lsl" || ShiftName == "LSL")
5443 isASR = false;
5444 else if (ShiftName == "asr" || ShiftName == "ASR")
5445 isASR = true;
5446 else
5447 return ParseStatus::NoMatch;
5448 Parser.Lex(); // Eat the operator.
5449
5450 // A '#' and a shift amount.
5451 if (Parser.getTok().isNot(K: AsmToken::Hash) &&
5452 Parser.getTok().isNot(K: AsmToken::Dollar))
5453 return Error(L: Parser.getTok().getLoc(), Msg: "'#' expected");
5454 Parser.Lex(); // Eat hash token.
5455 SMLoc ExLoc = Parser.getTok().getLoc();
5456
5457 const MCExpr *ShiftAmount;
5458 SMLoc EndLoc;
5459 if (getParser().parseExpression(Res&: ShiftAmount, EndLoc))
5460 return Error(L: ExLoc, Msg: "malformed shift expression");
5461 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: ShiftAmount);
5462 if (!CE)
5463 return Error(L: ExLoc, Msg: "shift amount must be an immediate");
5464
5465 int64_t Val = CE->getValue();
5466 if (isASR) {
5467 // Shift amount must be in [1,32]
5468 if (Val < 1 || Val > 32)
5469 return Error(L: ExLoc, Msg: "'asr' shift amount must be in range [1,32]");
5470 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
5471 if (isThumb() && Val == 32)
5472 return Error(L: ExLoc, Msg: "'asr #32' shift amount not allowed in Thumb mode");
5473 if (Val == 32) Val = 0;
5474 } else {
5475 // Shift amount must be in [1,32]
5476 if (Val < 0 || Val > 31)
5477 return Error(L: ExLoc, Msg: "'lsr' shift amount must be in range [0,31]");
5478 }
5479
5480 Operands.push_back(
5481 Elt: ARMOperand::CreateShifterImm(isASR, Imm: Val, S, E: EndLoc, Parser&: *this));
5482
5483 return ParseStatus::Success;
5484}
5485
5486/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
5487/// of instructions. Legal values are:
5488/// ror #n 'n' in {0, 8, 16, 24}
5489ParseStatus ARMAsmParser::parseRotImm(OperandVector &Operands) {
5490 MCAsmParser &Parser = getParser();
5491 const AsmToken &Tok = Parser.getTok();
5492 SMLoc S = Tok.getLoc();
5493 if (Tok.isNot(K: AsmToken::Identifier))
5494 return ParseStatus::NoMatch;
5495 StringRef ShiftName = Tok.getString();
5496 if (ShiftName != "ror" && ShiftName != "ROR")
5497 return ParseStatus::NoMatch;
5498 Parser.Lex(); // Eat the operator.
5499
5500 // A '#' and a rotate amount.
5501 if (Parser.getTok().isNot(K: AsmToken::Hash) &&
5502 Parser.getTok().isNot(K: AsmToken::Dollar))
5503 return Error(L: Parser.getTok().getLoc(), Msg: "'#' expected");
5504 Parser.Lex(); // Eat hash token.
5505 SMLoc ExLoc = Parser.getTok().getLoc();
5506
5507 const MCExpr *ShiftAmount;
5508 SMLoc EndLoc;
5509 if (getParser().parseExpression(Res&: ShiftAmount, EndLoc))
5510 return Error(L: ExLoc, Msg: "malformed rotate expression");
5511 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: ShiftAmount);
5512 if (!CE)
5513 return Error(L: ExLoc, Msg: "rotate amount must be an immediate");
5514
5515 int64_t Val = CE->getValue();
5516 // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension)
5517 // normally, zero is represented in asm by omitting the rotate operand
5518 // entirely.
5519 if (Val != 8 && Val != 16 && Val != 24 && Val != 0)
5520 return Error(L: ExLoc, Msg: "'ror' rotate amount must be 8, 16, or 24");
5521
5522 Operands.push_back(Elt: ARMOperand::CreateRotImm(Imm: Val, S, E: EndLoc, Parser&: *this));
5523
5524 return ParseStatus::Success;
5525}
5526
5527ParseStatus ARMAsmParser::parseModImm(OperandVector &Operands) {
5528 MCAsmParser &Parser = getParser();
5529 AsmLexer &Lexer = getLexer();
5530 int64_t Imm1, Imm2;
5531
5532 SMLoc S = Parser.getTok().getLoc();
5533
5534 // 1) A mod_imm operand can appear in the place of a register name:
5535 // add r0, #mod_imm
5536 // add r0, r0, #mod_imm
5537 // to correctly handle the latter, we bail out as soon as we see an
5538 // identifier.
5539 //
5540 // 2) Similarly, we do not want to parse into complex operands:
5541 // mov r0, #mod_imm
5542 // mov r0, :lower16:(_foo)
5543 if (Parser.getTok().is(K: AsmToken::Identifier) ||
5544 Parser.getTok().is(K: AsmToken::Colon))
5545 return ParseStatus::NoMatch;
5546
5547 // Hash (dollar) is optional as per the ARMARM
5548 if (Parser.getTok().is(K: AsmToken::Hash) ||
5549 Parser.getTok().is(K: AsmToken::Dollar)) {
5550 // Avoid parsing into complex operands (#:)
5551 if (Lexer.peekTok().is(K: AsmToken::Colon))
5552 return ParseStatus::NoMatch;
5553
5554 // Eat the hash (dollar)
5555 Parser.Lex();
5556 }
5557
5558 SMLoc Sx1, Ex1;
5559 Sx1 = Parser.getTok().getLoc();
5560 const MCExpr *Imm1Exp;
5561 if (getParser().parseExpression(Res&: Imm1Exp, EndLoc&: Ex1))
5562 return Error(L: Sx1, Msg: "malformed expression");
5563
5564 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Imm1Exp);
5565
5566 if (CE) {
5567 // Immediate must fit within 32-bits
5568 Imm1 = CE->getValue();
5569 int Enc = ARM_AM::getSOImmVal(Arg: Imm1);
5570 if (Enc != -1 && Parser.getTok().is(K: AsmToken::EndOfStatement)) {
5571 // We have a match!
5572 Operands.push_back(Elt: ARMOperand::CreateModImm(
5573 Bits: (Enc & 0xFF), Rot: (Enc & 0xF00) >> 7, S: Sx1, E: Ex1, Parser&: *this));
5574 return ParseStatus::Success;
5575 }
5576
5577 // We have parsed an immediate which is not for us, fallback to a plain
5578 // immediate. This can happen for instruction aliases. For an example,
5579 // ARMInstrInfo.td defines the alias [mov <-> mvn] which can transform
5580 // a mov (mvn) with a mod_imm_neg/mod_imm_not operand into the opposite
5581 // instruction with a mod_imm operand. The alias is defined such that the
5582 // parser method is shared, that's why we have to do this here.
5583 if (Parser.getTok().is(K: AsmToken::EndOfStatement)) {
5584 Operands.push_back(Elt: ARMOperand::CreateImm(Val: Imm1Exp, S: Sx1, E: Ex1, Parser&: *this));
5585 return ParseStatus::Success;
5586 }
5587 } else {
5588 // Operands like #(l1 - l2) can only be evaluated at a later stage (via an
5589 // MCFixup). Fallback to a plain immediate.
5590 Operands.push_back(Elt: ARMOperand::CreateImm(Val: Imm1Exp, S: Sx1, E: Ex1, Parser&: *this));
5591 return ParseStatus::Success;
5592 }
5593
5594 // From this point onward, we expect the input to be a (#bits, #rot) pair
5595 if (Parser.getTok().isNot(K: AsmToken::Comma))
5596 return Error(L: Sx1,
5597 Msg: "expected modified immediate operand: #[0, 255], #even[0-30]");
5598
5599 if (Imm1 & ~0xFF)
5600 return Error(L: Sx1, Msg: "immediate operand must a number in the range [0, 255]");
5601
5602 // Eat the comma
5603 Parser.Lex();
5604
5605 // Repeat for #rot
5606 SMLoc Sx2, Ex2;
5607 Sx2 = Parser.getTok().getLoc();
5608
5609 // Eat the optional hash (dollar)
5610 if (Parser.getTok().is(K: AsmToken::Hash) ||
5611 Parser.getTok().is(K: AsmToken::Dollar))
5612 Parser.Lex();
5613
5614 const MCExpr *Imm2Exp;
5615 if (getParser().parseExpression(Res&: Imm2Exp, EndLoc&: Ex2))
5616 return Error(L: Sx2, Msg: "malformed expression");
5617
5618 CE = dyn_cast<MCConstantExpr>(Val: Imm2Exp);
5619
5620 if (CE) {
5621 Imm2 = CE->getValue();
5622 if (!(Imm2 & ~0x1E)) {
5623 // We have a match!
5624 Operands.push_back(Elt: ARMOperand::CreateModImm(Bits: Imm1, Rot: Imm2, S, E: Ex2, Parser&: *this));
5625 return ParseStatus::Success;
5626 }
5627 return Error(L: Sx2,
5628 Msg: "immediate operand must an even number in the range [0, 30]");
5629 } else {
5630 return Error(L: Sx2, Msg: "constant expression expected");
5631 }
5632}
5633
/// parseBitfield - Parse the "#lsb, #width" operand pair for BFC/BFI/SBFX/
/// UBFX-style bitfield instructions: lsb in [0,31], width in [1, 32-lsb].
ParseStatus ARMAsmParser::parseBitfield(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  // The bitfield descriptor is really two operands, the LSB and the width.
  if (Parser.getTok().isNot(K: AsmToken::Hash) &&
      Parser.getTok().isNot(K: AsmToken::Dollar))
    return ParseStatus::NoMatch;
  Parser.Lex(); // Eat hash token.

  const MCExpr *LSBExpr;
  SMLoc E = Parser.getTok().getLoc();
  if (getParser().parseExpression(Res&: LSBExpr))
    return Error(L: E, Msg: "malformed immediate expression");
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: LSBExpr);
  if (!CE)
    return Error(L: E, Msg: "'lsb' operand must be an immediate");

  int64_t LSB = CE->getValue();
  // The LSB must be in the range [0,31]
  if (LSB < 0 || LSB > 31)
    return Error(L: E, Msg: "'lsb' operand must be in the range [0,31]");
  E = Parser.getTok().getLoc();

  // Expect another immediate operand.
  if (Parser.getTok().isNot(K: AsmToken::Comma))
    return Error(L: Parser.getTok().getLoc(), Msg: "too few operands");
  Parser.Lex(); // Eat comma token.
  if (Parser.getTok().isNot(K: AsmToken::Hash) &&
      Parser.getTok().isNot(K: AsmToken::Dollar))
    return Error(L: Parser.getTok().getLoc(), Msg: "'#' expected");
  Parser.Lex(); // Eat hash token.

  const MCExpr *WidthExpr;
  SMLoc EndLoc;
  if (getParser().parseExpression(Res&: WidthExpr, EndLoc))
    return Error(L: E, Msg: "malformed immediate expression");
  CE = dyn_cast<MCConstantExpr>(Val: WidthExpr);
  if (!CE)
    return Error(L: E, Msg: "'width' operand must be an immediate");

  int64_t Width = CE->getValue();
  // The width must be in the range [1,32-lsb]
  if (Width < 1 || Width > 32 - LSB)
    return Error(L: E, Msg: "'width' operand must be in the range [1,32-lsb]");

  Operands.push_back(Elt: ARMOperand::CreateBitfield(LSB, Width, S, E: EndLoc, Parser&: *this));

  return ParseStatus::Success;
}
5683
/// parsePostIdxReg - Parse a post-index addressing register operand.
ParseStatus ARMAsmParser::parsePostIdxReg(OperandVector &Operands) {
  // Check for a post-index addressing register operand. Specifically:
  //  postidx_reg := '+' register {, shift}
  //               | '-' register {, shift}
  //               | register {, shift}

  // This method must return ParseStatus::NoMatch without consuming any tokens
  // in the case where there is no match, as other alternatives take other
  // parse methods.
  MCAsmParser &Parser = getParser();
  AsmToken Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();
  // An explicit '+'/'-' sign selects the offset direction; default is add.
  bool haveEaten = false;
  bool isAdd = true;
  if (Tok.is(K: AsmToken::Plus)) {
    Parser.Lex(); // Eat the '+' token.
    haveEaten = true;
  } else if (Tok.is(K: AsmToken::Minus)) {
    Parser.Lex(); // Eat the '-' token.
    isAdd = false;
    haveEaten = true;
  }

  SMLoc E = Parser.getTok().getEndLoc();
  MCRegister Reg = tryParseRegister();
  if (!Reg) {
    // Only a hard error if we already committed by eating a sign token.
    if (!haveEaten)
      return ParseStatus::NoMatch;
    return Error(L: Parser.getTok().getLoc(), Msg: "register expected");
  }

  // Optional ", <shift>" suffix after the register.
  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
  unsigned ShiftImm = 0;
  if (Parser.getTok().is(K: AsmToken::Comma)) {
    Parser.Lex(); // Eat the ','.
    if (parseMemRegOffsetShift(ShiftType&: ShiftTy, ShiftAmount&: ShiftImm))
      return ParseStatus::Failure;

    // FIXME: Only approximates end...may include intervening whitespace.
    E = Parser.getTok().getLoc();
  }

  Operands.push_back(
      Elt: ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy, ShiftImm, S, E, Parser&: *this));

  return ParseStatus::Success;
}
5731
/// parseAM3Offset - Parse an addressing-mode-3 offset operand: either a
/// (possibly signed) immediate introduced by '#'/'$', or an optionally
/// signed register.
ParseStatus ARMAsmParser::parseAM3Offset(OperandVector &Operands) {
  // Check for a post-index addressing register operand. Specifically:
  //  am3offset := '+' register
  //             | '-' register
  //             | register
  //             | # imm
  //             | # + imm
  //             | # - imm

  // This method must return ParseStatus::NoMatch without consuming any tokens
  // in the case where there is no match, as other alternatives take other
  // parse methods.
  MCAsmParser &Parser = getParser();
  AsmToken Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();

  // Do immediates first, as we always parse those if we have a '#'.
  if (Parser.getTok().is(K: AsmToken::Hash) ||
      Parser.getTok().is(K: AsmToken::Dollar)) {
    Parser.Lex(); // Eat '#' or '$'.
    // Explicitly look for a '-', as we need to encode negative zero
    // differently.
    bool isNegative = Parser.getTok().is(K: AsmToken::Minus);
    const MCExpr *Offset;
    SMLoc E;
    if (getParser().parseExpression(Res&: Offset, EndLoc&: E))
      return ParseStatus::Failure;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Offset);
    if (!CE)
      return Error(L: S, Msg: "constant expression expected");
    // Negative zero is encoded as the flag value
    // std::numeric_limits<int32_t>::min().
    int32_t Val = CE->getValue();
    if (isNegative && Val == 0)
      Val = std::numeric_limits<int32_t>::min();

    Operands.push_back(Elt: ARMOperand::CreateImm(
        Val: MCConstantExpr::create(Value: Val, Ctx&: getContext()), S, E, Parser&: *this));

    return ParseStatus::Success;
  }

  // Otherwise expect an optionally signed register.
  bool haveEaten = false;
  bool isAdd = true;
  if (Tok.is(K: AsmToken::Plus)) {
    Parser.Lex(); // Eat the '+' token.
    haveEaten = true;
  } else if (Tok.is(K: AsmToken::Minus)) {
    Parser.Lex(); // Eat the '-' token.
    isAdd = false;
    haveEaten = true;
  }

  Tok = Parser.getTok();
  MCRegister Reg = tryParseRegister();
  if (!Reg) {
    // Only a hard error if a sign token was already consumed.
    if (!haveEaten)
      return ParseStatus::NoMatch;
    return Error(L: Tok.getLoc(), Msg: "register expected");
  }

  Operands.push_back(Elt: ARMOperand::CreatePostIdxReg(
      Reg, isAdd, ShiftTy: ARM_AM::no_shift, ShiftImm: 0, S, E: Tok.getEndLoc(), Parser&: *this));

  return ParseStatus::Success;
}
5798
5799// Finds the index of the first CondCode operator, if there is none returns 0
5800unsigned findCondCodeInd(const OperandVector &Operands,
5801 unsigned MnemonicOpsEndInd) {
5802 for (unsigned I = 1; I < MnemonicOpsEndInd; ++I) {
5803 auto Op = static_cast<ARMOperand &>(*Operands[I]);
5804 if (Op.isCondCode())
5805 return I;
5806 }
5807 return 0;
5808}
5809
5810unsigned findCCOutInd(const OperandVector &Operands,
5811 unsigned MnemonicOpsEndInd) {
5812 for (unsigned I = 1; I < MnemonicOpsEndInd; ++I) {
5813 auto Op = static_cast<ARMOperand &>(*Operands[I]);
5814 if (Op.isCCOut())
5815 return I;
5816 }
5817 return 0;
5818}
5819
/// Convert parsed operands to MCInst. Needed here because this instruction
/// only has two register operands, but multiplication is commutative so
/// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN".
void ARMAsmParser::cvtThumbMultiply(MCInst &Inst,
                                    const OperandVector &Operands) {
  unsigned MnemonicOpsEndInd = getMnemonicOpsEndInd(Operands);
  // Index 0 from these helpers means "operand not present".
  unsigned CondI = findCondCodeInd(Operands, MnemonicOpsEndInd);
  unsigned CondOutI = findCCOutInd(Operands, MnemonicOpsEndInd);

  // 2 operand form
  unsigned RegRd = MnemonicOpsEndInd;
  unsigned RegRn = MnemonicOpsEndInd + 1;
  unsigned RegRm = MnemonicOpsEndInd;

  if (Operands.size() == MnemonicOpsEndInd + 3) {
    // If we have a three-operand form, make sure to set Rn to be the operand
    // that isn't the same as Rd.
    if (((ARMOperand &)*Operands[RegRd]).getReg() ==
        ((ARMOperand &)*Operands[MnemonicOpsEndInd + 1]).getReg()) {
      RegRn = MnemonicOpsEndInd + 2;
      RegRm = MnemonicOpsEndInd + 1;
    } else {
      RegRn = MnemonicOpsEndInd + 1;
      RegRm = MnemonicOpsEndInd + 2;
    }
  }

  // Rd
  ((ARMOperand &)*Operands[RegRd]).addRegOperands(Inst, N: 1);
  // CCOut
  if (CondOutI != 0) {
    ((ARMOperand &)*Operands[CondOutI]).addCCOutOperands(Inst, N: 1);
  } else {
    // No explicit CC-out operand was parsed; synthesize a default one
    // (register 0).
    ARMOperand Op =
        *ARMOperand::CreateCCOut(Reg: 0, S: Operands[0]->getEndLoc(), Parser&: *this);
    Op.addCCOutOperands(Inst, N: 1);
  }
  // Rn
  ((ARMOperand &)*Operands[RegRn]).addRegOperands(Inst, N: 1);
  // Rm
  ((ARMOperand &)*Operands[RegRm]).addRegOperands(Inst, N: 1);

  // Cond code
  if (CondI != 0) {
    ((ARMOperand &)*Operands[CondI]).addCondCodeOperands(Inst, N: 2);
  } else {
    // No condition-code operand was parsed; default to AL (always).
    ARMOperand Op = *ARMOperand::CreateCondCode(
        CC: llvm::ARMCC::AL, S: Operands[0]->getEndLoc(), Parser&: *this);
    Op.addCondCodeOperands(Inst, N: 2);
  }
}
5871
/// Convert parsed Thumb branch operands to an MCInst, choosing between the
/// conditional/unconditional and narrow/wide encodings.
void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
                                    const OperandVector &Operands) {
  unsigned MnemonicOpsEndInd = getMnemonicOpsEndInd(Operands);
  unsigned CondI = findCondCodeInd(Operands, MnemonicOpsEndInd);
  // CondI == 0 means no condition-code operand was parsed; treat as AL.
  unsigned Cond =
      (CondI == 0 ? ARMCC::AL
                  : static_cast<ARMOperand &>(*Operands[CondI]).getCondCode());

  // first decide whether or not the branch should be conditional
  // by looking at it's location relative to an IT block
  if(inITBlock()) {
    // inside an IT block we cannot have any conditional branches. any
    // such instructions needs to be converted to unconditional form
    switch(Inst.getOpcode()) {
      case ARM::tBcc: Inst.setOpcode(ARM::tB); break;
      case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break;
    }
  } else {
    // Outside an IT block: select the (un)conditional opcode based on Cond.
    switch(Inst.getOpcode()) {
      case ARM::tB:
      case ARM::tBcc:
        Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc);
        break;
      case ARM::t2B:
      case ARM::t2Bcc:
        Inst.setOpcode(Cond == ARMCC::AL ? ARM::t2B : ARM::t2Bcc);
        break;
    }
  }

  // now decide on encoding size based on branch target range
  switch(Inst.getOpcode()) {
    // classify tB as either t2B or t1B based on range of immediate operand
    case ARM::tB: {
      ARMOperand &op = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]);
      // Widen to t2B when the offset doesn't fit the narrow encoding and the
      // subtarget has the wide branch (hasV8MBaseline).
      if (!op.isSignedOffset<11, 1>() && isThumb() && hasV8MBaseline())
        Inst.setOpcode(ARM::t2B);
      break;
    }
    // classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand
    case ARM::tBcc: {
      ARMOperand &op = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]);
      if (!op.isSignedOffset<8, 1>() && isThumb() && hasV8MBaseline())
        Inst.setOpcode(ARM::t2Bcc);
      break;
    }
  }
  // Branch target immediate, then the condition code (explicit or default AL).
  ((ARMOperand &)*Operands[MnemonicOpsEndInd]).addImmOperands(Inst, N: 1);
  if (CondI != 0) {
    ((ARMOperand &)*Operands[CondI]).addCondCodeOperands(Inst, N: 2);
  } else {
    ARMOperand Op = *ARMOperand::CreateCondCode(
        CC: llvm::ARMCC::AL, S: Operands[0]->getEndLoc(), Parser&: *this);
    Op.addCondCodeOperands(Inst, N: 2);
  }
}
5928
/// Convert a parsed MVE "vmov rt, rt2, q[idx], q[idx2]" form to an MCInst,
/// dropping the redundant second copy of the Q register.
void ARMAsmParser::cvtMVEVMOVQtoDReg(
    MCInst &Inst, const OperandVector &Operands) {

  unsigned MnemonicOpsEndInd = getMnemonicOpsEndInd(Operands);
  unsigned CondI = findCondCodeInd(Operands, MnemonicOpsEndInd);

  // mnemonic, condition code, Rt, Rt2, Qd, idx, Qd again, idx2
  assert(Operands.size() == MnemonicOpsEndInd + 6);

  ((ARMOperand &)*Operands[MnemonicOpsEndInd]).addRegOperands(Inst, N: 1); // Rt
  ((ARMOperand &)*Operands[MnemonicOpsEndInd + 1])
      .addRegOperands(Inst, N: 1); // Rt2
  ((ARMOperand &)*Operands[MnemonicOpsEndInd + 2])
      .addRegOperands(Inst, N: 1); // Qd
  ((ARMOperand &)*Operands[MnemonicOpsEndInd + 3])
      .addMVEPairVectorIndexOperands(Inst, N: 1); // idx
  // skip the second copy of Qd (Operands[MnemonicOpsEndInd + 4])
  ((ARMOperand &)*Operands[MnemonicOpsEndInd + 5])
      .addMVEPairVectorIndexOperands(Inst, N: 1); // idx2
  if (CondI != 0) {
    ((ARMOperand &)*Operands[CondI])
        .addCondCodeOperands(Inst, N: 2); // condition code
  } else {
    // No condition-code operand was parsed; default to AL (always).
    ARMOperand Op =
        *ARMOperand::CreateCondCode(CC: ARMCC::AL, S: Operands[0]->getEndLoc(), Parser&: *this);
    Op.addCondCodeOperands(Inst, N: 2);
  }
}
5957
/// Parse an ARM memory expression, return false if successful else return true
/// or an error. The first token must be a '[' when called.
///
/// Recognizes the following bracketed forms (a trailing pre-index writeback
/// '!' is accepted after each and pushed as a separate token operand):
///   [Rn]                          - base register only
///   [Rn, :align] / [Rn:align]     - alignment specifier (16/32/64/128/256)
///   [Rn, #imm]                    - immediate offset (also '$imm', or a bare
///                                   integer/parenthesized expression)
///   [Rn, +/-Rm {, shift #amt}]    - register offset with optional shift
bool ARMAsmParser::parseMemory(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S, E;
  if (Parser.getTok().isNot(K: AsmToken::LBrac))
    return TokError(Msg: "Token is not a Left Bracket");
  S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat left bracket token.

  const AsmToken &BaseRegTok = Parser.getTok();
  MCRegister BaseReg = tryParseRegister();
  if (!BaseReg)
    return Error(L: BaseRegTok.getLoc(), Msg: "register expected");

  // The next token must either be a comma, a colon or a closing bracket.
  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(K: AsmToken::Colon) && !Tok.is(K: AsmToken::Comma) &&
      !Tok.is(K: AsmToken::RBrac))
    return Error(L: Tok.getLoc(), Msg: "malformed memory operand");

  // Base-register-only form: "[Rn]".
  if (Tok.is(K: AsmToken::RBrac)) {
    E = Tok.getEndLoc();
    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(Elt: ARMOperand::CreateMem(
        BaseReg, OffsetImm: nullptr, OffsetReg: 0, ShiftType: ARM_AM::no_shift, ShiftImm: 0, Alignment: 0, isNegative: false, S, E, Parser&: *this));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand. It's rather odd, but syntactically valid.
    if (Parser.getTok().is(K: AsmToken::Exclaim)) {
      Operands.push_back(
          Elt: ARMOperand::CreateToken(Str: "!", S: Parser.getTok().getLoc(), Parser&: *this));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) &&
         "Lost colon or comma in memory operand?!");
  if (Tok.is(K: AsmToken::Comma)) {
    Parser.Lex(); // Eat the comma.
  }

  // If we have a ':', it's an alignment specifier.
  if (Parser.getTok().is(K: AsmToken::Colon)) {
    Parser.Lex(); // Eat the ':'.
    E = Parser.getTok().getLoc();
    // Record where the alignment appeared, for use in later diagnostics.
    SMLoc AlignmentLoc = Tok.getLoc();

    const MCExpr *Expr;
    if (getParser().parseExpression(Res&: Expr))
      return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Expr);
    if (!CE)
      return Error (L: E, Msg: "constant expression expected");

    // The source alignment is in bits; CreateMem wants bytes.
    unsigned Align = 0;
    switch (CE->getValue()) {
    default:
      return Error(L: E,
                   Msg: "alignment specifier must be 16, 32, 64, 128, or 256 bits");
    case 16: Align = 2; break;
    case 32: Align = 4; break;
    case 64: Align = 8; break;
    case 128: Align = 16; break;
    case 256: Align = 32; break;
    }

    // Now we should have the closing ']'
    if (Parser.getTok().isNot(K: AsmToken::RBrac))
      return Error(L: Parser.getTok().getLoc(), Msg: "']' expected");
    E = Parser.getTok().getEndLoc();
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(Elt: ARMOperand::CreateMem(BaseReg, OffsetImm: nullptr, OffsetReg: 0,
                                             ShiftType: ARM_AM::no_shift, ShiftImm: 0, Alignment: Align, isNegative: false,
                                             S, E, Parser&: *this, AlignmentLoc));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(K: AsmToken::Exclaim)) {
      Operands.push_back(
          Elt: ARMOperand::CreateToken(Str: "!", S: Parser.getTok().getLoc(), Parser&: *this));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // If we have a '#' or '$', it's an immediate offset, else assume it's a
  // register offset. Be friendly and also accept a plain integer or expression
  // (without a leading hash) for gas compatibility.
  if (Parser.getTok().is(K: AsmToken::Hash) ||
      Parser.getTok().is(K: AsmToken::Dollar) ||
      Parser.getTok().is(K: AsmToken::LParen) ||
      Parser.getTok().is(K: AsmToken::Integer)) {
    if (Parser.getTok().is(K: AsmToken::Hash) ||
        Parser.getTok().is(K: AsmToken::Dollar))
      Parser.Lex(); // Eat '#' or '$'
    E = Parser.getTok().getLoc();

    // Remember whether a '-' sign was seen, so "#-0" can be told apart from
    // "#0" below (parseExpression folds the sign into the constant).
    bool isNegative = getParser().getTok().is(K: AsmToken::Minus);
    const MCExpr *Offset, *AdjustedOffset;
    if (getParser().parseExpression(Res&: Offset))
      return true;

    if (const auto *CE = dyn_cast<MCConstantExpr>(Val: Offset)) {
      // If the constant was #-0, represent it as
      // std::numeric_limits<int32_t>::min().
      int32_t Val = CE->getValue();
      if (isNegative && Val == 0)
        CE = MCConstantExpr::create(Value: std::numeric_limits<int32_t>::min(),
                                    Ctx&: getContext());
      // Don't worry about range checking the value here. That's handled by
      // the is*() predicates.
      AdjustedOffset = CE;
    } else
      AdjustedOffset = Offset;
    Operands.push_back(Elt: ARMOperand::CreateMem(BaseReg, OffsetImm: AdjustedOffset, OffsetReg: 0,
                                             ShiftType: ARM_AM::no_shift, ShiftImm: 0, Alignment: 0, isNegative: false, S,
                                             E, Parser&: *this));

    // Now we should have the closing ']'
    if (Parser.getTok().isNot(K: AsmToken::RBrac))
      return Error(L: Parser.getTok().getLoc(), Msg: "']' expected");
    E = Parser.getTok().getEndLoc();
    Parser.Lex(); // Eat right bracket token.

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(K: AsmToken::Exclaim)) {
      Operands.push_back(
          Elt: ARMOperand::CreateToken(Str: "!", S: Parser.getTok().getLoc(), Parser&: *this));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // The register offset is optionally preceded by a '+' or '-'
  bool isNegative = false;
  if (Parser.getTok().is(K: AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex(); // Eat the '-'.
  } else if (Parser.getTok().is(K: AsmToken::Plus)) {
    // Nothing to do.
    Parser.Lex(); // Eat the '+'.
  }

  E = Parser.getTok().getLoc();
  MCRegister OffsetReg = tryParseRegister();
  if (!OffsetReg)
    return Error(L: E, Msg: "register expected");

  // If there's a shift operator, handle it.
  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
  unsigned ShiftImm = 0;
  if (Parser.getTok().is(K: AsmToken::Comma)) {
    Parser.Lex(); // Eat the ','.
    if (parseMemRegOffsetShift(ShiftType, ShiftAmount&: ShiftImm))
      return true;
  }

  // Now we should have the closing ']'
  if (Parser.getTok().isNot(K: AsmToken::RBrac))
    return Error(L: Parser.getTok().getLoc(), Msg: "']' expected");
  E = Parser.getTok().getEndLoc();
  Parser.Lex(); // Eat right bracket token.

  Operands.push_back(Elt: ARMOperand::CreateMem(BaseReg, OffsetImm: nullptr, OffsetReg,
                                           ShiftType, ShiftImm, Alignment: 0, isNegative,
                                           S, E, Parser&: *this));

  // If there's a pre-indexing writeback marker, '!', just add it as a token
  // operand.
  if (Parser.getTok().is(K: AsmToken::Exclaim)) {
    Operands.push_back(
        Elt: ARMOperand::CreateToken(Str: "!", S: Parser.getTok().getLoc(), Parser&: *this));
    Parser.Lex(); // Eat the '!'.
  }

  return false;
}
6149
6150/// parseMemRegOffsetShift - one of these two:
6151/// ( lsl | lsr | asr | ror ) , # shift_amount
6152/// rrx
6153/// return true if it parses a shift otherwise it returns false.
6154bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
6155 unsigned &Amount) {
6156 MCAsmParser &Parser = getParser();
6157 SMLoc Loc = Parser.getTok().getLoc();
6158 const AsmToken &Tok = Parser.getTok();
6159 if (Tok.isNot(K: AsmToken::Identifier))
6160 return Error(L: Loc, Msg: "illegal shift operator");
6161 StringRef ShiftName = Tok.getString();
6162 if (ShiftName == "lsl" || ShiftName == "LSL" ||
6163 ShiftName == "asl" || ShiftName == "ASL")
6164 St = ARM_AM::lsl;
6165 else if (ShiftName == "lsr" || ShiftName == "LSR")
6166 St = ARM_AM::lsr;
6167 else if (ShiftName == "asr" || ShiftName == "ASR")
6168 St = ARM_AM::asr;
6169 else if (ShiftName == "ror" || ShiftName == "ROR")
6170 St = ARM_AM::ror;
6171 else if (ShiftName == "rrx" || ShiftName == "RRX")
6172 St = ARM_AM::rrx;
6173 else if (ShiftName == "uxtw" || ShiftName == "UXTW")
6174 St = ARM_AM::uxtw;
6175 else
6176 return Error(L: Loc, Msg: "illegal shift operator");
6177 Parser.Lex(); // Eat shift type token.
6178
6179 // rrx stands alone.
6180 Amount = 0;
6181 if (St != ARM_AM::rrx) {
6182 Loc = Parser.getTok().getLoc();
6183 // A '#' and a shift amount.
6184 const AsmToken &HashTok = Parser.getTok();
6185 if (HashTok.isNot(K: AsmToken::Hash) &&
6186 HashTok.isNot(K: AsmToken::Dollar))
6187 return Error(L: HashTok.getLoc(), Msg: "'#' expected");
6188 Parser.Lex(); // Eat hash token.
6189
6190 const MCExpr *Expr;
6191 if (getParser().parseExpression(Res&: Expr))
6192 return true;
6193 // Range check the immediate.
6194 // lsl, ror: 0 <= imm <= 31
6195 // lsr, asr: 0 <= imm <= 32
6196 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Expr);
6197 if (!CE)
6198 return Error(L: Loc, Msg: "shift amount must be an immediate");
6199 int64_t Imm = CE->getValue();
6200 if (Imm < 0 ||
6201 ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
6202 ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
6203 return Error(L: Loc, Msg: "immediate shift value out of range");
6204 // If <ShiftTy> #0, turn it into a no_shift.
6205 if (Imm == 0)
6206 St = ARM_AM::lsl;
6207 // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
6208 if (Imm == 32)
6209 Imm = 0;
6210 Amount = Imm;
6211 }
6212
6213 return false;
6214}
6215
/// parseFPImm - A floating point immediate expression operand.
///
/// Returns NoMatch (letting the generic operand parser try) unless the
/// mnemonic is one of the vmov.f{16,32,64} forms or fconsts/fconstd; this
/// keeps the integer NEON VMOV immediates out of the FP path.
ParseStatus ARMAsmParser::parseFPImm(OperandVector &Operands) {
  LLVM_DEBUG(dbgs() << "PARSE FPImm, Ops: " << Operands.size());

  MCAsmParser &Parser = getParser();
  // Anything that can accept a floating point constant as an operand
  // needs to go through here, as the regular parseExpression is
  // integer only.
  //
  // This routine still creates a generic Immediate operand, containing
  // a bitcast of the 64-bit floating point value. The various operands
  // that accept floats can check whether the value is valid for them
  // via the standard is*() predicates.

  SMLoc S = Parser.getTok().getLoc();

  if (Parser.getTok().isNot(K: AsmToken::Hash) &&
      Parser.getTok().isNot(K: AsmToken::Dollar))
    return ParseStatus::NoMatch;

  // Disambiguate the VMOV forms that can accept an FP immediate.
  // vmov.f32 <sreg>, #imm
  // vmov.f64 <dreg>, #imm
  // vmov.f32 <dreg>, #imm  @ vector f32x2
  // vmov.f32 <qreg>, #imm  @ vector f32x4
  //
  // There are also the NEON VMOV instructions which expect an
  // integer constant. Make sure we don't try to parse an FPImm
  // for these:
  // vmov.i{8|16|32|64} <dreg|qreg>, #imm

  // Scan the mnemonic tokens for a float type suffix (".f16"/".f32"/".f64").
  bool isVmovf = false;
  unsigned MnemonicOpsEndInd = getMnemonicOpsEndInd(Operands);
  for (unsigned I = 1; I < MnemonicOpsEndInd; ++I) {
    ARMOperand &TyOp = static_cast<ARMOperand &>(*Operands[I]);
    if (TyOp.isToken() &&
        (TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64" ||
         TyOp.getToken() == ".f16")) {
      isVmovf = true;
      break;
    }
  }

  ARMOperand &Mnemonic = static_cast<ARMOperand &>(*Operands[0]);
  bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() == "fconstd" ||
                                         Mnemonic.getToken() == "fconsts");
  if (!(isVmovf || isFconst))
    return ParseStatus::NoMatch;

  Parser.Lex(); // Eat '#' or '$'.

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = false;
  if (Parser.getTok().is(K: AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex();
  }
  const AsmToken &Tok = Parser.getTok();
  SMLoc Loc = Tok.getLoc();
  if (Tok.is(K: AsmToken::Real) && isVmovf) {
    // NOTE(review): the literal is parsed as IEEE single precision here even
    // for the .f64/.f16 forms; the is*() predicates / addOperands methods are
    // presumably expected to reinterpret the bit pattern - confirm.
    APFloat RealVal(APFloat::IEEEsingle(), Tok.getString());
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    // If we had a '-' in front, toggle the sign bit (bit 31 for single).
    IntVal ^= (uint64_t)isNegative << 31;
    Parser.Lex(); // Eat the token.
    Operands.push_back(
        Elt: ARMOperand::CreateImm(Val: MCConstantExpr::create(Value: IntVal, Ctx&: getContext()), S,
                               E: Parser.getTok().getLoc(), Parser&: *this));
    return ParseStatus::Success;
  }
  // Also handle plain integers. Instructions which allow floating point
  // immediates also allow a raw encoded 8-bit value.
  if (Tok.is(K: AsmToken::Integer) && isFconst) {
    int64_t Val = Tok.getIntVal();
    Parser.Lex(); // Eat the token.
    // The raw encoding must fit in 8 bits; decode it to the float it
    // represents and store that value's bit pattern as the immediate.
    if (Val > 255 || Val < 0)
      return Error(L: Loc, Msg: "encoded floating point value out of range");
    float RealVal = ARM_AM::getFPImmFloat(Imm: Val);
    Val = APFloat(RealVal).bitcastToAPInt().getZExtValue();

    Operands.push_back(
        Elt: ARMOperand::CreateImm(Val: MCConstantExpr::create(Value: Val, Ctx&: getContext()), S,
                               E: Parser.getTok().getLoc(), Parser&: *this));
    return ParseStatus::Success;
  }

  return Error(L: Loc, Msg: "invalid floating point immediate");
}
6304
/// Parse a arm instruction operand. For now this parses the operand regardless
/// of the mnemonic.
///
/// Dispatches on the first token of the operand: custom (tablegen'd) operand
/// parsers are tried first, then registers/shifted registers, label and
/// immediate expressions, memory operands ('['), register lists ('{'),
/// relocation prefixes (':lower16:' etc.) and the ldr-pseudo '=' form.
/// Returns true on error (with a diagnostic emitted), false on success with
/// the parsed operand(s) appended to Operands.
bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
  MCAsmParser &Parser = getParser();
  SMLoc S, E;

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  ParseStatus ResTy = MatchOperandParserImpl(Operands, Mnemonic);
  if (ResTy.isSuccess())
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy.isFailure())
    return true;

  switch (getLexer().getKind()) {
  default:
    Error(L: Parser.getTok().getLoc(), Msg: "unexpected token in operand");
    return true;
  case AsmToken::Identifier: {
    // If we've seen a branch mnemonic, the next operand must be a label. This
    // is true even if the label is a register name. So "br r1" means branch to
    // label "r1".
    bool ExpectLabel = Mnemonic == "b" || Mnemonic == "bl";
    if (!ExpectLabel) {
      // Try plain register (with optional writeback '!') first, then a
      // register-shifted-register operand.
      if (!tryParseRegisterWithWriteBack(Operands))
        return false;
      int Res = tryParseShiftRegister(Operands);
      if (Res == 0) // success
        return false;
      else if (Res == -1) // irrecoverable error
        return true;
      // If this is VMRS, check for the apsr_nzcv operand.
      if (Mnemonic == "vmrs" &&
          Parser.getTok().getString().equals_insensitive(RHS: "apsr_nzcv")) {
        S = Parser.getTok().getLoc();
        Parser.Lex();
        Operands.push_back(Elt: ARMOperand::CreateToken(Str: "APSR_nzcv", S, Parser&: *this));
        return false;
      }
    }

    // Fall though for the Identifier case that is not a register or a
    // special name.
    [[fallthrough]];
  }
  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
  case AsmToken::Integer: // things like 1f and 2b as a branch targets
  case AsmToken::String:  // quoted label names.
  case AsmToken::Dot: {   // . as a branch target
    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = Parser.getTok().getLoc();
    if (getParser().parseExpression(Res&: IdVal))
      return true;
    E = SMLoc::getFromPointer(Ptr: Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(Elt: ARMOperand::CreateImm(Val: IdVal, S, E, Parser&: *this));
    return false;
  }
  case AsmToken::LBrac:
    return parseMemory(Operands);
  case AsmToken::LCurly: {
    // Register list; ordering is not enforced for CLRM-style mnemonics.
    bool IsLazyLoadStore = Mnemonic == "vlldm" || Mnemonic == "vlstm";
    bool IsVSCCLRM = Mnemonic == "vscclrm";
    return parseRegisterList(Operands, EnforceOrder: !Mnemonic.starts_with(Prefix: "clr"), AllowRAAC: false,
                             IsLazyLoadStore, IsVSCCLRM);
  }
  case AsmToken::Dollar:
  case AsmToken::Hash: {
    // #42 -> immediate
    // $ 42 -> immediate
    // $foo -> symbol name
    // $42 -> symbol name
    S = Parser.getTok().getLoc();

    // Favor the interpretation of $-prefixed operands as symbol names.
    // Cases where immediates are explicitly expected are handled by their
    // specific ParseMethod implementations.
    auto AdjacentToken = getLexer().peekTok(/*ShouldSkipSpace=*/false);
    bool ExpectIdentifier = Parser.getTok().is(K: AsmToken::Dollar) &&
                            (AdjacentToken.is(K: AsmToken::Identifier) ||
                             AdjacentToken.is(K: AsmToken::Integer));
    if (!ExpectIdentifier) {
      // Token is not part of identifier. Drop leading $ or # before parsing
      // expression.
      Parser.Lex();
    }

    if (Parser.getTok().isNot(K: AsmToken::Colon)) {
      bool IsNegative = Parser.getTok().is(K: AsmToken::Minus);
      const MCExpr *ImmVal;
      if (getParser().parseExpression(Res&: ImmVal))
        return true;
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: ImmVal);
      if (CE) {
        int32_t Val = CE->getValue();
        // #-0 is distinguished from #0 by encoding it as INT32_MIN.
        if (IsNegative && Val == 0)
          ImmVal = MCConstantExpr::create(Value: std::numeric_limits<int32_t>::min(),
                                          Ctx&: getContext());
      }
      E = SMLoc::getFromPointer(Ptr: Parser.getTok().getLoc().getPointer() - 1);
      Operands.push_back(Elt: ARMOperand::CreateImm(Val: ImmVal, S, E, Parser&: *this));

      // There can be a trailing '!' on operands that we want as a separate
      // '!' Token operand. Handle that here. For example, the compatibility
      // alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
      if (Parser.getTok().is(K: AsmToken::Exclaim)) {
        Operands.push_back(Elt: ARMOperand::CreateToken(
            Str: Parser.getTok().getString(), S: Parser.getTok().getLoc(), Parser&: *this));
        Parser.Lex(); // Eat exclaim token
      }
      return false;
    }
    // w/ a ':' after the '#', it's just like a plain ':'.
    [[fallthrough]];
  }
  case AsmToken::Colon: {
    S = Parser.getTok().getLoc();
    // ":lower16:", ":upper16:", ":lower0_7:", ":lower8_15:", ":upper0_7:" and
    // ":upper8_15:", expression prefixes
    // FIXME: Check it's an expression prefix,
    // e.g. (FOO - :lower16:BAR) isn't legal.
    ARM::Specifier Spec;
    if (parsePrefix(Spec))
      return true;

    const MCExpr *SubExprVal;
    if (getParser().parseExpression(Res&: SubExprVal))
      return true;

    // Wrap the inner expression with the relocation specifier.
    const auto *ExprVal =
        MCSpecifierExpr::create(Expr: SubExprVal, S: Spec, Ctx&: getContext());
    E = SMLoc::getFromPointer(Ptr: Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(Elt: ARMOperand::CreateImm(Val: ExprVal, S, E, Parser&: *this));
    return false;
  }
  case AsmToken::Equal: {
    S = Parser.getTok().getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return Error(L: S, Msg: "unexpected token in operand");
    Parser.Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(Res&: SubExprVal))
      return true;
    E = SMLoc::getFromPointer(Ptr: Parser.getTok().getLoc().getPointer() - 1);

    // execute-only: we assume that assembly programmers know what they are
    // doing and allow literal pool creation here
    Operands.push_back(
        Elt: ARMOperand::CreateConstantPoolImm(Val: SubExprVal, S, E, Parser&: *this));
    return false;
  }
  }
}
6462
6463bool ARMAsmParser::parseImmExpr(int64_t &Out) {
6464 const MCExpr *Expr = nullptr;
6465 SMLoc L = getParser().getTok().getLoc();
6466 if (check(P: getParser().parseExpression(Res&: Expr), Loc: L, Msg: "expected expression"))
6467 return true;
6468 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Val: Expr);
6469 if (check(P: !Value, Loc: L, Msg: "expected constant expression"))
6470 return true;
6471 Out = Value->getValue();
6472 return false;
6473}
6474
// parsePrefix - Parse ARM 16-bit relocations expression prefixes, i.e.
// :lower16: and :upper16: and Thumb 8-bit relocation expression prefixes, i.e.
// :upper8_15:, :upper0_7:, :lower8_15: and :lower0_7:
//
// On entry the current token is the leading ':' (an optional '#' before it is
// also consumed). On success, Spec holds the recognized specifier, the
// trailing ':' (and an optional trailing '#') has been eaten, and false is
// returned; true is returned on error with a diagnostic emitted.
bool ARMAsmParser::parsePrefix(ARM::Specifier &Spec) {
  MCAsmParser &Parser = getParser();
  Spec = ARM::S_None;

  // consume an optional '#' (GNU compatibility)
  if (getLexer().is(K: AsmToken::Hash))
    Parser.Lex();

  assert(getLexer().is(AsmToken::Colon) && "expected a :");
  Parser.Lex(); // Eat ':'

  if (getLexer().isNot(K: AsmToken::Identifier)) {
    Error(L: Parser.getTok().getLoc(), Msg: "expected prefix identifier in operand");
    return true;
  }

  // Bitmask of object-file formats, used to reject prefixes that the current
  // output format cannot represent as a relocation.
  enum {
    COFF = (1 << MCContext::IsCOFF),
    ELF = (1 << MCContext::IsELF),
    MACHO = (1 << MCContext::IsMachO),
    WASM = (1 << MCContext::IsWasm),
  };
  static const struct PrefixEntry {
    const char *Spelling;
    ARM::Specifier Spec;
    uint8_t SupportedFormats;
  } PrefixEntries[] = {
      {.Spelling: "upper16", .Spec: ARM::S_HI16, .SupportedFormats: COFF | ELF | MACHO},
      {.Spelling: "lower16", .Spec: ARM::S_LO16, .SupportedFormats: COFF | ELF | MACHO},
      {.Spelling: "upper8_15", .Spec: ARM::S_HI_8_15, .SupportedFormats: ELF},
      {.Spelling: "upper0_7", .Spec: ARM::S_HI_0_7, .SupportedFormats: ELF},
      {.Spelling: "lower8_15", .Spec: ARM::S_LO_8_15, .SupportedFormats: ELF},
      {.Spelling: "lower0_7", .Spec: ARM::S_LO_0_7, .SupportedFormats: ELF},
  };

  StringRef IDVal = Parser.getTok().getIdentifier();

  // Look the spelled prefix up in the table above.
  const auto &Prefix =
      llvm::find_if(Range: PrefixEntries, P: [&IDVal](const PrefixEntry &PE) {
        return PE.Spelling == IDVal;
      });
  if (Prefix == std::end(arr: PrefixEntries)) {
    Error(L: Parser.getTok().getLoc(), Msg: "unexpected prefix in operand");
    return true;
  }

  uint8_t CurrentFormat;
  switch (getContext().getObjectFileType()) {
  case MCContext::IsMachO:
    CurrentFormat = MACHO;
    break;
  case MCContext::IsELF:
    CurrentFormat = ELF;
    break;
  case MCContext::IsCOFF:
    CurrentFormat = COFF;
    break;
  case MCContext::IsWasm:
    CurrentFormat = WASM;
    break;
  case MCContext::IsGOFF:
  case MCContext::IsSPIRV:
  case MCContext::IsXCOFF:
  case MCContext::IsDXContainer:
    llvm_unreachable("unexpected object format");
    break;
  }

  // Reject the prefix if the current format's bit isn't set for it.
  if (~Prefix->SupportedFormats & CurrentFormat) {
    Error(L: Parser.getTok().getLoc(),
          Msg: "cannot represent relocation in the current file format");
    return true;
  }

  Spec = Prefix->Spec;
  Parser.Lex();

  if (getLexer().isNot(K: AsmToken::Colon)) {
    Error(L: Parser.getTok().getLoc(), Msg: "unexpected token after prefix");
    return true;
  }
  Parser.Lex(); // Eat the last ':'

  // consume an optional trailing '#' (GNU compatibility)
  parseOptionalToken(T: AsmToken::Hash);

  return false;
}
6566
/// Given a mnemonic, split out possible predication code and carry
/// setting letters to form a canonical mnemonic and flags.
///
/// On return:
///  * PredicationCode    - trailing ARM condition-code suffix (ARMCC::AL if
///                         none was split off).
///  * VPTPredicationCode - trailing MVE VPT 't'/'e' suffix (ARMVCC::None if
///                         none).
///  * CarrySetting       - true if a flag-setting 's' suffix was split off.
///  * ProcessorIMod      - "ie"/"id" interrupt-mode suffix of "cps" (0 if
///                         none).
///  * ITMask             - condition mask glued onto "it"/"vpt"/"vpst".
/// Returns the canonical mnemonic with the recognized suffixes removed.
//
// FIXME: Would be nice to autogen this.
// FIXME: This is a bit of a maze of special cases.
StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic, StringRef ExtraToken,
                                      ARMCC::CondCodes &PredicationCode,
                                      ARMVCC::VPTCodes &VPTPredicationCode,
                                      bool &CarrySetting,
                                      unsigned &ProcessorIMod,
                                      StringRef &ITMask) {
  PredicationCode = ARMCC::AL;
  VPTPredicationCode = ARMVCC::None;
  CarrySetting = false;
  ProcessorIMod = 0;

  // Ignore some mnemonics we know aren't predicated forms.
  //
  // FIXME: Would be nice to autogen this.
  if ((Mnemonic == "movs" && isThumb()) || Mnemonic == "teq" ||
      Mnemonic == "vceq" || Mnemonic == "svc" || Mnemonic == "mls" ||
      Mnemonic == "smmls" || Mnemonic == "vcls" || Mnemonic == "vmls" ||
      Mnemonic == "vnmls" || Mnemonic == "vacge" || Mnemonic == "vcge" ||
      Mnemonic == "vclt" || Mnemonic == "vacgt" || Mnemonic == "vaclt" ||
      Mnemonic == "vacle" || Mnemonic == "hlt" || Mnemonic == "vcgt" ||
      Mnemonic == "vcle" || Mnemonic == "smlal" || Mnemonic == "umaal" ||
      Mnemonic == "umlal" || Mnemonic == "vabal" || Mnemonic == "vmlal" ||
      Mnemonic == "vpadal" || Mnemonic == "vqdmlal" || Mnemonic == "fmuls" ||
      Mnemonic == "vmaxnm" || Mnemonic == "vminnm" || Mnemonic == "vcvta" ||
      Mnemonic == "vcvtn" || Mnemonic == "vcvtp" || Mnemonic == "vcvtm" ||
      Mnemonic == "vrinta" || Mnemonic == "vrintn" || Mnemonic == "vrintp" ||
      Mnemonic == "vrintm" || Mnemonic == "hvc" ||
      Mnemonic.starts_with(Prefix: "vsel") || Mnemonic == "vins" ||
      Mnemonic == "vmovx" || Mnemonic == "bxns" || Mnemonic == "blxns" ||
      Mnemonic == "vdot" || Mnemonic == "vmmla" || Mnemonic == "vudot" ||
      Mnemonic == "vsdot" || Mnemonic == "vcmla" || Mnemonic == "vcadd" ||
      Mnemonic == "vfmal" || Mnemonic == "vfmsl" || Mnemonic == "wls" ||
      Mnemonic == "le" || Mnemonic == "dls" || Mnemonic == "csel" ||
      Mnemonic == "csinc" || Mnemonic == "csinv" || Mnemonic == "csneg" ||
      Mnemonic == "cinc" || Mnemonic == "cinv" || Mnemonic == "cneg" ||
      Mnemonic == "cset" || Mnemonic == "csetm" || Mnemonic == "aut" ||
      Mnemonic == "pac" || Mnemonic == "pacbti" || Mnemonic == "bti")
    return Mnemonic;

  // First, split out any predication code. Ignore mnemonics we know aren't
  // predicated but do have a carry-set and so weren't caught above.
  // The candidate condition code is the last two characters of the mnemonic.
  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
      Mnemonic != "sbcs" && Mnemonic != "rscs" &&
      !(hasMVE() &&
        (Mnemonic == "vmine" || Mnemonic == "vshle" || Mnemonic == "vshlt" ||
         Mnemonic == "vshllt" || Mnemonic == "vrshle" || Mnemonic == "vrshlt" ||
         Mnemonic == "vmvne" || Mnemonic == "vorne" || Mnemonic == "vnege" ||
         Mnemonic == "vnegt" || Mnemonic == "vmule" || Mnemonic == "vmult" ||
         Mnemonic == "vrintne" || Mnemonic == "vcmult" ||
         Mnemonic == "vcmule" || Mnemonic == "vpsele" || Mnemonic == "vpselt" ||
         Mnemonic.starts_with(Prefix: "vq")))) {
    unsigned CC = ARMCondCodeFromString(CC: Mnemonic.substr(Start: Mnemonic.size()-2));
    if (CC != ~0U) {
      Mnemonic = Mnemonic.slice(Start: 0, End: Mnemonic.size() - 2);
      PredicationCode = static_cast<ARMCC::CondCodes>(CC);
    }
  }

  // Next, determine if we have a carry setting bit. We explicitly ignore all
  // the instructions we know end in 's'.
  if (Mnemonic.ends_with(Suffix: "s") &&
      !(Mnemonic == "cps" || Mnemonic == "mls" || Mnemonic == "mrs" ||
        Mnemonic == "smmls" || Mnemonic == "vabs" || Mnemonic == "vcls" ||
        Mnemonic == "vmls" || Mnemonic == "vmrs" || Mnemonic == "vnmls" ||
        Mnemonic == "vqabs" || Mnemonic == "vrecps" || Mnemonic == "vrsqrts" ||
        Mnemonic == "srs" || Mnemonic == "flds" || Mnemonic == "fmrs" ||
        Mnemonic == "fsqrts" || Mnemonic == "fsubs" || Mnemonic == "fsts" ||
        Mnemonic == "fcpys" || Mnemonic == "fdivs" || Mnemonic == "fmuls" ||
        Mnemonic == "fcmps" || Mnemonic == "fcmpzs" || Mnemonic == "vfms" ||
        Mnemonic == "vfnms" || Mnemonic == "fconsts" || Mnemonic == "bxns" ||
        Mnemonic == "blxns" || Mnemonic == "vfmas" || Mnemonic == "vmlas" ||
        (Mnemonic == "movs" && isThumb()))) {
    Mnemonic = Mnemonic.slice(Start: 0, End: Mnemonic.size() - 1);
    CarrySetting = true;
  }

  // The "cps" instruction can have a interrupt mode operand which is glued into
  // the mnemonic. Check if this is the case, split it and parse the imod op
  if (Mnemonic.starts_with(Prefix: "cps")) {
    // Split out any imod code.
    unsigned IMod =
        StringSwitch<unsigned>(Mnemonic.substr(Start: Mnemonic.size()-2, N: 2))
            .Case(S: "ie", Value: ARM_PROC::IE)
            .Case(S: "id", Value: ARM_PROC::ID)
            .Default(Value: ~0U);
    if (IMod != ~0U) {
      Mnemonic = Mnemonic.slice(Start: 0, End: Mnemonic.size()-2);
      ProcessorIMod = IMod;
    }
  }

  // For VPT-predicable mnemonics, try to split a trailing 't'/'e' vector
  // predication suffix. The excluded names legitimately end in 't'. Note this
  // path returns early, so the it/vpt/vpst mask handling below is skipped.
  if (isMnemonicVPTPredicable(Mnemonic, ExtraToken) && Mnemonic != "vmovlt" &&
      Mnemonic != "vshllt" && Mnemonic != "vrshrnt" && Mnemonic != "vshrnt" &&
      Mnemonic != "vqrshrunt" && Mnemonic != "vqshrunt" &&
      Mnemonic != "vqrshrnt" && Mnemonic != "vqshrnt" && Mnemonic != "vmullt" &&
      Mnemonic != "vqmovnt" && Mnemonic != "vqmovunt" && Mnemonic != "vmovnt" &&
      Mnemonic != "vqdmullt" && Mnemonic != "vpnot" && Mnemonic != "vcvtt" &&
      Mnemonic != "vcvt") {
    unsigned VCC =
        ARMVectorCondCodeFromString(CC: Mnemonic.substr(Start: Mnemonic.size() - 1));
    if (VCC != ~0U) {
      Mnemonic = Mnemonic.slice(Start: 0, End: Mnemonic.size()-1);
      VPTPredicationCode = static_cast<ARMVCC::VPTCodes>(VCC);
    }
    return Mnemonic;
  }

  // The "it" instruction has the condition mask on the end of the mnemonic.
  if (Mnemonic.starts_with(Prefix: "it")) {
    ITMask = Mnemonic.substr(Start: 2);
    Mnemonic = Mnemonic.slice(Start: 0, End: 2);
  }

  if (Mnemonic.starts_with(Prefix: "vpst")) {
    ITMask = Mnemonic.substr(Start: 4);
    Mnemonic = Mnemonic.slice(Start: 0, End: 4);
  } else if (Mnemonic.starts_with(Prefix: "vpt")) {
    ITMask = Mnemonic.substr(Start: 3);
    Mnemonic = Mnemonic.slice(Start: 0, End: 3);
  }

  return Mnemonic;
}
6697
/// Given a canonical mnemonic, determine if the instruction ever allows
/// inclusion of carry set or predication code operands.
///
/// \param Mnemonic   canonical (suffix-stripped) mnemonic.
/// \param ExtraToken the following '.xxx' size/type token, if any.
/// \param FullInst   the complete, unsplit instruction mnemonic text.
/// \param [out] CanAcceptCarrySet          true if an 's' suffix is valid.
/// \param [out] CanAcceptPredicationCode   true if a condition code is valid.
/// \param [out] CanAcceptVPTPredicationCode true if a VPT 't'/'e' is valid.
//
// FIXME: It would be nice to autogen this.
void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic,
                                         StringRef ExtraToken,
                                         StringRef FullInst,
                                         bool &CanAcceptCarrySet,
                                         bool &CanAcceptPredicationCode,
                                         bool &CanAcceptVPTPredicationCode) {
  CanAcceptVPTPredicationCode = isMnemonicVPTPredicable(Mnemonic, ExtraToken);

  // Flag-setting forms; a few are ARM-mode only.
  CanAcceptCarrySet =
      Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
      Mnemonic == "add" || Mnemonic == "adc" || Mnemonic == "mul" ||
      Mnemonic == "bic" || Mnemonic == "asr" || Mnemonic == "orr" ||
      Mnemonic == "mvn" || Mnemonic == "rsb" || Mnemonic == "rsc" ||
      Mnemonic == "orn" || Mnemonic == "sbc" || Mnemonic == "eor" ||
      Mnemonic == "neg" || Mnemonic == "vfm" || Mnemonic == "vfnm" ||
      (!isThumb() &&
       (Mnemonic == "smull" || Mnemonic == "mov" || Mnemonic == "mla" ||
        Mnemonic == "smlal" || Mnemonic == "umlal" || Mnemonic == "umull"));

  if (Mnemonic == "bkpt" || Mnemonic == "cbnz" || Mnemonic == "setend" ||
      Mnemonic == "cps" || Mnemonic == "it" || Mnemonic == "cbz" ||
      Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic == "udf" ||
      Mnemonic.starts_with(Prefix: "crc32") || Mnemonic.starts_with(Prefix: "cps") ||
      Mnemonic.starts_with(Prefix: "vsel") || Mnemonic == "vmaxnm" ||
      Mnemonic == "vminnm" || Mnemonic == "vcvta" || Mnemonic == "vcvtn" ||
      Mnemonic == "vcvtp" || Mnemonic == "vcvtm" || Mnemonic == "vrinta" ||
      Mnemonic == "vrintn" || Mnemonic == "vrintp" || Mnemonic == "vrintm" ||
      Mnemonic.starts_with(Prefix: "aes") || Mnemonic == "hvc" ||
      Mnemonic == "setpan" || Mnemonic.starts_with(Prefix: "sha1") ||
      Mnemonic.starts_with(Prefix: "sha256") ||
      (FullInst.starts_with(Prefix: "vmull") && FullInst.ends_with(Suffix: ".p64")) ||
      Mnemonic == "vmovx" || Mnemonic == "vins" || Mnemonic == "vudot" ||
      Mnemonic == "vsdot" || Mnemonic == "vcmla" || Mnemonic == "vcadd" ||
      Mnemonic == "vfmal" || Mnemonic == "vfmsl" || Mnemonic == "vfmat" ||
      Mnemonic == "vfmab" || Mnemonic == "vdot" || Mnemonic == "vmmla" ||
      Mnemonic == "sb" || Mnemonic == "ssbb" || Mnemonic == "pssbb" ||
      Mnemonic == "vsmmla" || Mnemonic == "vummla" || Mnemonic == "vusmmla" ||
      Mnemonic == "vusdot" || Mnemonic == "vsudot" || Mnemonic == "bfcsel" ||
      Mnemonic == "wls" || Mnemonic == "dls" || Mnemonic == "le" ||
      Mnemonic == "csel" || Mnemonic == "csinc" || Mnemonic == "csinv" ||
      Mnemonic == "csneg" || Mnemonic == "cinc" || Mnemonic == "cinv" ||
      Mnemonic == "cneg" || Mnemonic == "cset" || Mnemonic == "csetm" ||
      (hasCDE() && MS.isCDEInstr(Mnemonic) &&
       !MS.isITPredicableCDEInstr(Mnemonic)) ||
      Mnemonic.starts_with(Prefix: "vpt") || Mnemonic.starts_with(Prefix: "vpst") ||
      Mnemonic == "pac" || Mnemonic == "pacbti" || Mnemonic == "aut" ||
      Mnemonic == "bti" ||
      (hasMVE() &&
       (Mnemonic.starts_with(Prefix: "vst2") || Mnemonic.starts_with(Prefix: "vld2") ||
        Mnemonic.starts_with(Prefix: "vst4") || Mnemonic.starts_with(Prefix: "vld4") ||
        Mnemonic.starts_with(Prefix: "wlstp") || Mnemonic.starts_with(Prefix: "dlstp") ||
        Mnemonic.starts_with(Prefix: "letp")))) {
    // These mnemonics are never predicable
    CanAcceptPredicationCode = false;
  } else if (!isThumb()) {
    // Some instructions are only predicable in Thumb mode
    CanAcceptPredicationCode =
        Mnemonic != "cdp2" && Mnemonic != "clrex" && Mnemonic != "mcr2" &&
        Mnemonic != "mcrr2" && Mnemonic != "mrc2" && Mnemonic != "mrrc2" &&
        Mnemonic != "dmb" && Mnemonic != "dfb" && Mnemonic != "dsb" &&
        Mnemonic != "isb" && Mnemonic != "pld" && Mnemonic != "pli" &&
        Mnemonic != "pldw" && Mnemonic != "ldc2" && Mnemonic != "ldc2l" &&
        Mnemonic != "stc2" && Mnemonic != "stc2l" && Mnemonic != "tsb" &&
        !Mnemonic.starts_with(Prefix: "rfe") && !Mnemonic.starts_with(Prefix: "srs");
  } else if (isThumbOne()) {
    // Thumb1: "movs" is always flag-setting, and pre-v6M "nop" is a pseudo.
    if (hasV6MOps())
      CanAcceptPredicationCode = Mnemonic != "movs";
    else
      CanAcceptPredicationCode = Mnemonic != "nop" && Mnemonic != "movs";
  } else
    CanAcceptPredicationCode = true;
}
6775
6776bool operandsContainWide(OperandVector &Operands, unsigned MnemonicOpsEndInd) {
6777 for (unsigned I = 0; I < MnemonicOpsEndInd; ++I) {
6778 auto &Op = static_cast<ARMOperand &>(*Operands[I]);
6779 if (Op.isToken() && Op.getToken() == ".w")
6780 return true;
6781 }
6782 return false;
6783}
6784
// Some Thumb instructions have two operand forms that are not
// available as three operand, convert to two operand form if possible.
//
// FIXME: We would really like to be able to tablegen'erate this.
void ARMAsmParser::tryConvertingToTwoOperandForm(
    StringRef Mnemonic, ARMCC::CondCodes PredicationCode, bool CarrySetting,
    OperandVector &Operands, unsigned MnemonicOpsEndInd) {

  // An explicit ".w" qualifier asks for the wide (32-bit) encoding, so the
  // narrow two-operand form must not be substituted.
  if (operandsContainWide(Operands, MnemonicOpsEndInd))
    return;
  // Only the exact three-operand shape (Rd, Rn, <Rm|#imm>) is a candidate.
  if (Operands.size() != MnemonicOpsEndInd + 3)
    return;

  const auto &Op3 = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]);
  auto &Op4 = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1]);
  if (!Op3.isReg() || !Op4.isReg())
    return;

  auto Op3Reg = Op3.getReg();
  auto Op4Reg = Op4.getReg();

  // For most Thumb2 cases we just generate the 3 operand form and reduce
  // it in processInstruction(), but the 3 operand form of ADD (t2ADDrr)
  // won't accept SP or PC so we do the transformation here taking care
  // with immediate range in the 'add sp, sp #imm' case.
  auto &Op5 = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 2]);
  if (isThumbTwo()) {
    if (Mnemonic != "add")
      return;
    // Transform if any operand is PC ...
    bool TryTransform = Op3Reg == ARM::PC || Op4Reg == ARM::PC ||
                        (Op5.isReg() && Op5.getReg() == ARM::PC);
    if (!TryTransform) {
      // ... or SP, except 'add sp, sp, #imm' with an immediate outside the
      // 16-bit encoding's range (imm0_508, multiple of 4), which must stay
      // in the wide three-operand form.
      TryTransform = (Op3Reg == ARM::SP || Op4Reg == ARM::SP ||
                      (Op5.isReg() && Op5.getReg() == ARM::SP)) &&
                     !(Op3Reg == ARM::SP && Op4Reg == ARM::SP &&
                       Op5.isImm() && !Op5.isImm0_508s4());
    }
    if (!TryTransform)
      return;
  } else if (!isThumbOne())
    return;

  // Only these mnemonics have a narrow two-operand form.
  if (!(Mnemonic == "add" || Mnemonic == "sub" || Mnemonic == "and" ||
        Mnemonic == "eor" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
        Mnemonic == "asr" || Mnemonic == "adc" || Mnemonic == "sbc" ||
        Mnemonic == "ror" || Mnemonic == "orr" || Mnemonic == "bic"))
    return;

  // If first 2 operands of a 3 operand instruction are the same
  // then transform to 2 operand version of the same instruction
  // e.g. 'adds r0, r0, #1' transforms to 'adds r0, #1'
  bool Transform = Op3Reg == Op4Reg;

  // For commutative operations, we might be able to transform if we swap
  // Op4 and Op5. The 'ADD Rdm, SP, Rdm' form is already handled specially
  // as tADDrsp.
  const ARMOperand *LastOp = &Op5;
  bool Swap = false;
  if (!Transform && Op5.isReg() && Op3Reg == Op5.getReg() &&
      ((Mnemonic == "add" && Op4Reg != ARM::SP) ||
       Mnemonic == "and" || Mnemonic == "eor" ||
       Mnemonic == "adc" || Mnemonic == "orr")) {
    Swap = true;
    LastOp = &Op4;
    Transform = true;
  }

  // If both registers are the same then remove one of them from
  // the operand list, with certain exceptions.
  if (Transform) {
    // Don't transform 'adds Rd, Rd, Rm' or 'sub{s} Rd, Rd, Rm' because the
    // 2 operand forms don't exist.
    if (((Mnemonic == "add" && CarrySetting) || Mnemonic == "sub") &&
        LastOp->isReg())
      Transform = false;

    // Don't transform 'add/sub{s} Rd, Rd, #imm' if the immediate fits into
    // 3-bits because the ARMARM says not to.
    if ((Mnemonic == "add" || Mnemonic == "sub") && LastOp->isImm0_7())
      Transform = false;
  }

  if (Transform) {
    if (Swap)
      std::swap(a&: Op4, b&: Op5);
    // Drop the duplicated register (the original Op3/Rd slot).
    Operands.erase(CI: Operands.begin() + MnemonicOpsEndInd);
  }
}
6873
6874// this function returns true if the operand is one of the following
6875// relocations: :upper8_15:, :upper0_7:, :lower8_15: or :lower0_7:
6876static bool isThumbI8Relocation(MCParsedAsmOperand &MCOp) {
6877 ARMOperand &Op = static_cast<ARMOperand &>(MCOp);
6878 if (!Op.isImm())
6879 return false;
6880 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Op.getImm());
6881 if (CE)
6882 return false;
6883 const MCExpr *E = dyn_cast<MCExpr>(Val: Op.getImm());
6884 if (!E)
6885 return false;
6886 auto *ARM16Expr = dyn_cast<MCSpecifierExpr>(Val: E);
6887 if (ARM16Expr && (ARM16Expr->getSpecifier() == ARM::S_HI_8_15 ||
6888 ARM16Expr->getSpecifier() == ARM::S_HI_0_7 ||
6889 ARM16Expr->getSpecifier() == ARM::S_LO_8_15 ||
6890 ARM16Expr->getSpecifier() == ARM::S_LO_0_7))
6891 return true;
6892 return false;
6893}
6894
// Decide whether the vector-predicate (VPT) operand should be omitted for
// this mnemonic. Returns true to omit it (scalar / never-predicated form),
// false when the instruction takes a vector predicate.
bool ARMAsmParser::shouldOmitVectorPredicateOperand(
    StringRef Mnemonic, OperandVector &Operands, unsigned MnemonicOpsEndInd) {
  // Without MVE, or before any real operands have been parsed, never add one.
  if (!hasMVE() || Operands.size() <= MnemonicOpsEndInd)
    return true;

  // MVE interleaving loads/stores cannot be VPT-predicated.
  if (Mnemonic.starts_with(Prefix: "vld2") || Mnemonic.starts_with(Prefix: "vld4") ||
      Mnemonic.starts_with(Prefix: "vst2") || Mnemonic.starts_with(Prefix: "vst4"))
    return true;

  // vctp/vpnot always use the vector form.
  if (Mnemonic.starts_with(Prefix: "vctp") || Mnemonic.starts_with(Prefix: "vpnot"))
    return false;

  if (Mnemonic.starts_with(Prefix: "vmov") &&
      !(Mnemonic.starts_with(Prefix: "vmovl") || Mnemonic.starts_with(Prefix: "vmovn") ||
        Mnemonic.starts_with(Prefix: "vmovx"))) {
    // Plain vmov: a vector index or an S/D register anywhere in the operand
    // list means this is a scalar/FP move, so omit the vector predicate.
    // Note this deliberately scans all operands, including mnemonic tokens.
    for (auto &Operand : Operands) {
      if (static_cast<ARMOperand &>(*Operand).isVectorIndex() ||
          ((*Operand).isReg() &&
           (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
                Reg: (*Operand).getReg()) ||
            ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
                Reg: (*Operand).getReg())))) {
        return true;
      }
    }
    return false;
  } else {
    // All other mnemonics: presence of a Q register (or vector index) selects
    // the MVE form, which needs the vector predicate operand.
    for (auto &Operand : Operands) {
      // We check the larger class QPR instead of just the legal class
      // MQPR, to more accurately report errors when using Q registers
      // outside of the allowed range.
      if (static_cast<ARMOperand &>(*Operand).isVectorIndex() ||
          static_cast<ARMOperand &>(*Operand).isQReg())
        return false;
    }
    return true;
  }
}
6933
6934// FIXME: This bit should probably be handled via an explicit match class
6935// in the .td files that matches the suffix instead of having it be
6936// a literal string token the way it is now.
6937static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
6938 return Mnemonic.starts_with(Prefix: "vldm") || Mnemonic.starts_with(Prefix: "vstm");
6939}
6940
6941static void applyMnemonicAliases(StringRef &Mnemonic,
6942 const FeatureBitset &Features,
6943 unsigned VariantID);
6944
6945// The GNU assembler has aliases of ldrd, strd, ldrexd, strexd, ldaexd, and
6946// stlexd with the second register omitted. We don't have a way to do that in
6947// tablegen, so fix it up here.
6948//
6949// We have to be careful to not emit an invalid Rt2 here, because the rest of
// the assembly parser could then generate confusing diagnostics referring to
6951// it. If we do find anything that prevents us from doing the transformation we
6952// bail out, and let the assembly parser report an error on the instruction as
6953// it is written.
void ARMAsmParser::fixupGNULDRDAlias(StringRef Mnemonic,
                                     OperandVector &Operands,
                                     unsigned MnemonicOpsEndInd) {
  // Only the dual-register load/store (and exclusive/acquire/release)
  // mnemonics have the GNU single-Rt alias.
  if (Mnemonic != "ldrd" && Mnemonic != "strd" && Mnemonic != "ldrexd" &&
      Mnemonic != "strexd" && Mnemonic != "ldaexd" && Mnemonic != "stlexd")
    return;

  // The store-exclusive forms carry a status register first, so Rt sits one
  // operand further along.
  unsigned IdX = Mnemonic == "strexd" || Mnemonic == "stlexd"
                     ? MnemonicOpsEndInd + 1
                     : MnemonicOpsEndInd;

  if (Operands.size() < IdX + 2)
    return;

  ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[IdX]);
  ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[IdX + 1]);

  // The alias only applies to "<mnemonic> Rt, [mem]", i.e. Rt is a GPR and
  // the operand that would be Rt2 is already the memory operand.
  if (!Op2.isReg())
    return;
  if (!Op3.isGPRMem())
    return;

  const MCRegisterClass &GPR = MRI->getRegClass(i: ARM::GPRRegClassID);
  if (!GPR.contains(Reg: Op2.getReg()))
    return;

  unsigned RtEncoding = MRI->getEncodingValue(Reg: Op2.getReg());
  if (!isThumb() && (RtEncoding & 1)) {
    // In ARM mode, the registers must be from an aligned pair, this
    // restriction does not apply in Thumb mode.
    return;
  }
  if (Op2.getReg() == ARM::PC)
    return;
  // Rt2 is the next register by encoding value; it must exist, must not be
  // PC, and SP is only acceptable as Rt2 from ARMv8 onwards.
  MCRegister PairedReg = GPR.getRegister(i: RtEncoding + 1);
  if (!PairedReg || PairedReg == ARM::PC ||
      (PairedReg == ARM::SP && !hasV8Ops()))
    return;

  // Synthesise the omitted Rt2 operand directly after Rt.
  Operands.insert(I: Operands.begin() + IdX + 1,
                  Elt: ARMOperand::CreateReg(Reg: PairedReg, S: Op2.getStartLoc(),
                                         E: Op2.getEndLoc(), Parser&: *this));
}
6997
6998// Dual-register instruction have the following syntax:
6999// <mnemonic> <predicate>? <coproc>, <Rdest>, <Rdest+1>, <Rsrc>, ..., #imm
7000// This function tries to remove <Rdest+1> and replace <Rdest> with a pair
7001// operand. If the conversion fails an error is diagnosed, and the function
7002// returns true.
bool ARMAsmParser::CDEConvertDualRegOperand(StringRef Mnemonic,
                                            OperandVector &Operands,
                                            unsigned MnemonicOpsEndInd) {
  assert(MS.isCDEDualRegInstr(Mnemonic));

  // Not enough operands for <coproc>, <Rdest>, <Rdest+1>; a "missing
  // operand" error will be produced by the normal matcher instead.
  if (Operands.size() < 3 + MnemonicOpsEndInd)
    return false;

  StringRef Op2Diag(
      "operand must be an even-numbered register in the range [r0, r10]");

  // Op2 is <Rdest>: must be the even half of one of the supported pairs.
  const MCParsedAsmOperand &Op2 = *Operands[MnemonicOpsEndInd + 1];
  if (!Op2.isReg())
    return Error(L: Op2.getStartLoc(), Msg: Op2Diag);

  // Map the even register to its expected successor and the GPRPair reg.
  MCRegister RNext;
  MCRegister RPair;
  switch (Op2.getReg().id()) {
  default:
    return Error(L: Op2.getStartLoc(), Msg: Op2Diag);
  case ARM::R0:
    RNext = ARM::R1;
    RPair = ARM::R0_R1;
    break;
  case ARM::R2:
    RNext = ARM::R3;
    RPair = ARM::R2_R3;
    break;
  case ARM::R4:
    RNext = ARM::R5;
    RPair = ARM::R4_R5;
    break;
  case ARM::R6:
    RNext = ARM::R7;
    RPair = ARM::R6_R7;
    break;
  case ARM::R8:
    RNext = ARM::R9;
    RPair = ARM::R8_R9;
    break;
  case ARM::R10:
    RNext = ARM::R11;
    RPair = ARM::R10_R11;
    break;
  }

  // Op3 is <Rdest+1>: it must name exactly the successor of Op2.
  const MCParsedAsmOperand &Op3 = *Operands[MnemonicOpsEndInd + 2];
  if (!Op3.isReg() || Op3.getReg() != RNext)
    return Error(L: Op3.getStartLoc(), Msg: "operand must be a consecutive register");

  // Collapse the two GPR operands into the single register-pair operand.
  Operands.erase(CI: Operands.begin() + MnemonicOpsEndInd + 2);
  Operands[MnemonicOpsEndInd + 1] =
      ARMOperand::CreateReg(Reg: RPair, S: Op2.getStartLoc(), E: Op2.getEndLoc(), Parser&: *this);
  return false;
}
7058
7059void removeCondCode(OperandVector &Operands, unsigned &MnemonicOpsEndInd) {
7060 for (unsigned I = 0; I < MnemonicOpsEndInd; ++I)
7061 if (static_cast<ARMOperand &>(*Operands[I]).isCondCode()) {
7062 Operands.erase(CI: Operands.begin() + I);
7063 --MnemonicOpsEndInd;
7064 break;
7065 }
7066}
7067
7068void removeCCOut(OperandVector &Operands, unsigned &MnemonicOpsEndInd) {
7069 for (unsigned I = 0; I < MnemonicOpsEndInd; ++I)
7070 if (static_cast<ARMOperand &>(*Operands[I]).isCCOut()) {
7071 Operands.erase(CI: Operands.begin() + I);
7072 --MnemonicOpsEndInd;
7073 break;
7074 }
7075}
7076
7077void removeVPTCondCode(OperandVector &Operands, unsigned &MnemonicOpsEndInd) {
7078 for (unsigned I = 0; I < MnemonicOpsEndInd; ++I)
7079 if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred()) {
7080 Operands.erase(CI: Operands.begin() + I);
7081 --MnemonicOpsEndInd;
7082 break;
7083 }
7084}
7085
/// Parse an arm instruction mnemonic followed by its operands.
///
/// Splits \p Name into the base mnemonic plus any predication /
/// carry-setting / IT-mask / datatype suffixes, materialises the
/// corresponding implicit operands (CCOut, CondCode, VPTPred, IT mask,
/// imod), parses the comma-separated operand list, and then applies a
/// series of fix-ups (two-operand forms, CDE register pairs, MVE mnemonic
/// ambiguities, GNU ldrd/strd aliases, GPR-pair merging) so the operand
/// list matches what the tablegen'erated matcher expects.
/// Returns true on error (a diagnostic has already been emitted).
bool ARMAsmParser::parseInstruction(ParseInstructionInfo &Info, StringRef Name,
                                    SMLoc NameLoc, OperandVector &Operands) {
  MCAsmParser &Parser = getParser();

  // Apply mnemonic aliases before doing anything else, as the destination
  // mnemonic may include suffices and we want to handle them normally.
  // The generic tblgen'erated code does this later, at the start of
  // MatchInstructionImpl(), but that's too late for aliases that include
  // any sort of suffix.
  const FeatureBitset &AvailableFeatures = getAvailableFeatures();
  unsigned AssemblerDialect = getParser().getAssemblerDialect();
  applyMnemonicAliases(Mnemonic&: Name, Features: AvailableFeatures, VariantID: AssemblerDialect);

  // First check for the ARM-specific .req directive.
  if (Parser.getTok().is(K: AsmToken::Identifier) &&
      Parser.getTok().getIdentifier().lower() == ".req") {
    parseDirectiveReq(Name, L: NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction'.
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find(C: '.');
  StringRef Mnemonic = Name.slice(Start, End: Next);
  StringRef ExtraToken = Name.slice(Start: Next, End: Name.find(C: ' ', From: Next + 1));

  // Split out the predication code and carry setting flag from the mnemonic.
  ARMCC::CondCodes PredicationCode;
  ARMVCC::VPTCodes VPTPredicationCode;
  unsigned ProcessorIMod;
  bool CarrySetting;
  StringRef ITMask;
  Mnemonic = splitMnemonic(Mnemonic, ExtraToken, PredicationCode, VPTPredicationCode,
                           CarrySetting, ProcessorIMod, ITMask);

  // In Thumb1, only the branch (B) instruction can be predicated.
  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
    return Error(L: NameLoc, Msg: "conditional execution not supported in Thumb1");
  }

  Operands.push_back(Elt: ARMOperand::CreateToken(Str: Mnemonic, S: NameLoc, Parser&: *this));

  // Handle the mask for IT and VPT instructions. In ARMOperand and
  // MCOperand, this is stored in a format independent of the
  // condition code: the lowest set bit indicates the end of the
  // encoding, and above that, a 1 bit indicates 'else', and an 0
  // indicates 'then'. E.g.
  //    IT    -> 1000
  //    ITx   -> x100    (ITT -> 0100, ITE -> 1100)
  //    ITxy  -> xy10    (e.g. ITET -> 1010)
  //    ITxyz -> xyz1    (e.g. ITEET -> 1101)
  // Note: See the ARM::PredBlockMask enum in
  //   /lib/Target/ARM/Utils/ARMBaseInfo.h
  if (Mnemonic == "it" || Mnemonic.starts_with(Prefix: "vpt") ||
      Mnemonic.starts_with(Prefix: "vpst")) {
    // Point diagnostics at the mask characters just past the base mnemonic.
    SMLoc Loc = Mnemonic == "it" ? SMLoc::getFromPointer(Ptr: NameLoc.getPointer() + 2) :
                Mnemonic == "vpt" ? SMLoc::getFromPointer(Ptr: NameLoc.getPointer() + 3) :
                SMLoc::getFromPointer(Ptr: NameLoc.getPointer() + 4);
    if (ITMask.size() > 3) {
      if (Mnemonic == "it")
        return Error(L: Loc, Msg: "too many conditions on IT instruction");
      return Error(L: Loc, Msg: "too many conditions on VPT instruction");
    }
    unsigned Mask = 8;
    // Build the mask from the last character backwards, shifting the
    // end-marker bit down and OR-ing in an 'else' bit at the top.
    for (char Pos : llvm::reverse(C&: ITMask)) {
      if (Pos != 't' && Pos != 'e') {
        return Error(L: Loc, Msg: "illegal IT block condition mask '" + ITMask + "'");
      }
      Mask >>= 1;
      if (Pos == 'e')
        Mask |= 8;
    }
    Operands.push_back(Elt: ARMOperand::CreateITMask(Mask, S: Loc, Parser&: *this));
  }

  // FIXME: This is all a pretty gross hack. We should automatically handle
  // optional operands like this via tblgen.

  // Next, add the CCOut and ConditionCode operands, if needed.
  //
  // For mnemonics which can ever incorporate a carry setting bit or predication
  // code, our matching model involves us always generating CCOut and
  // ConditionCode operands to match the mnemonic "as written" and then we let
  // the matcher deal with finding the right instruction or generating an
  // appropriate error.
  bool CanAcceptCarrySet, CanAcceptPredicationCode, CanAcceptVPTPredicationCode;
  getMnemonicAcceptInfo(Mnemonic, ExtraToken, FullInst: Name, CanAcceptCarrySet,
                        CanAcceptPredicationCode, CanAcceptVPTPredicationCode);

  // If we had a carry-set on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptCarrySet && CarrySetting) {
    return Error(L: NameLoc, Msg: "instruction '" + Mnemonic +
                 "' can not set flags, but 's' suffix specified");
  }
  // If we had a predication code on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
    return Error(L: NameLoc, Msg: "instruction '" + Mnemonic +
                 "' is not predicable, but condition code specified");
  }

  // If we had a VPT predication code on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptVPTPredicationCode && VPTPredicationCode != ARMVCC::None) {
    return Error(L: NameLoc, Msg: "instruction '" + Mnemonic +
                 "' is not VPT predicable, but VPT code T/E is specified");
  }

  // Add the carry setting operand, if necessary.
  if (CanAcceptCarrySet && CarrySetting) {
    SMLoc Loc = SMLoc::getFromPointer(Ptr: NameLoc.getPointer() + Mnemonic.size());
    Operands.push_back(Elt: ARMOperand::CreateCCOut(
        Reg: CarrySetting ? ARM::CPSR : ARM::NoRegister, S: Loc, Parser&: *this));
  }

  // Add the predication code operand, if necessary.
  if (CanAcceptPredicationCode && PredicationCode != llvm::ARMCC::AL) {
    SMLoc Loc = SMLoc::getFromPointer(Ptr: NameLoc.getPointer() + Mnemonic.size() +
                                      CarrySetting);
    Operands.push_back(Elt: ARMOperand::CreateCondCode(
        CC: ARMCC::CondCodes(PredicationCode), S: Loc, Parser&: *this));
  }

  // Add the VPT predication code operand, if necessary.
  // Don't add in certain cases of VCVT as this needs to be disambiguated
  // after operand parsing.
  if (CanAcceptVPTPredicationCode && VPTPredicationCode != llvm::ARMVCC::None &&
      !(Mnemonic.starts_with(Prefix: "vcvt") && Mnemonic != "vcvta" &&
        Mnemonic != "vcvtn" && Mnemonic != "vcvtp" && Mnemonic != "vcvtm")) {
    SMLoc Loc = SMLoc::getFromPointer(Ptr: NameLoc.getPointer() + Mnemonic.size() +
                                      CarrySetting);
    Operands.push_back(Elt: ARMOperand::CreateVPTPred(
        CC: ARMVCC::VPTCodes(VPTPredicationCode), S: Loc, Parser&: *this));
  }

  // Add the processor imod operand, if necessary.
  if (ProcessorIMod) {
    Operands.push_back(Elt: ARMOperand::CreateImm(
        Val: MCConstantExpr::create(Value: ProcessorIMod, Ctx&: getContext()), S: NameLoc, E: NameLoc,
        Parser&: *this));
  } else if (Mnemonic == "cps" && isMClass()) {
    return Error(L: NameLoc, Msg: "instruction 'cps' requires effect for M-class");
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find(C: '.', From: Start + 1);
    ExtraToken = Name.slice(Start, End: Next);

    // Some NEON instructions have an optional datatype suffix that is
    // completely ignored. Check for that.
    if (isDataTypeToken(Tok: ExtraToken) &&
        doesIgnoreDataTypeSuffix(Mnemonic, DT: ExtraToken))
      continue;

    // For ARM mode, generate an error if the .n qualifier is used.
    if (ExtraToken == ".n" && !isThumb()) {
      SMLoc Loc = SMLoc::getFromPointer(Ptr: NameLoc.getPointer() + Start);
      return Error(L: Loc, Msg: "instruction with .n (narrow) qualifier not allowed in "
                   "arm mode");
    }

    // The .n qualifier is always discarded as that is what the tables
    // and matcher expect.  In ARM mode the .w qualifier has no effect,
    // so discard it to avoid errors that can be caused by the matcher.
    if (ExtraToken != ".n" && (isThumb() || ExtraToken != ".w")) {
      SMLoc Loc = SMLoc::getFromPointer(Ptr: NameLoc.getPointer() + Start);
      Operands.push_back(Elt: ARMOperand::CreateToken(Str: ExtraToken, S: Loc, Parser&: *this));
    }
  }

  // This marks the end of the LHS Mnemonic operators.
  // This is used for indexing into the non-mnemonic operators as some of the
  // mnemonic operators are optional and therefore indexes can differ.
  unsigned MnemonicOpsEndInd = Operands.size();

  // Read the remaining operands.
  if (getLexer().isNot(K: AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, Mnemonic)) {
      return true;
    }

    while (parseOptionalToken(T: AsmToken::Comma)) {
      // Parse and remember the operand.
      if (parseOperand(Operands, Mnemonic)) {
        return true;
      }
    }
  }

  if (parseToken(T: AsmToken::EndOfStatement, Msg: "unexpected token in argument list"))
    return true;

  tryConvertingToTwoOperandForm(Mnemonic, PredicationCode, CarrySetting,
                                Operands, MnemonicOpsEndInd);

  if (hasCDE() && MS.isCDEInstr(Mnemonic)) {
    // Dual-register instructions use even-odd register pairs as their
    // destination operand, in assembly such pair is spelled as two
    // consecutive registers, without any special syntax. ConvertDualRegOperand
    // tries to convert such operand into register pair, e.g. r2, r3 -> r2_r3.
    // It returns true, if an error message has been emitted. If the function
    // returns false, the function either succeeded or an error (e.g. missing
    // operand) will be diagnosed elsewhere.
    if (MS.isCDEDualRegInstr(Mnemonic)) {
      bool GotError =
          CDEConvertDualRegOperand(Mnemonic, Operands, MnemonicOpsEndInd);
      if (GotError)
        return GotError;
    }
  }

  if (hasMVE()) {
    if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands,
                                          MnemonicOpsEndInd) &&
        Mnemonic == "vmov" && PredicationCode == ARMCC::LT) {
      // Very nasty hack to deal with the vector predicated variant of vmovlt
      // the scalar predicated vmov with condition 'lt'.  We can not tell them
      // apart until we have parsed their operands.
      Operands.erase(CI: Operands.begin() + 1);
      Operands.erase(CI: Operands.begin());
      SMLoc MLoc = SMLoc::getFromPointer(Ptr: NameLoc.getPointer());
      SMLoc PLoc = SMLoc::getFromPointer(Ptr: NameLoc.getPointer() +
                                         Mnemonic.size() - 1 + CarrySetting);
      Operands.insert(I: Operands.begin(),
                      Elt: ARMOperand::CreateVPTPred(CC: ARMVCC::None, S: PLoc, Parser&: *this));
      Operands.insert(I: Operands.begin(), Elt: ARMOperand::CreateToken(
                                             Str: StringRef("vmovlt"), S: MLoc, Parser&: *this));
    } else if (Mnemonic == "vcvt" && PredicationCode == ARMCC::NE &&
               !shouldOmitVectorPredicateOperand(Mnemonic, Operands,
                                                 MnemonicOpsEndInd)) {
      // Another nasty hack to deal with the ambiguity between vcvt with scalar
      // predication 'ne' and vcvtn with vector predication 'e'.  As above we
      // can only distinguish between the two after we have parsed their
      // operands.
      Operands.erase(CI: Operands.begin() + 1);
      Operands.erase(CI: Operands.begin());
      SMLoc MLoc = SMLoc::getFromPointer(Ptr: NameLoc.getPointer());
      SMLoc PLoc = SMLoc::getFromPointer(Ptr: NameLoc.getPointer() +
                                         Mnemonic.size() - 1 + CarrySetting);
      Operands.insert(I: Operands.begin(),
                      Elt: ARMOperand::CreateVPTPred(CC: ARMVCC::Else, S: PLoc, Parser&: *this));
      Operands.insert(I: Operands.begin(),
                      Elt: ARMOperand::CreateToken(Str: StringRef("vcvtn"), S: MLoc, Parser&: *this));
    } else if (Mnemonic == "vmul" && PredicationCode == ARMCC::LT &&
               !shouldOmitVectorPredicateOperand(Mnemonic, Operands,
                                                 MnemonicOpsEndInd)) {
      // Another hack, this time to distinguish between scalar predicated vmul
      // with 'lt' predication code and the vector instruction vmullt with
      // vector predication code "none"
      removeCondCode(Operands, MnemonicOpsEndInd);
      Operands.erase(CI: Operands.begin());
      SMLoc MLoc = SMLoc::getFromPointer(Ptr: NameLoc.getPointer());
      Operands.insert(I: Operands.begin(), Elt: ARMOperand::CreateToken(
                                             Str: StringRef("vmullt"), S: MLoc, Parser&: *this));
    } else if (Mnemonic.starts_with(Prefix: "vcvt") && !Mnemonic.starts_with(Prefix: "vcvta") &&
               !Mnemonic.starts_with(Prefix: "vcvtn") &&
               !Mnemonic.starts_with(Prefix: "vcvtp") &&
               !Mnemonic.starts_with(Prefix: "vcvtm")) {
      if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands,
                                            MnemonicOpsEndInd)) {
        // We could not split the vector predicate off vcvt because it might
        // have been the scalar vcvtt instruction.  Now we know its a vector
        // instruction, we still need to check whether its the vector
        // predicated vcvt with 'Then' predication or the vector vcvtt.  We can
        // distinguish the two based on the suffixes, if it is any of
        // ".f16.f32", ".f32.f16", ".f16.f64" or ".f64.f16" then it is the vcvtt.
        if (Mnemonic.starts_with(Prefix: "vcvtt") && MnemonicOpsEndInd > 2) {
          auto Sz1 =
              static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd - 2]);
          auto Sz2 =
              static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd - 1]);
          if (!(Sz1.isToken() && Sz1.getToken().starts_with(Prefix: ".f") &&
                Sz2.isToken() && Sz2.getToken().starts_with(Prefix: ".f"))) {
            // Not the vcvtt form: re-tokenise as "vcvt" with a 'Then'
            // vector predicate.
            Operands.erase(CI: Operands.begin());
            SMLoc MLoc = SMLoc::getFromPointer(Ptr: NameLoc.getPointer());
            VPTPredicationCode = ARMVCC::Then;

            Mnemonic = Mnemonic.substr(Start: 0, N: 4);
            Operands.insert(I: Operands.begin(),
                            Elt: ARMOperand::CreateToken(Str: Mnemonic, S: MLoc, Parser&: *this));
          }
        }
        SMLoc PLoc = SMLoc::getFromPointer(Ptr: NameLoc.getPointer() +
                                           Mnemonic.size() + CarrySetting);
        // Add VPTPred
        Operands.insert(I: Operands.begin() + 1,
                        Elt: ARMOperand::CreateVPTPred(
                            CC: ARMVCC::VPTCodes(VPTPredicationCode), S: PLoc, Parser&: *this));
        ++MnemonicOpsEndInd;
      }
    } else if (CanAcceptVPTPredicationCode) {
      // For all other instructions, make sure only one of the two
      // predication operands is left behind, depending on whether we should
      // use the vector predication.
      if (shouldOmitVectorPredicateOperand(Mnemonic, Operands,
                                           MnemonicOpsEndInd)) {
        removeVPTCondCode(Operands, MnemonicOpsEndInd);
      }
    }
  }

  if (VPTPredicationCode != ARMVCC::None) {
    bool usedVPTPredicationCode = false;
    for (unsigned I = 1; I < Operands.size(); ++I)
      if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred())
        usedVPTPredicationCode = true;
    if (!usedVPTPredicationCode) {
      // If we have a VPT predication code and we haven't just turned it
      // into an operand, then it was a mistake for splitMnemonic to
      // separate it from the rest of the mnemonic in the first place,
      // and this may lead to wrong disassembly (e.g. scalar floating
      // point VCMPE is actually a different instruction from VCMP, so
      // we mustn't treat them the same). In that situation, glue it
      // back on.
      Mnemonic = Name.slice(Start: 0, End: Mnemonic.size() + 1);
      Operands.erase(CI: Operands.begin());
      Operands.insert(I: Operands.begin(),
                      Elt: ARMOperand::CreateToken(Str: Mnemonic, S: NameLoc, Parser&: *this));
    }
  }

  // ARM mode 'blx' need special handling, as the register operand version
  // is predicable, but the label operand version is not. So, we can't rely
  // on the Mnemonic based checking to correctly figure out when to put
  // a k_CondCode operand in the list. If we're trying to match the label
  // version, remove the k_CondCode operand here.
  if (!isThumb() && Mnemonic == "blx" &&
      Operands.size() == MnemonicOpsEndInd + 1 &&
      static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]).isImm())
    removeCondCode(Operands, MnemonicOpsEndInd);

  // GNU Assembler extension (compatibility).
  fixupGNULDRDAlias(Mnemonic, Operands, MnemonicOpsEndInd);

  // Adjust operands of ldrexd/strexd to MCK_GPRPair.
  // ldrexd/strexd require even/odd GPR pair. To enforce this constraint,
  // a single GPRPair reg operand is used in the .td file to replace the two
  // GPRs. However, when parsing from asm, the two GRPs cannot be
  // automatically
  // expressed as a GPRPair, so we have to manually merge them.
  // FIXME: We would really like to be able to tablegen'erate this.
  bool IsLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
  if (!isThumb() && Operands.size() > MnemonicOpsEndInd + 1 + (!IsLoad) &&
      (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" ||
       Mnemonic == "stlexd")) {
    // For stores, skip the leading status-register operand.
    unsigned Idx = IsLoad ? MnemonicOpsEndInd : MnemonicOpsEndInd + 1;
    ARMOperand &Op1 = static_cast<ARMOperand &>(*Operands[Idx]);
    ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[Idx + 1]);

    const MCRegisterClass &MRC = MRI->getRegClass(i: ARM::GPRRegClassID);
    // Adjust only if Op1 is a GPR.
    if (Op1.isReg() && MRC.contains(Reg: Op1.getReg())) {
      MCRegister Reg1 = Op1.getReg();
      unsigned Rt = MRI->getEncodingValue(Reg: Reg1);
      MCRegister Reg2 = Op2.getReg();
      unsigned Rt2 = MRI->getEncodingValue(Reg: Reg2);
      // Rt2 must be Rt + 1.
      if (Rt + 1 != Rt2)
        return Error(L: Op2.getStartLoc(),
                     Msg: IsLoad ? "destination operands must be sequential"
                            : "source operands must be sequential");

      // Rt must be even
      // NOTE(review): the diagnostics below read "must start start at" —
      // looks like a duplicated word in the user-visible message; confirm
      // against the matching test expectations before changing.
      if (Rt & 1)
        return Error(
            L: Op1.getStartLoc(),
            Msg: IsLoad ? "destination operands must start start at an even register"
                   : "source operands must start start at an even register");

      MCRegister NewReg = MRI->getMatchingSuperReg(
          Reg: Reg1, SubIdx: ARM::gsub_0, RC: &(MRI->getRegClass(i: ARM::GPRPairRegClassID)));
      Operands[Idx] = ARMOperand::CreateReg(Reg: NewReg, S: Op1.getStartLoc(),
                                            E: Op2.getEndLoc(), Parser&: *this);
      Operands.erase(CI: Operands.begin() + Idx + 1);
    }
  }

  // FIXME: As said above, this is all a pretty gross hack. This instruction
  // does not fit with other "subs" and tblgen.
  // Adjust operands of B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction
  // so the Mnemonic is the original name "subs" and delete the predicate
  // operand so it will match the table entry.
  if (isThumbTwo() && Mnemonic == "sub" &&
      Operands.size() == MnemonicOpsEndInd + 3 &&
      static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]).isReg() &&
      static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]).getReg() ==
          ARM::PC &&
      static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1]).isReg() &&
      static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1]).getReg() ==
          ARM::LR &&
      static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 2]).isImm()) {
    Operands.front() = ARMOperand::CreateToken(Str: Name, S: NameLoc, Parser&: *this);
    removeCCOut(Operands, MnemonicOpsEndInd);
  }
  return false;
}
7488
7489// Validate context-sensitive operand constraints.
7490
7491// return 'true' if register list contains non-low GPR registers,
7492// 'false' otherwise. If Reg is in the register list or is HiReg, set
7493// 'containsReg' to true.
7494static bool checkLowRegisterList(const MCInst &Inst, unsigned OpNo,
7495 MCRegister Reg, MCRegister HiReg,
7496 bool &containsReg) {
7497 containsReg = false;
7498 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
7499 MCRegister OpReg = Inst.getOperand(i).getReg();
7500 if (OpReg == Reg)
7501 containsReg = true;
7502 // Anything other than a low register isn't legal here.
7503 if (!isARMLowRegister(Reg: OpReg) && (!HiReg || OpReg != HiReg))
7504 return true;
7505 }
7506 return false;
7507}
7508
7509// Check if the specified regisgter is in the register list of the inst,
7510// starting at the indicated operand number.
7511static bool listContainsReg(const MCInst &Inst, unsigned OpNo, MCRegister Reg) {
7512 for (unsigned i = OpNo, e = Inst.getNumOperands(); i < e; ++i) {
7513 MCRegister OpReg = Inst.getOperand(i).getReg();
7514 if (OpReg == Reg)
7515 return true;
7516 }
7517 return false;
7518}
7519
7520// Return true if instruction has the interesting property of being
7521// allowed in IT blocks, but not being predicable.
7522static bool instIsBreakpoint(const MCInst &Inst) {
7523 return Inst.getOpcode() == ARM::tBKPT ||
7524 Inst.getOpcode() == ARM::BKPT ||
7525 Inst.getOpcode() == ARM::tHLT ||
7526 Inst.getOpcode() == ARM::HLT;
7527}
7528
7529unsigned getRegListInd(const OperandVector &Operands,
7530 unsigned MnemonicOpsEndInd) {
7531 for (unsigned I = MnemonicOpsEndInd; I < Operands.size(); ++I) {
7532 const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[I]);
7533 if (Op.isRegList()) {
7534 return I;
7535 }
7536 }
7537 return 0;
7538}
7539
7540bool ARMAsmParser::validatetLDMRegList(const MCInst &Inst,
7541 const OperandVector &Operands,
7542 unsigned MnemonicOpsEndInd,
7543 unsigned ListIndex, bool IsARPop) {
7544 bool ListContainsSP = listContainsReg(Inst, OpNo: ListIndex, Reg: ARM::SP);
7545 bool ListContainsLR = listContainsReg(Inst, OpNo: ListIndex, Reg: ARM::LR);
7546 bool ListContainsPC = listContainsReg(Inst, OpNo: ListIndex, Reg: ARM::PC);
7547
7548 if (!IsARPop && ListContainsSP)
7549 return Error(
7550 L: Operands[getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
7551 Msg: "SP may not be in the register list");
7552 if (ListContainsPC && ListContainsLR)
7553 return Error(
7554 L: Operands[getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
7555 Msg: "PC and LR may not be in the register list simultaneously");
7556 return false;
7557}
7558
7559bool ARMAsmParser::validatetSTMRegList(const MCInst &Inst,
7560 const OperandVector &Operands,
7561 unsigned MnemonicOpsEndInd,
7562 unsigned ListIndex) {
7563 bool ListContainsSP = listContainsReg(Inst, OpNo: ListIndex, Reg: ARM::SP);
7564 bool ListContainsPC = listContainsReg(Inst, OpNo: ListIndex, Reg: ARM::PC);
7565
7566 if (ListContainsSP && ListContainsPC)
7567 return Error(
7568 L: Operands[getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
7569 Msg: "SP and PC may not be in the register list");
7570 if (ListContainsSP)
7571 return Error(
7572 L: Operands[getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
7573 Msg: "SP may not be in the register list");
7574 if (ListContainsPC)
7575 return Error(
7576 L: Operands[getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
7577 Msg: "PC may not be in the register list");
7578 return false;
7579}
7580
7581bool ARMAsmParser::validateLDRDSTRD(MCInst &Inst, const OperandVector &Operands,
7582 bool Load, bool ARMMode, bool Writeback,
7583 unsigned MnemonicOpsEndInd) {
7584 unsigned RtIndex = Load || !Writeback ? 0 : 1;
7585 unsigned Rt = MRI->getEncodingValue(Reg: Inst.getOperand(i: RtIndex).getReg());
7586 unsigned Rt2 = MRI->getEncodingValue(Reg: Inst.getOperand(i: RtIndex + 1).getReg());
7587
7588 if (ARMMode) {
7589 // Rt can't be R14.
7590 if (Rt == 14)
7591 return Error(L: Operands[MnemonicOpsEndInd]->getStartLoc(),
7592 Msg: "Rt can't be R14");
7593
7594 // Rt must be even-numbered.
7595 if ((Rt & 1) == 1)
7596 return Error(L: Operands[MnemonicOpsEndInd]->getStartLoc(),
7597 Msg: "Rt must be even-numbered");
7598
7599 // Rt2 must be Rt + 1.
7600 if (Rt2 != Rt + 1) {
7601 if (Load)
7602 return Error(L: Operands[MnemonicOpsEndInd]->getStartLoc(),
7603 Msg: "destination operands must be sequential");
7604 else
7605 return Error(L: Operands[MnemonicOpsEndInd]->getStartLoc(),
7606 Msg: "source operands must be sequential");
7607 }
7608
7609 // FIXME: Diagnose m == 15
7610 // FIXME: Diagnose ldrd with m == t || m == t2.
7611 }
7612
7613 if (!ARMMode && Load) {
7614 if (Rt2 == Rt)
7615 return Error(L: Operands[MnemonicOpsEndInd]->getStartLoc(),
7616 Msg: "destination operands can't be identical");
7617 }
7618
7619 if (Writeback) {
7620 unsigned Rn = MRI->getEncodingValue(Reg: Inst.getOperand(i: 3).getReg());
7621
7622 if (Rn == Rt || Rn == Rt2) {
7623 if (Load)
7624 return Error(L: Operands[MnemonicOpsEndInd]->getStartLoc(),
7625 Msg: "base register needs to be different from destination "
7626 "registers");
7627 else
7628 return Error(L: Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7629 Msg: "source register and base register can't be identical");
7630 }
7631
7632 // FIXME: Diagnose ldrd/strd with writeback and n == 15.
7633 // (Except the immediate form of ldrd?)
7634 }
7635
7636 return false;
7637}
7638
7639static int findFirstVectorPredOperandIdx(const MCInstrDesc &MCID) {
7640 for (unsigned i = 0; i < MCID.NumOperands; ++i) {
7641 if (ARM::isVpred(op: MCID.operands()[i].OperandType))
7642 return i;
7643 }
7644 return -1;
7645}
7646
7647static bool isVectorPredicable(const MCInstrDesc &MCID) {
7648 return findFirstVectorPredOperandIdx(MCID) != -1;
7649}
7650
7651static bool isARMMCExpr(MCParsedAsmOperand &MCOp) {
7652 ARMOperand &Op = static_cast<ARMOperand &>(MCOp);
7653 if (!Op.isImm())
7654 return false;
7655 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Op.getImm());
7656 if (CE)
7657 return false;
7658 const MCExpr *E = dyn_cast<MCExpr>(Val: Op.getImm());
7659 if (!E)
7660 return false;
7661 return true;
7662}
7663
7664// FIXME: We would really like to be able to tablegen'erate this.
7665bool ARMAsmParser::validateInstruction(MCInst &Inst,
7666 const OperandVector &Operands,
7667 unsigned MnemonicOpsEndInd) {
7668 const MCInstrDesc &MCID = MII.get(Opcode: Inst.getOpcode());
7669 SMLoc Loc = Operands[0]->getStartLoc();
7670
7671 // Check the IT block state first.
7672 // NOTE: BKPT and HLT instructions have the interesting property of being
7673 // allowed in IT blocks, but not being predicable. They just always execute.
7674 if (inITBlock() && !instIsBreakpoint(Inst)) {
7675 // The instruction must be predicable.
7676 if (!MCID.isPredicable())
7677 return Error(L: Loc, Msg: "instructions in IT block must be predicable");
7678 ARMCC::CondCodes Cond = ARMCC::CondCodes(
7679 Inst.getOperand(i: MCID.findFirstPredOperandIdx()).getImm());
7680 if (Cond != currentITCond()) {
7681 // Find the condition code Operand to get its SMLoc information.
7682 SMLoc CondLoc = Operands[0]->getEndLoc();
7683 for (unsigned I = 1; I < Operands.size(); ++I)
7684 if (static_cast<ARMOperand &>(*Operands[I]).isCondCode())
7685 CondLoc = Operands[I]->getStartLoc();
7686 return Error(L: CondLoc, Msg: "incorrect condition in IT block; got '" +
7687 StringRef(ARMCondCodeToString(CC: Cond)) +
7688 "', but expected '" +
7689 ARMCondCodeToString(CC: currentITCond()) + "'");
7690 }
7691 // Check for non-'al' condition codes outside of the IT block.
7692 } else if (isThumbTwo() && MCID.isPredicable() &&
7693 Inst.getOperand(i: MCID.findFirstPredOperandIdx()).getImm() !=
7694 ARMCC::AL && Inst.getOpcode() != ARM::tBcc &&
7695 Inst.getOpcode() != ARM::t2Bcc &&
7696 Inst.getOpcode() != ARM::t2BFic) {
7697 return Error(L: Loc, Msg: "predicated instructions must be in IT block");
7698 } else if (!isThumb() && !useImplicitITARM() && MCID.isPredicable() &&
7699 Inst.getOperand(i: MCID.findFirstPredOperandIdx()).getImm() !=
7700 ARMCC::AL) {
7701 return Warning(L: Loc, Msg: "predicated instructions should be in IT block");
7702 } else if (!MCID.isPredicable()) {
7703 // Check the instruction doesn't have a predicate operand anyway
7704 // that it's not allowed to use. Sometimes this happens in order
7705 // to keep instructions the same shape even though one cannot
7706 // legally be predicated, e.g. vmul.f16 vs vmul.f32.
7707 for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
7708 if (MCID.operands()[i].isPredicate()) {
7709 if (Inst.getOperand(i).getImm() != ARMCC::AL)
7710 return Error(L: Loc, Msg: "instruction is not predicable");
7711 break;
7712 }
7713 }
7714 }
7715
7716 // PC-setting instructions in an IT block, but not the last instruction of
7717 // the block, are UNPREDICTABLE.
7718 if (inExplicitITBlock() && !lastInITBlock() && isITBlockTerminator(Inst)) {
7719 return Error(L: Loc, Msg: "instruction must be outside of IT block or the last instruction in an IT block");
7720 }
7721
7722 if (inVPTBlock() && !instIsBreakpoint(Inst)) {
7723 unsigned Bit = extractITMaskBit(Mask: VPTState.Mask, Position: VPTState.CurPosition);
7724 if (!isVectorPredicable(MCID))
7725 return Error(L: Loc, Msg: "instruction in VPT block must be predicable");
7726 unsigned Pred = Inst.getOperand(i: findFirstVectorPredOperandIdx(MCID)).getImm();
7727 unsigned VPTPred = Bit ? ARMVCC::Else : ARMVCC::Then;
7728 if (Pred != VPTPred) {
7729 SMLoc PredLoc;
7730 for (unsigned I = 1; I < Operands.size(); ++I)
7731 if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred())
7732 PredLoc = Operands[I]->getStartLoc();
7733 return Error(L: PredLoc, Msg: "incorrect predication in VPT block; got '" +
7734 StringRef(ARMVPTPredToString(CC: ARMVCC::VPTCodes(Pred))) +
7735 "', but expected '" +
7736 ARMVPTPredToString(CC: ARMVCC::VPTCodes(VPTPred)) + "'");
7737 }
7738 }
7739 else if (isVectorPredicable(MCID) &&
7740 Inst.getOperand(i: findFirstVectorPredOperandIdx(MCID)).getImm() !=
7741 ARMVCC::None)
7742 return Error(L: Loc, Msg: "VPT predicated instructions must be in VPT block");
7743
7744 const unsigned Opcode = Inst.getOpcode();
7745 switch (Opcode) {
7746 case ARM::VLLDM:
7747 case ARM::VLLDM_T2:
7748 case ARM::VLSTM:
7749 case ARM::VLSTM_T2: {
7750 // Since in some cases both T1 and T2 are valid, tablegen can not always
7751 // pick the correct instruction.
7752 if (Operands.size() ==
7753 MnemonicOpsEndInd + 2) { // a register list has been provided
7754 ARMOperand &Op = static_cast<ARMOperand &>(
7755 *Operands[MnemonicOpsEndInd + 1]); // the register list, a dpr_reglist
7756 assert(Op.isDPRRegList());
7757 auto &RegList = Op.getRegList();
7758 // T2 requires v8.1-M.Main (cannot be handled by tablegen)
7759 if (RegList.size() == 32 && !hasV8_1MMainline()) {
7760 return Error(L: Op.getEndLoc(), Msg: "T2 version requires v8.1-M.Main");
7761 }
7762 // When target has 32 D registers, T1 is undefined.
7763 if (hasD32() && RegList.size() != 32) {
7764 return Error(L: Op.getEndLoc(), Msg: "operand must be exactly {d0-d31}");
7765 }
7766 // When target has 16 D registers, both T1 and T2 are valid.
7767 if (!hasD32() && (RegList.size() != 16 && RegList.size() != 32)) {
7768 return Error(L: Op.getEndLoc(),
7769 Msg: "operand must be exactly {d0-d15} (T1) or {d0-d31} (T2)");
7770 }
7771 }
7772 return false;
7773 }
7774 case ARM::t2IT: {
7775 // Encoding is unpredictable if it ever results in a notional 'NV'
7776 // predicate. Since we don't parse 'NV' directly this means an 'AL'
7777 // predicate with an "else" mask bit.
7778 unsigned Cond = Inst.getOperand(i: 0).getImm();
7779 unsigned Mask = Inst.getOperand(i: 1).getImm();
7780
7781 // Conditions only allowing a 't' are those with no set bit except
7782 // the lowest-order one that indicates the end of the sequence. In
7783 // other words, powers of 2.
7784 if (Cond == ARMCC::AL && llvm::popcount(Value: Mask) != 1)
7785 return Error(L: Loc, Msg: "unpredictable IT predicate sequence");
7786 break;
7787 }
7788 case ARM::LDRD:
7789 if (validateLDRDSTRD(Inst, Operands, /*Load*/ true, /*ARMMode*/ true,
7790 /*Writeback*/ false, MnemonicOpsEndInd))
7791 return true;
7792 break;
7793 case ARM::LDRD_PRE:
7794 case ARM::LDRD_POST:
7795 if (validateLDRDSTRD(Inst, Operands, /*Load*/ true, /*ARMMode*/ true,
7796 /*Writeback*/ true, MnemonicOpsEndInd))
7797 return true;
7798 break;
7799 case ARM::t2LDRDi8:
7800 if (validateLDRDSTRD(Inst, Operands, /*Load*/ true, /*ARMMode*/ false,
7801 /*Writeback*/ false, MnemonicOpsEndInd))
7802 return true;
7803 break;
7804 case ARM::t2LDRD_PRE:
7805 case ARM::t2LDRD_POST:
7806 if (validateLDRDSTRD(Inst, Operands, /*Load*/ true, /*ARMMode*/ false,
7807 /*Writeback*/ true, MnemonicOpsEndInd))
7808 return true;
7809 break;
7810 case ARM::t2BXJ: {
7811 const MCRegister RmReg = Inst.getOperand(i: 0).getReg();
7812 // Rm = SP is no longer unpredictable in v8-A
7813 if (RmReg == ARM::SP && !hasV8Ops())
7814 return Error(L: Operands[MnemonicOpsEndInd]->getStartLoc(),
7815 Msg: "r13 (SP) is an unpredictable operand to BXJ");
7816 return false;
7817 }
7818 case ARM::STRD:
7819 if (validateLDRDSTRD(Inst, Operands, /*Load*/ false, /*ARMMode*/ true,
7820 /*Writeback*/ false, MnemonicOpsEndInd))
7821 return true;
7822 break;
7823 case ARM::STRD_PRE:
7824 case ARM::STRD_POST:
7825 if (validateLDRDSTRD(Inst, Operands, /*Load*/ false, /*ARMMode*/ true,
7826 /*Writeback*/ true, MnemonicOpsEndInd))
7827 return true;
7828 break;
7829 case ARM::t2STRD_PRE:
7830 case ARM::t2STRD_POST:
7831 if (validateLDRDSTRD(Inst, Operands, /*Load*/ false, /*ARMMode*/ false,
7832 /*Writeback*/ true, MnemonicOpsEndInd))
7833 return true;
7834 break;
7835 case ARM::STR_PRE_IMM:
7836 case ARM::STR_PRE_REG:
7837 case ARM::t2STR_PRE:
7838 case ARM::STR_POST_IMM:
7839 case ARM::STR_POST_REG:
7840 case ARM::t2STR_POST:
7841 case ARM::STRH_PRE:
7842 case ARM::t2STRH_PRE:
7843 case ARM::STRH_POST:
7844 case ARM::t2STRH_POST:
7845 case ARM::STRB_PRE_IMM:
7846 case ARM::STRB_PRE_REG:
7847 case ARM::t2STRB_PRE:
7848 case ARM::STRB_POST_IMM:
7849 case ARM::STRB_POST_REG:
7850 case ARM::t2STRB_POST: {
7851 // Rt must be different from Rn.
7852 const unsigned Rt = MRI->getEncodingValue(Reg: Inst.getOperand(i: 1).getReg());
7853 const unsigned Rn = MRI->getEncodingValue(Reg: Inst.getOperand(i: 2).getReg());
7854
7855 if (Rt == Rn)
7856 return Error(L: Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
7857 Msg: "source register and base register can't be identical");
7858 return false;
7859 }
7860 case ARM::t2LDR_PRE_imm:
7861 case ARM::t2LDR_POST_imm:
7862 case ARM::t2STR_PRE_imm:
7863 case ARM::t2STR_POST_imm: {
7864 // Rt must be different from Rn.
7865 const unsigned Rt = MRI->getEncodingValue(Reg: Inst.getOperand(i: 0).getReg());
7866 const unsigned Rn = MRI->getEncodingValue(Reg: Inst.getOperand(i: 1).getReg());
7867
7868 if (Rt == Rn)
7869 return Error(L: Operands[MnemonicOpsEndInd]->getStartLoc(),
7870 Msg: "destination register and base register can't be identical");
7871 if (Inst.getOpcode() == ARM::t2LDR_POST_imm ||
7872 Inst.getOpcode() == ARM::t2STR_POST_imm) {
7873 int Imm = Inst.getOperand(i: 2).getImm();
7874 if (Imm > 255 || Imm < -255)
7875 return Error(L: Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7876 Msg: "operand must be in range [-255, 255]");
7877 }
7878 if (Inst.getOpcode() == ARM::t2STR_PRE_imm ||
7879 Inst.getOpcode() == ARM::t2STR_POST_imm) {
7880 if (Inst.getOperand(i: 0).getReg() == ARM::PC) {
7881 return Error(L: Operands[MnemonicOpsEndInd]->getStartLoc(),
7882 Msg: "operand must be a register in range [r0, r14]");
7883 }
7884 }
7885 return false;
7886 }
7887
7888 case ARM::t2LDRB_OFFSET_imm:
7889 case ARM::t2LDRB_PRE_imm:
7890 case ARM::t2LDRB_POST_imm:
7891 case ARM::t2STRB_OFFSET_imm:
7892 case ARM::t2STRB_PRE_imm:
7893 case ARM::t2STRB_POST_imm: {
7894 if (Inst.getOpcode() == ARM::t2LDRB_POST_imm ||
7895 Inst.getOpcode() == ARM::t2STRB_POST_imm ||
7896 Inst.getOpcode() == ARM::t2LDRB_PRE_imm ||
7897 Inst.getOpcode() == ARM::t2STRB_PRE_imm) {
7898 int Imm = Inst.getOperand(i: 2).getImm();
7899 if (Imm > 255 || Imm < -255)
7900 return Error(L: Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7901 Msg: "operand must be in range [-255, 255]");
7902 } else if (Inst.getOpcode() == ARM::t2LDRB_OFFSET_imm ||
7903 Inst.getOpcode() == ARM::t2STRB_OFFSET_imm) {
7904 int Imm = Inst.getOperand(i: 2).getImm();
7905 if (Imm > 0 || Imm < -255)
7906 return Error(L: Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7907 Msg: "operand must be in range [0, 255] with a negative sign");
7908 }
7909 if (Inst.getOperand(i: 0).getReg() == ARM::PC) {
7910 return Error(L: Operands[MnemonicOpsEndInd]->getStartLoc(),
7911 Msg: "if operand is PC, should call the LDRB (literal)");
7912 }
7913 return false;
7914 }
7915
7916 case ARM::t2LDRH_OFFSET_imm:
7917 case ARM::t2LDRH_PRE_imm:
7918 case ARM::t2LDRH_POST_imm:
7919 case ARM::t2STRH_OFFSET_imm:
7920 case ARM::t2STRH_PRE_imm:
7921 case ARM::t2STRH_POST_imm: {
7922 if (Inst.getOpcode() == ARM::t2LDRH_POST_imm ||
7923 Inst.getOpcode() == ARM::t2STRH_POST_imm ||
7924 Inst.getOpcode() == ARM::t2LDRH_PRE_imm ||
7925 Inst.getOpcode() == ARM::t2STRH_PRE_imm) {
7926 int Imm = Inst.getOperand(i: 2).getImm();
7927 if (Imm > 255 || Imm < -255)
7928 return Error(L: Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7929 Msg: "operand must be in range [-255, 255]");
7930 } else if (Inst.getOpcode() == ARM::t2LDRH_OFFSET_imm ||
7931 Inst.getOpcode() == ARM::t2STRH_OFFSET_imm) {
7932 int Imm = Inst.getOperand(i: 2).getImm();
7933 if (Imm > 0 || Imm < -255)
7934 return Error(L: Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7935 Msg: "operand must be in range [0, 255] with a negative sign");
7936 }
7937 if (Inst.getOperand(i: 0).getReg() == ARM::PC) {
7938 return Error(L: Operands[MnemonicOpsEndInd]->getStartLoc(),
7939 Msg: "if operand is PC, should call the LDRH (literal)");
7940 }
7941 return false;
7942 }
7943
7944 case ARM::t2LDRSB_OFFSET_imm:
7945 case ARM::t2LDRSB_PRE_imm:
7946 case ARM::t2LDRSB_POST_imm: {
7947 if (Inst.getOpcode() == ARM::t2LDRSB_POST_imm ||
7948 Inst.getOpcode() == ARM::t2LDRSB_PRE_imm) {
7949 int Imm = Inst.getOperand(i: 2).getImm();
7950 if (Imm > 255 || Imm < -255)
7951 return Error(L: Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7952 Msg: "operand must be in range [-255, 255]");
7953 } else if (Inst.getOpcode() == ARM::t2LDRSB_OFFSET_imm) {
7954 int Imm = Inst.getOperand(i: 2).getImm();
7955 if (Imm > 0 || Imm < -255)
7956 return Error(L: Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7957 Msg: "operand must be in range [0, 255] with a negative sign");
7958 }
7959 if (Inst.getOperand(i: 0).getReg() == ARM::PC) {
7960 return Error(L: Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7961 Msg: "if operand is PC, should call the LDRH (literal)");
7962 }
7963 return false;
7964 }
7965
7966 case ARM::t2LDRSH_OFFSET_imm:
7967 case ARM::t2LDRSH_PRE_imm:
7968 case ARM::t2LDRSH_POST_imm: {
7969 if (Inst.getOpcode() == ARM::t2LDRSH_POST_imm ||
7970 Inst.getOpcode() == ARM::t2LDRSH_PRE_imm) {
7971 int Imm = Inst.getOperand(i: 2).getImm();
7972 if (Imm > 255 || Imm < -255)
7973 return Error(L: Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7974 Msg: "operand must be in range [-255, 255]");
7975 } else if (Inst.getOpcode() == ARM::t2LDRSH_OFFSET_imm) {
7976 int Imm = Inst.getOperand(i: 2).getImm();
7977 if (Imm > 0 || Imm < -255)
7978 return Error(L: Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7979 Msg: "operand must be in range [0, 255] with a negative sign");
7980 }
7981 if (Inst.getOperand(i: 0).getReg() == ARM::PC) {
7982 return Error(L: Operands[MnemonicOpsEndInd]->getStartLoc(),
7983 Msg: "if operand is PC, should call the LDRH (literal)");
7984 }
7985 return false;
7986 }
7987
7988 case ARM::LDR_PRE_IMM:
7989 case ARM::LDR_PRE_REG:
7990 case ARM::t2LDR_PRE:
7991 case ARM::LDR_POST_IMM:
7992 case ARM::LDR_POST_REG:
7993 case ARM::t2LDR_POST:
7994 case ARM::LDRH_PRE:
7995 case ARM::t2LDRH_PRE:
7996 case ARM::LDRH_POST:
7997 case ARM::t2LDRH_POST:
7998 case ARM::LDRSH_PRE:
7999 case ARM::t2LDRSH_PRE:
8000 case ARM::LDRSH_POST:
8001 case ARM::t2LDRSH_POST:
8002 case ARM::LDRB_PRE_IMM:
8003 case ARM::LDRB_PRE_REG:
8004 case ARM::t2LDRB_PRE:
8005 case ARM::LDRB_POST_IMM:
8006 case ARM::LDRB_POST_REG:
8007 case ARM::t2LDRB_POST:
8008 case ARM::LDRSB_PRE:
8009 case ARM::t2LDRSB_PRE:
8010 case ARM::LDRSB_POST:
8011 case ARM::t2LDRSB_POST: {
8012 // Rt must be different from Rn.
8013 const unsigned Rt = MRI->getEncodingValue(Reg: Inst.getOperand(i: 0).getReg());
8014 const unsigned Rn = MRI->getEncodingValue(Reg: Inst.getOperand(i: 2).getReg());
8015
8016 if (Rt == Rn)
8017 return Error(L: Operands[MnemonicOpsEndInd]->getStartLoc(),
8018 Msg: "destination register and base register can't be identical");
8019 return false;
8020 }
8021
8022 case ARM::MVE_VLDRBU8_rq:
8023 case ARM::MVE_VLDRBU16_rq:
8024 case ARM::MVE_VLDRBS16_rq:
8025 case ARM::MVE_VLDRBU32_rq:
8026 case ARM::MVE_VLDRBS32_rq:
8027 case ARM::MVE_VLDRHU16_rq:
8028 case ARM::MVE_VLDRHU16_rq_u:
8029 case ARM::MVE_VLDRHU32_rq:
8030 case ARM::MVE_VLDRHU32_rq_u:
8031 case ARM::MVE_VLDRHS32_rq:
8032 case ARM::MVE_VLDRHS32_rq_u:
8033 case ARM::MVE_VLDRWU32_rq:
8034 case ARM::MVE_VLDRWU32_rq_u:
8035 case ARM::MVE_VLDRDU64_rq:
8036 case ARM::MVE_VLDRDU64_rq_u:
8037 case ARM::MVE_VLDRWU32_qi:
8038 case ARM::MVE_VLDRWU32_qi_pre:
8039 case ARM::MVE_VLDRDU64_qi:
8040 case ARM::MVE_VLDRDU64_qi_pre: {
8041 // Qd must be different from Qm.
8042 unsigned QdIdx = 0, QmIdx = 2;
8043 bool QmIsPointer = false;
8044 switch (Opcode) {
8045 case ARM::MVE_VLDRWU32_qi:
8046 case ARM::MVE_VLDRDU64_qi:
8047 QmIdx = 1;
8048 QmIsPointer = true;
8049 break;
8050 case ARM::MVE_VLDRWU32_qi_pre:
8051 case ARM::MVE_VLDRDU64_qi_pre:
8052 QdIdx = 1;
8053 QmIsPointer = true;
8054 break;
8055 }
8056
8057 const unsigned Qd = MRI->getEncodingValue(Reg: Inst.getOperand(i: QdIdx).getReg());
8058 const unsigned Qm = MRI->getEncodingValue(Reg: Inst.getOperand(i: QmIdx).getReg());
8059
8060 if (Qd == Qm) {
8061 return Error(L: Operands[MnemonicOpsEndInd]->getStartLoc(),
8062 Msg: Twine("destination vector register and vector ") +
8063 (QmIsPointer ? "pointer" : "offset") +
8064 " register can't be identical");
8065 }
8066 return false;
8067 }
8068
8069 case ARM::SBFX:
8070 case ARM::t2SBFX:
8071 case ARM::UBFX:
8072 case ARM::t2UBFX: {
8073 // Width must be in range [1, 32-lsb].
8074 unsigned LSB = Inst.getOperand(i: 2).getImm();
8075 unsigned Widthm1 = Inst.getOperand(i: 3).getImm();
8076 if (Widthm1 >= 32 - LSB)
8077 return Error(L: Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8078 Msg: "bitfield width must be in range [1,32-lsb]");
8079 return false;
8080 }
8081 // Notionally handles ARM::tLDMIA_UPD too.
8082 case ARM::tLDMIA: {
8083 // If we're parsing Thumb2, the .w variant is available and handles
8084 // most cases that are normally illegal for a Thumb1 LDM instruction.
8085 // We'll make the transformation in processInstruction() if necessary.
8086 //
8087 // Thumb LDM instructions are writeback iff the base register is not
8088 // in the register list.
8089 MCRegister Rn = Inst.getOperand(i: 0).getReg();
8090 bool HasWritebackToken =
8091 (static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
8092 .isToken() &&
8093 static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
8094 .getToken() == "!");
8095
8096 bool ListContainsBase;
8097 if (checkLowRegisterList(Inst, OpNo: 3, Reg: Rn, HiReg: MCRegister(), containsReg&: ListContainsBase) &&
8098 !isThumbTwo())
8099 return Error(
8100 L: Operands[getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
8101 Msg: "registers must be in range r0-r7");
8102 // If we should have writeback, then there should be a '!' token.
8103 if (!ListContainsBase && !HasWritebackToken && !isThumbTwo())
8104 return Error(
8105 L: Operands[getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
8106 Msg: "writeback operator '!' expected");
8107 // If we should not have writeback, there must not be a '!'. This is
8108 // true even for the 32-bit wide encodings.
8109 if (ListContainsBase && HasWritebackToken)
8110 return Error(L: Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8111 Msg: "writeback operator '!' not allowed when base register "
8112 "in register list");
8113
8114 if (validatetLDMRegList(Inst, Operands, MnemonicOpsEndInd, ListIndex: 3))
8115 return true;
8116 break;
8117 }
8118 case ARM::LDMIA_UPD:
8119 case ARM::LDMDB_UPD:
8120 case ARM::LDMIB_UPD:
8121 case ARM::LDMDA_UPD:
8122 // ARM variants loading and updating the same register are only officially
8123 // UNPREDICTABLE on v7 upwards. Goodness knows what they did before.
8124 if (!hasV7Ops())
8125 break;
8126 if (listContainsReg(Inst, OpNo: 3, Reg: Inst.getOperand(i: 0).getReg()))
8127 return Error(L: Operands.back()->getStartLoc(),
8128 Msg: "writeback register not allowed in register list");
8129 break;
8130 case ARM::t2LDMIA:
8131 case ARM::t2LDMDB:
8132 if (validatetLDMRegList(Inst, Operands, MnemonicOpsEndInd, ListIndex: 3))
8133 return true;
8134 break;
8135 case ARM::t2STMIA:
8136 case ARM::t2STMDB:
8137 if (validatetSTMRegList(Inst, Operands, MnemonicOpsEndInd, ListIndex: 3))
8138 return true;
8139 break;
8140 case ARM::t2LDMIA_UPD:
8141 case ARM::t2LDMDB_UPD:
8142 case ARM::t2STMIA_UPD:
8143 case ARM::t2STMDB_UPD:
8144 if (listContainsReg(Inst, OpNo: 3, Reg: Inst.getOperand(i: 0).getReg()))
8145 return Error(L: Operands.back()->getStartLoc(),
8146 Msg: "writeback register not allowed in register list");
8147
8148 if (Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
8149 if (validatetLDMRegList(Inst, Operands, MnemonicOpsEndInd, ListIndex: 3))
8150 return true;
8151 } else {
8152 if (validatetSTMRegList(Inst, Operands, MnemonicOpsEndInd, ListIndex: 3))
8153 return true;
8154 }
8155 break;
8156
8157 case ARM::sysLDMIA_UPD:
8158 case ARM::sysLDMDA_UPD:
8159 case ARM::sysLDMDB_UPD:
8160 case ARM::sysLDMIB_UPD:
8161 if (!listContainsReg(Inst, OpNo: 3, Reg: ARM::PC))
8162 return Error(L: Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8163 Msg: "writeback register only allowed on system LDM "
8164 "if PC in register-list");
8165 break;
8166 case ARM::sysSTMIA_UPD:
8167 case ARM::sysSTMDA_UPD:
8168 case ARM::sysSTMDB_UPD:
8169 case ARM::sysSTMIB_UPD:
8170 return Error(L: Operands[MnemonicOpsEndInd]->getStartLoc(),
8171 Msg: "system STM cannot have writeback register");
8172 // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2,
8173 // so only issue a diagnostic for thumb1. The instructions will be
8174 // switched to the t2 encodings in processInstruction() if necessary.
8175 case ARM::tPOP: {
8176 bool ListContainsBase;
8177 if (checkLowRegisterList(Inst, OpNo: 2, Reg: MCRegister(), HiReg: ARM::PC,
8178 containsReg&: ListContainsBase) &&
8179 !isThumbTwo())
8180 return Error(L: Operands[MnemonicOpsEndInd]->getStartLoc(),
8181 Msg: "registers must be in range r0-r7 or pc");
8182 if (validatetLDMRegList(Inst, Operands, MnemonicOpsEndInd, ListIndex: 2, IsARPop: !isMClass()))
8183 return true;
8184 break;
8185 }
8186 case ARM::tPUSH: {
8187 bool ListContainsBase;
8188 if (checkLowRegisterList(Inst, OpNo: 2, Reg: MCRegister(), HiReg: ARM::LR,
8189 containsReg&: ListContainsBase) &&
8190 !isThumbTwo())
8191 return Error(L: Operands[MnemonicOpsEndInd]->getStartLoc(),
8192 Msg: "registers must be in range r0-r7 or lr");
8193 if (validatetSTMRegList(Inst, Operands, MnemonicOpsEndInd, ListIndex: 2))
8194 return true;
8195 break;
8196 }
8197 case ARM::tSTMIA_UPD: {
8198 bool ListContainsBase, InvalidLowList;
8199 InvalidLowList = checkLowRegisterList(Inst, OpNo: 4, Reg: Inst.getOperand(i: 0).getReg(),
8200 HiReg: 0, containsReg&: ListContainsBase);
8201 if (InvalidLowList && !isThumbTwo())
8202 return Error(L: Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8203 Msg: "registers must be in range r0-r7");
8204
8205 // This would be converted to a 32-bit stm, but that's not valid if the
8206 // writeback register is in the list.
8207 if (InvalidLowList && ListContainsBase)
8208 return Error(L: Operands[MnemonicOpsEndInd]->getStartLoc(),
8209 Msg: "writeback operator '!' not allowed when base register "
8210 "in register list");
8211
8212 if (validatetSTMRegList(Inst, Operands, MnemonicOpsEndInd, ListIndex: 4))
8213 return true;
8214 break;
8215 }
8216 case ARM::tADDrSP:
8217 // If the non-SP source operand and the destination operand are not the
8218 // same, we need thumb2 (for the wide encoding), or we have an error.
8219 if (!isThumbTwo() &&
8220 Inst.getOperand(i: 0).getReg() != Inst.getOperand(i: 2).getReg()) {
8221 return Error(L: Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8222 Msg: "source register must be the same as destination");
8223 }
8224 break;
8225
8226 case ARM::t2ADDrr:
8227 case ARM::t2ADDrs:
8228 case ARM::t2SUBrr:
8229 case ARM::t2SUBrs:
8230 if (Inst.getOperand(i: 0).getReg() == ARM::SP &&
8231 Inst.getOperand(i: 1).getReg() != ARM::SP)
8232 return Error(L: Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8233 Msg: "source register must be sp if destination is sp");
8234 break;
8235
8236 // Final range checking for Thumb unconditional branch instructions.
8237 case ARM::tB:
8238 if (!(static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]))
8239 .isSignedOffset<11, 1>())
8240 return Error(L: Operands[MnemonicOpsEndInd]->getStartLoc(),
8241 Msg: "branch target out of range");
8242 break;
8243 case ARM::t2B: {
8244 int op = (Operands[MnemonicOpsEndInd]->isImm()) ? MnemonicOpsEndInd
8245 : MnemonicOpsEndInd + 1;
8246 ARMOperand &Operand = static_cast<ARMOperand &>(*Operands[op]);
8247 // Delay the checks of symbolic expressions until they are resolved.
8248 if (!isa<MCBinaryExpr>(Val: Operand.getImm()) &&
8249 !Operand.isSignedOffset<24, 1>())
8250 return Error(L: Operands[op]->getStartLoc(), Msg: "branch target out of range");
8251 break;
8252 }
8253 // Final range checking for Thumb conditional branch instructions.
8254 case ARM::tBcc:
8255 if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd])
8256 .isSignedOffset<8, 1>())
8257 return Error(L: Operands[MnemonicOpsEndInd]->getStartLoc(),
8258 Msg: "branch target out of range");
8259 break;
8260 case ARM::t2Bcc: {
8261 int Op = (Operands[MnemonicOpsEndInd]->isImm()) ? MnemonicOpsEndInd
8262 : MnemonicOpsEndInd + 1;
8263 if (!static_cast<ARMOperand &>(*Operands[Op]).isSignedOffset<20, 1>())
8264 return Error(L: Operands[Op]->getStartLoc(), Msg: "branch target out of range");
8265 break;
8266 }
8267 case ARM::tCBZ:
8268 case ARM::tCBNZ: {
8269 if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
8270 .isUnsignedOffset<6, 1>())
8271 return Error(L: Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8272 Msg: "branch target out of range");
8273 break;
8274 }
8275 case ARM::MOVi16:
8276 case ARM::MOVTi16:
8277 case ARM::t2MOVi16:
8278 case ARM::t2MOVTi16:
8279 {
8280 // We want to avoid misleadingly allowing something like "mov r0, <symbol>"
8281 // especially when we turn it into a movw and the expression <symbol> does
8282 // not have a :lower16: or :upper16 as part of the expression. We don't
8283 // want the behavior of silently truncating, which can be unexpected and
8284 // lead to bugs that are difficult to find since this is an easy mistake
8285 // to make.
8286 int i = (Operands[MnemonicOpsEndInd]->isImm()) ? MnemonicOpsEndInd
8287 : MnemonicOpsEndInd + 1;
8288 ARMOperand &Op = static_cast<ARMOperand &>(*Operands[i]);
8289 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Op.getImm());
8290 if (CE) break;
8291 const MCExpr *E = dyn_cast<MCExpr>(Val: Op.getImm());
8292 if (!E) break;
8293 auto *ARM16Expr = dyn_cast<MCSpecifierExpr>(Val: E);
8294 if (!ARM16Expr || (ARM16Expr->getSpecifier() != ARM::S_HI16 &&
8295 ARM16Expr->getSpecifier() != ARM::S_LO16))
8296 return Error(
8297 L: Op.getStartLoc(),
8298 Msg: "immediate expression for mov requires :lower16: or :upper16");
8299 break;
8300 }
8301 case ARM::tADDi8: {
8302 int i = (Operands[MnemonicOpsEndInd + 1]->isImm()) ? MnemonicOpsEndInd + 1
8303 : MnemonicOpsEndInd + 2;
8304 MCParsedAsmOperand &Op = *Operands[i];
8305 if (isARMMCExpr(MCOp&: Op) && !isThumbI8Relocation(MCOp&: Op))
8306 return Error(L: Op.getStartLoc(),
8307 Msg: "Immediate expression for Thumb adds requires :lower0_7:,"
8308 " :lower8_15:, :upper0_7: or :upper8_15:");
8309 break;
8310 }
8311 case ARM::tMOVi8: {
8312 MCParsedAsmOperand &Op = *Operands[MnemonicOpsEndInd + 1];
8313 if (isARMMCExpr(MCOp&: Op) && !isThumbI8Relocation(MCOp&: Op))
8314 return Error(L: Op.getStartLoc(),
8315 Msg: "Immediate expression for Thumb movs requires :lower0_7:,"
8316 " :lower8_15:, :upper0_7: or :upper8_15:");
8317 break;
8318 }
8319 case ARM::HINT:
8320 case ARM::t2HINT: {
8321 unsigned Imm8 = Inst.getOperand(i: 0).getImm();
8322 unsigned Pred = Inst.getOperand(i: 1).getImm();
8323 // ESB is not predicable (pred must be AL). Without the RAS extension, this
8324 // behaves as any other unallocated hint.
8325 if (Imm8 == 0x10 && Pred != ARMCC::AL && hasRAS())
8326 return Error(L: Operands[1]->getStartLoc(), Msg: "instruction 'esb' is not "
8327 "predicable, but condition "
8328 "code specified");
8329 if (Imm8 == 0x14 && Pred != ARMCC::AL)
8330 return Error(L: Operands[1]->getStartLoc(), Msg: "instruction 'csdb' is not "
8331 "predicable, but condition "
8332 "code specified");
8333 break;
8334 }
8335 case ARM::t2BFi:
8336 case ARM::t2BFr:
8337 case ARM::t2BFLi:
8338 case ARM::t2BFLr: {
8339 if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd])
8340 .isUnsignedOffset<4, 1>() ||
8341 (Inst.getOperand(i: 0).isImm() && Inst.getOperand(i: 0).getImm() == 0)) {
8342 return Error(L: Operands[MnemonicOpsEndInd]->getStartLoc(),
8343 Msg: "branch location out of range or not a multiple of 2");
8344 }
8345
8346 if (Opcode == ARM::t2BFi) {
8347 if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
8348 .isSignedOffset<16, 1>())
8349 return Error(L: Operands[MnemonicOpsEndInd]->getStartLoc(),
8350 Msg: "branch target out of range or not a multiple of 2");
8351 } else if (Opcode == ARM::t2BFLi) {
8352 if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
8353 .isSignedOffset<18, 1>())
8354 return Error(L: Operands[MnemonicOpsEndInd]->getStartLoc(),
8355 Msg: "branch target out of range or not a multiple of 2");
8356 }
8357 break;
8358 }
8359 case ARM::t2BFic: {
8360 if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd])
8361 .isUnsignedOffset<4, 1>() ||
8362 (Inst.getOperand(i: 0).isImm() && Inst.getOperand(i: 0).getImm() == 0))
8363 return Error(L: Operands[1]->getStartLoc(),
8364 Msg: "branch location out of range or not a multiple of 2");
8365
8366 if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
8367 .isSignedOffset<16, 1>())
8368 return Error(L: Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8369 Msg: "branch target out of range or not a multiple of 2");
8370
8371 assert(Inst.getOperand(0).isImm() == Inst.getOperand(2).isImm() &&
8372 "branch location and else branch target should either both be "
8373 "immediates or both labels");
8374
8375 if (Inst.getOperand(i: 0).isImm() && Inst.getOperand(i: 2).isImm()) {
8376 int Diff = Inst.getOperand(i: 2).getImm() - Inst.getOperand(i: 0).getImm();
8377 if (Diff != 4 && Diff != 2)
8378 return Error(
8379 L: Operands[3]->getStartLoc(),
8380 Msg: "else branch target must be 2 or 4 greater than the branch location");
8381 }
8382 break;
8383 }
8384 case ARM::t2CLRM: {
8385 for (unsigned i = 2; i < Inst.getNumOperands(); i++) {
8386 if (Inst.getOperand(i).isReg() &&
8387 !ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(
8388 Reg: Inst.getOperand(i).getReg())) {
8389 return Error(L: Operands[MnemonicOpsEndInd]->getStartLoc(),
8390 Msg: "invalid register in register list. Valid registers are "
8391 "r0-r12, lr/r14 and APSR.");
8392 }
8393 }
8394 break;
8395 }
8396 case ARM::DSB:
8397 case ARM::t2DSB: {
8398
8399 if (Inst.getNumOperands() < 2)
8400 break;
8401
8402 unsigned Option = Inst.getOperand(i: 0).getImm();
8403 unsigned Pred = Inst.getOperand(i: 1).getImm();
8404
8405 // SSBB and PSSBB (DSB #0|#4) are not predicable (pred must be AL).
8406 if (Option == 0 && Pred != ARMCC::AL)
8407 return Error(L: Operands[1]->getStartLoc(),
8408 Msg: "instruction 'ssbb' is not predicable, but condition code "
8409 "specified");
8410 if (Option == 4 && Pred != ARMCC::AL)
8411 return Error(L: Operands[1]->getStartLoc(),
8412 Msg: "instruction 'pssbb' is not predicable, but condition code "
8413 "specified");
8414 break;
8415 }
8416 case ARM::VMOVRRS: {
8417 // Source registers must be sequential.
8418 const unsigned Sm = MRI->getEncodingValue(Reg: Inst.getOperand(i: 2).getReg());
8419 const unsigned Sm1 = MRI->getEncodingValue(Reg: Inst.getOperand(i: 3).getReg());
8420 if (Sm1 != Sm + 1)
8421 return Error(L: Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8422 Msg: "source operands must be sequential");
8423 break;
8424 }
8425 case ARM::VMOVSRR: {
8426 // Destination registers must be sequential.
8427 const unsigned Sm = MRI->getEncodingValue(Reg: Inst.getOperand(i: 0).getReg());
8428 const unsigned Sm1 = MRI->getEncodingValue(Reg: Inst.getOperand(i: 1).getReg());
8429 if (Sm1 != Sm + 1)
8430 return Error(L: Operands[MnemonicOpsEndInd]->getStartLoc(),
8431 Msg: "destination operands must be sequential");
8432 break;
8433 }
8434 case ARM::VLDMDIA:
8435 case ARM::VSTMDIA: {
8436 ARMOperand &Op =
8437 static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1]);
8438 auto &RegList = Op.getRegList();
8439 if (RegList.size() < 1 || RegList.size() > 16)
8440 return Error(L: Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8441 Msg: "list of registers must be at least 1 and at most 16");
8442 break;
8443 }
8444 case ARM::MVE_VQDMULLs32bh:
8445 case ARM::MVE_VQDMULLs32th:
8446 case ARM::MVE_VCMULf32:
8447 case ARM::MVE_VMULLBs32:
8448 case ARM::MVE_VMULLTs32:
8449 case ARM::MVE_VMULLBu32:
8450 case ARM::MVE_VMULLTu32: {
8451 if (Operands[MnemonicOpsEndInd]->getReg() ==
8452 Operands[MnemonicOpsEndInd + 1]->getReg()) {
8453 return Error(L: Operands[MnemonicOpsEndInd]->getStartLoc(),
8454 Msg: "Qd register and Qn register can't be identical");
8455 }
8456 if (Operands[MnemonicOpsEndInd]->getReg() ==
8457 Operands[MnemonicOpsEndInd + 2]->getReg()) {
8458 return Error(L: Operands[MnemonicOpsEndInd]->getStartLoc(),
8459 Msg: "Qd register and Qm register can't be identical");
8460 }
8461 break;
8462 }
8463 case ARM::MVE_VREV64_8:
8464 case ARM::MVE_VREV64_16:
8465 case ARM::MVE_VREV64_32:
8466 case ARM::MVE_VQDMULL_qr_s32bh:
8467 case ARM::MVE_VQDMULL_qr_s32th: {
8468 if (Operands[MnemonicOpsEndInd]->getReg() ==
8469 Operands[MnemonicOpsEndInd + 1]->getReg()) {
8470 return Error(L: Operands[MnemonicOpsEndInd]->getStartLoc(),
8471 Msg: "Qd register and Qn register can't be identical");
8472 }
8473 break;
8474 }
8475 case ARM::MVE_VCADDi32:
8476 case ARM::MVE_VCADDf32:
8477 case ARM::MVE_VHCADDs32: {
8478 if (Operands[MnemonicOpsEndInd]->getReg() ==
8479 Operands[MnemonicOpsEndInd + 2]->getReg()) {
8480 return Error(L: Operands[MnemonicOpsEndInd]->getStartLoc(),
8481 Msg: "Qd register and Qm register can't be identical");
8482 }
8483 break;
8484 }
8485 case ARM::MVE_VMOV_rr_q: {
8486 if (Operands[MnemonicOpsEndInd + 2]->getReg() !=
8487 Operands[MnemonicOpsEndInd + 4]->getReg())
8488 return Error(L: Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8489 Msg: "Q-registers must be the same");
8490 if (static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 3])
8491 .getVectorIndex() !=
8492 static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 5])
8493 .getVectorIndex() +
8494 2)
8495 return Error(L: Operands[MnemonicOpsEndInd + 3]->getStartLoc(),
8496 Msg: "Q-register indexes must be 2 and 0 or 3 and 1");
8497 break;
8498 }
8499 case ARM::MVE_VMOV_q_rr: {
8500 if (Operands[MnemonicOpsEndInd]->getReg() !=
8501 Operands[MnemonicOpsEndInd + 2]->getReg())
8502 return Error(L: Operands[MnemonicOpsEndInd]->getStartLoc(),
8503 Msg: "Q-registers must be the same");
8504 if (static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
8505 .getVectorIndex() !=
8506 static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 3])
8507 .getVectorIndex() +
8508 2)
8509 return Error(L: Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8510 Msg: "Q-register indexes must be 2 and 0 or 3 and 1");
8511 break;
8512 }
8513 case ARM::MVE_SQRSHR:
8514 case ARM::MVE_UQRSHL: {
8515 if (Operands[MnemonicOpsEndInd]->getReg() ==
8516 Operands[MnemonicOpsEndInd + 1]->getReg()) {
8517 return Error(L: Operands[MnemonicOpsEndInd]->getStartLoc(),
8518 Msg: "Rda register and Rm register can't be identical");
8519 }
8520 break;
8521 }
8522 case ARM::UMAAL:
8523 case ARM::UMLAL:
8524 case ARM::UMULL:
8525 case ARM::t2UMAAL:
8526 case ARM::t2UMLAL:
8527 case ARM::t2UMULL:
8528 case ARM::SMLAL:
8529 case ARM::SMLALBB:
8530 case ARM::SMLALBT:
8531 case ARM::SMLALD:
8532 case ARM::SMLALDX:
8533 case ARM::SMLALTB:
8534 case ARM::SMLALTT:
8535 case ARM::SMLSLD:
8536 case ARM::SMLSLDX:
8537 case ARM::SMULL:
8538 case ARM::t2SMLAL:
8539 case ARM::t2SMLALBB:
8540 case ARM::t2SMLALBT:
8541 case ARM::t2SMLALD:
8542 case ARM::t2SMLALDX:
8543 case ARM::t2SMLALTB:
8544 case ARM::t2SMLALTT:
8545 case ARM::t2SMLSLD:
8546 case ARM::t2SMLSLDX:
8547 case ARM::t2SMULL: {
8548 MCRegister RdHi = Inst.getOperand(i: 0).getReg();
8549 MCRegister RdLo = Inst.getOperand(i: 1).getReg();
8550 if(RdHi == RdLo) {
8551 return Error(L: Loc,
8552 Msg: "unpredictable instruction, RdHi and RdLo must be different");
8553 }
8554 break;
8555 }
8556
8557 case ARM::CDE_CX1:
8558 case ARM::CDE_CX1A:
8559 case ARM::CDE_CX1D:
8560 case ARM::CDE_CX1DA:
8561 case ARM::CDE_CX2:
8562 case ARM::CDE_CX2A:
8563 case ARM::CDE_CX2D:
8564 case ARM::CDE_CX2DA:
8565 case ARM::CDE_CX3:
8566 case ARM::CDE_CX3A:
8567 case ARM::CDE_CX3D:
8568 case ARM::CDE_CX3DA:
8569 case ARM::CDE_VCX1_vec:
8570 case ARM::CDE_VCX1_fpsp:
8571 case ARM::CDE_VCX1_fpdp:
8572 case ARM::CDE_VCX1A_vec:
8573 case ARM::CDE_VCX1A_fpsp:
8574 case ARM::CDE_VCX1A_fpdp:
8575 case ARM::CDE_VCX2_vec:
8576 case ARM::CDE_VCX2_fpsp:
8577 case ARM::CDE_VCX2_fpdp:
8578 case ARM::CDE_VCX2A_vec:
8579 case ARM::CDE_VCX2A_fpsp:
8580 case ARM::CDE_VCX2A_fpdp:
8581 case ARM::CDE_VCX3_vec:
8582 case ARM::CDE_VCX3_fpsp:
8583 case ARM::CDE_VCX3_fpdp:
8584 case ARM::CDE_VCX3A_vec:
8585 case ARM::CDE_VCX3A_fpsp:
8586 case ARM::CDE_VCX3A_fpdp: {
8587 assert(Inst.getOperand(1).isImm() &&
8588 "CDE operand 1 must be a coprocessor ID");
8589 int64_t Coproc = Inst.getOperand(i: 1).getImm();
8590 if (Coproc < 8 && !ARM::isCDECoproc(Coproc, STI: *STI))
8591 return Error(L: Operands[1]->getStartLoc(),
8592 Msg: "coprocessor must be configured as CDE");
8593 else if (Coproc >= 8)
8594 return Error(L: Operands[1]->getStartLoc(),
8595 Msg: "coprocessor must be in the range [p0, p7]");
8596 break;
8597 }
8598
8599 case ARM::t2CDP:
8600 case ARM::t2CDP2:
8601 case ARM::t2LDC2L_OFFSET:
8602 case ARM::t2LDC2L_OPTION:
8603 case ARM::t2LDC2L_POST:
8604 case ARM::t2LDC2L_PRE:
8605 case ARM::t2LDC2_OFFSET:
8606 case ARM::t2LDC2_OPTION:
8607 case ARM::t2LDC2_POST:
8608 case ARM::t2LDC2_PRE:
8609 case ARM::t2LDCL_OFFSET:
8610 case ARM::t2LDCL_OPTION:
8611 case ARM::t2LDCL_POST:
8612 case ARM::t2LDCL_PRE:
8613 case ARM::t2LDC_OFFSET:
8614 case ARM::t2LDC_OPTION:
8615 case ARM::t2LDC_POST:
8616 case ARM::t2LDC_PRE:
8617 case ARM::t2MCR:
8618 case ARM::t2MCR2:
8619 case ARM::t2MCRR:
8620 case ARM::t2MCRR2:
8621 case ARM::t2MRC:
8622 case ARM::t2MRC2:
8623 case ARM::t2MRRC:
8624 case ARM::t2MRRC2:
8625 case ARM::t2STC2L_OFFSET:
8626 case ARM::t2STC2L_OPTION:
8627 case ARM::t2STC2L_POST:
8628 case ARM::t2STC2L_PRE:
8629 case ARM::t2STC2_OFFSET:
8630 case ARM::t2STC2_OPTION:
8631 case ARM::t2STC2_POST:
8632 case ARM::t2STC2_PRE:
8633 case ARM::t2STCL_OFFSET:
8634 case ARM::t2STCL_OPTION:
8635 case ARM::t2STCL_POST:
8636 case ARM::t2STCL_PRE:
8637 case ARM::t2STC_OFFSET:
8638 case ARM::t2STC_OPTION:
8639 case ARM::t2STC_POST:
8640 case ARM::t2STC_PRE: {
8641 unsigned Opcode = Inst.getOpcode();
8642 // Inst.getOperand indexes operands in the (oops ...) and (iops ...) dags,
8643 // CopInd is the index of the coprocessor operand.
8644 size_t CopInd = 0;
8645 if (Opcode == ARM::t2MRRC || Opcode == ARM::t2MRRC2)
8646 CopInd = 2;
8647 else if (Opcode == ARM::t2MRC || Opcode == ARM::t2MRC2)
8648 CopInd = 1;
8649 assert(Inst.getOperand(CopInd).isImm() &&
8650 "Operand must be a coprocessor ID");
8651 int64_t Coproc = Inst.getOperand(i: CopInd).getImm();
8652 // Operands[2] is the coprocessor operand at syntactic level
8653 if (ARM::isCDECoproc(Coproc, STI: *STI))
8654 return Error(L: Operands[2]->getStartLoc(),
8655 Msg: "coprocessor must be configured as GCP");
8656 break;
8657 }
8658
8659 case ARM::VTOSHH:
8660 case ARM::VTOUHH:
8661 case ARM::VTOSLH:
8662 case ARM::VTOULH:
8663 case ARM::VTOSHS:
8664 case ARM::VTOUHS:
8665 case ARM::VTOSLS:
8666 case ARM::VTOULS:
8667 case ARM::VTOSHD:
8668 case ARM::VTOUHD:
8669 case ARM::VTOSLD:
8670 case ARM::VTOULD:
8671 case ARM::VSHTOH:
8672 case ARM::VUHTOH:
8673 case ARM::VSLTOH:
8674 case ARM::VULTOH:
8675 case ARM::VSHTOS:
8676 case ARM::VUHTOS:
8677 case ARM::VSLTOS:
8678 case ARM::VULTOS:
8679 case ARM::VSHTOD:
8680 case ARM::VUHTOD:
8681 case ARM::VSLTOD:
8682 case ARM::VULTOD: {
8683 if (Operands[MnemonicOpsEndInd]->getReg() !=
8684 Operands[MnemonicOpsEndInd + 1]->getReg())
8685 return Error(L: Operands[MnemonicOpsEndInd]->getStartLoc(),
8686 Msg: "source and destination registers must be the same");
8687 break;
8688 }
8689 }
8690
8691 return false;
8692}
8693
/// Map a NEON VST pseudo-opcode ("..._Asm_...", produced by the asm matcher
/// for the store-lane / store-multiple mnemonic aliases) to the real target
/// opcode that encodes it.
///
/// \param Opc      Pseudo opcode selected by the asm matcher.
/// \param Spacing  [out] Register-list stride in D-registers: 1 for lists of
///                 consecutive D registers (the "d" forms), 2 for
///                 every-other-register lists (the "q" forms).
/// \returns The real opcode; "_UPD" variants are the base-register writeback
///          forms (both the fixed- and register-offset pseudos map to the
///          same "_UPD" opcode — they differ only in their operands).
static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
  switch(Opc) {
  default: llvm_unreachable("unexpected opcode!");
  // VST1LN
  case ARM::VST1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_register_Asm_8: Spacing = 1; return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdAsm_8: Spacing = 1; return ARM::VST1LNd8;
  case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
  case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;

  // VST2LN
  case ARM::VST2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD;
  case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
  case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
  case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
  case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;

  case ARM::VST2LNdWB_register_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD;
  case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
  case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
  case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
  case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;

  case ARM::VST2LNdAsm_8: Spacing = 1; return ARM::VST2LNd8;
  case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
  case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
  case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
  case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;

  // VST3LN
  case ARM::VST3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD;
  case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
  case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
  // NOTE(review): Spacing = 1 on this q-form looks inconsistent with the
  // sibling q cases (which use Spacing = 2) — confirm against upstream/ISA.
  case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
  case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
  case ARM::VST3LNdWB_register_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD;
  case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
  case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
  case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
  case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
  case ARM::VST3LNdAsm_8: Spacing = 1; return ARM::VST3LNd8;
  case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
  case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
  case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
  case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;

  // VST3
  case ARM::VST3dWB_fixed_Asm_8: Spacing = 1; return ARM::VST3d8_UPD;
  case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
  case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
  case ARM::VST3qWB_fixed_Asm_8: Spacing = 2; return ARM::VST3q8_UPD;
  case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
  case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
  case ARM::VST3dWB_register_Asm_8: Spacing = 1; return ARM::VST3d8_UPD;
  case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
  case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
  case ARM::VST3qWB_register_Asm_8: Spacing = 2; return ARM::VST3q8_UPD;
  case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
  case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
  case ARM::VST3dAsm_8: Spacing = 1; return ARM::VST3d8;
  case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
  case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
  case ARM::VST3qAsm_8: Spacing = 2; return ARM::VST3q8;
  case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
  case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;

  // VST4LN
  case ARM::VST4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD;
  case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
  case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
  // NOTE(review): Spacing = 1 on this q-form looks inconsistent with the
  // sibling q cases (which use Spacing = 2) — confirm against upstream/ISA.
  case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD;
  case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
  case ARM::VST4LNdWB_register_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD;
  case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
  case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
  case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
  case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
  case ARM::VST4LNdAsm_8: Spacing = 1; return ARM::VST4LNd8;
  case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
  case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
  case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
  case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;

  // VST4
  case ARM::VST4dWB_fixed_Asm_8: Spacing = 1; return ARM::VST4d8_UPD;
  case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
  case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
  case ARM::VST4qWB_fixed_Asm_8: Spacing = 2; return ARM::VST4q8_UPD;
  case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
  case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
  case ARM::VST4dWB_register_Asm_8: Spacing = 1; return ARM::VST4d8_UPD;
  case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
  case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
  case ARM::VST4qWB_register_Asm_8: Spacing = 2; return ARM::VST4q8_UPD;
  case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
  case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
  case ARM::VST4dAsm_8: Spacing = 1; return ARM::VST4d8;
  case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
  case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
  case ARM::VST4qAsm_8: Spacing = 2; return ARM::VST4q8;
  case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
  case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
  }
}
8802
/// Map a NEON VLD pseudo-opcode ("..._Asm_...", produced by the asm matcher
/// for the load-lane / load-dup / load-multiple mnemonic aliases) to the real
/// target opcode that encodes it. Counterpart of getRealVSTOpcode.
///
/// \param Opc      Pseudo opcode selected by the asm matcher.
/// \param Spacing  [out] Register-list stride in D-registers: 1 for lists of
///                 consecutive D registers (the "d" forms), 2 for
///                 every-other-register lists (the "q" forms).
/// \returns The real opcode; "_UPD" variants are the base-register writeback
///          forms (both the fixed- and register-offset pseudos map to the
///          same "_UPD" opcode — they differ only in their operands).
static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
  switch(Opc) {
  default: llvm_unreachable("unexpected opcode!");
  // VLD1LN
  case ARM::VLD1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD;
  case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
  case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdAsm_8: Spacing = 1; return ARM::VLD1LNd8;
  case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
  case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;

  // VLD2LN
  case ARM::VLD2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD;
  case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
  case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
  // NOTE(review): Spacing = 1 on this q-form looks inconsistent with the
  // sibling q cases (which use Spacing = 2) — confirm against upstream/ISA.
  case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD;
  case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
  case ARM::VLD2LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD;
  case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
  case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
  case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
  case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
  case ARM::VLD2LNdAsm_8: Spacing = 1; return ARM::VLD2LNd8;
  case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
  case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
  case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
  case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;

  // VLD3DUP
  case ARM::VLD3DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD;
  case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
  case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
  // NOTE(review): Spacing = 1 on this q-form looks inconsistent with the
  // sibling q cases (which use Spacing = 2) — confirm against upstream/ISA.
  case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD;
  case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
  case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
  case ARM::VLD3DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD;
  case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
  case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
  case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
  case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
  case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
  case ARM::VLD3DUPdAsm_8: Spacing = 1; return ARM::VLD3DUPd8;
  case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
  case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
  case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
  case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
  case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;

  // VLD3LN
  case ARM::VLD3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD;
  case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
  case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
  // NOTE(review): Spacing = 1 on this q-form looks inconsistent with the
  // sibling q cases (which use Spacing = 2) — confirm against upstream/ISA.
  case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNq16_UPD;
  case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
  case ARM::VLD3LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD;
  case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
  case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
  case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
  case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
  case ARM::VLD3LNdAsm_8: Spacing = 1; return ARM::VLD3LNd8;
  case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
  case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
  case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
  case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;

  // VLD3
  case ARM::VLD3dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD;
  case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
  case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
  case ARM::VLD3qWB_fixed_Asm_8: Spacing = 2; return ARM::VLD3q8_UPD;
  case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
  case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
  case ARM::VLD3dWB_register_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD;
  case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
  case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
  case ARM::VLD3qWB_register_Asm_8: Spacing = 2; return ARM::VLD3q8_UPD;
  case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
  case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
  case ARM::VLD3dAsm_8: Spacing = 1; return ARM::VLD3d8;
  case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
  case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
  case ARM::VLD3qAsm_8: Spacing = 2; return ARM::VLD3q8;
  case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
  case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;

  // VLD4LN
  case ARM::VLD4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD;
  case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
  case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
  case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
  case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
  case ARM::VLD4LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD;
  case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
  case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
  case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
  case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
  case ARM::VLD4LNdAsm_8: Spacing = 1; return ARM::VLD4LNd8;
  case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
  case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
  case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
  case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;

  // VLD4DUP
  case ARM::VLD4DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD;
  case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
  case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
  // NOTE(review): Spacing = 1 on the next two q-forms looks inconsistent
  // with the sibling q cases (which use Spacing = 2) — confirm against
  // upstream/ISA.
  case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPq8_UPD;
  case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPq16_UPD;
  case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
  case ARM::VLD4DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD;
  case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
  case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
  case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
  case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
  case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
  case ARM::VLD4DUPdAsm_8: Spacing = 1; return ARM::VLD4DUPd8;
  case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
  case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
  case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
  case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
  case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;

  // VLD4
  case ARM::VLD4dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD;
  case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
  case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
  case ARM::VLD4qWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD;
  case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
  case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
  case ARM::VLD4dWB_register_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD;
  case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
  case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
  case ARM::VLD4qWB_register_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD;
  case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
  case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
  case ARM::VLD4dAsm_8: Spacing = 1; return ARM::VLD4d8;
  case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
  case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
  case ARM::VLD4qAsm_8: Spacing = 2; return ARM::VLD4q8;
  case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
  case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
  }
}
8949
8950bool ARMAsmParser::processInstruction(MCInst &Inst,
8951 const OperandVector &Operands,
8952 unsigned MnemonicOpsEndInd,
8953 MCStreamer &Out) {
8954 // Check if we have the wide qualifier, because if it's present we
8955 // must avoid selecting a 16-bit thumb instruction.
8956 bool HasWideQualifier = false;
8957 for (auto &Op : Operands) {
8958 ARMOperand &ARMOp = static_cast<ARMOperand&>(*Op);
8959 if (ARMOp.isToken() && ARMOp.getToken() == ".w") {
8960 HasWideQualifier = true;
8961 break;
8962 }
8963 }
8964
8965 switch (Inst.getOpcode()) {
8966 case ARM::VLLDM:
8967 case ARM::VLSTM: {
8968 // In some cases both T1 and T2 are valid, causing tablegen pick T1 instead
8969 // of T2
8970 if (Operands.size() ==
8971 MnemonicOpsEndInd + 2) { // a register list has been provided
8972 ARMOperand &Op = static_cast<ARMOperand &>(
8973 *Operands[MnemonicOpsEndInd + 1]); // the register list, a dpr_reglist
8974 assert(Op.isDPRRegList());
8975 auto &RegList = Op.getRegList();
8976 // When the register list is {d0-d31} the instruction has to be the T2
8977 // variant
8978 if (RegList.size() == 32) {
8979 const unsigned Opcode =
8980 (Inst.getOpcode() == ARM::VLLDM) ? ARM::VLLDM_T2 : ARM::VLSTM_T2;
8981 MCInst TmpInst;
8982 TmpInst.setOpcode(Opcode);
8983 TmpInst.addOperand(Op: Inst.getOperand(i: 0));
8984 TmpInst.addOperand(Op: Inst.getOperand(i: 1));
8985 TmpInst.addOperand(Op: Inst.getOperand(i: 2));
8986 TmpInst.addOperand(Op: Inst.getOperand(i: 3));
8987 Inst = TmpInst;
8988 return true;
8989 }
8990 }
8991 return false;
8992 }
8993 // Alias for alternate form of 'ldr{,b}t Rt, [Rn], #imm' instruction.
8994 case ARM::LDRT_POST:
8995 case ARM::LDRBT_POST: {
8996 const unsigned Opcode =
8997 (Inst.getOpcode() == ARM::LDRT_POST) ? ARM::LDRT_POST_IMM
8998 : ARM::LDRBT_POST_IMM;
8999 MCInst TmpInst;
9000 TmpInst.setOpcode(Opcode);
9001 TmpInst.addOperand(Op: Inst.getOperand(i: 0));
9002 TmpInst.addOperand(Op: Inst.getOperand(i: 1));
9003 TmpInst.addOperand(Op: Inst.getOperand(i: 1));
9004 TmpInst.addOperand(Op: MCOperand::createReg(Reg: 0));
9005 TmpInst.addOperand(Op: MCOperand::createImm(Val: 0));
9006 TmpInst.addOperand(Op: Inst.getOperand(i: 2));
9007 TmpInst.addOperand(Op: Inst.getOperand(i: 3));
9008 Inst = TmpInst;
9009 return true;
9010 }
9011 // Alias for 'ldr{sb,h,sh}t Rt, [Rn] {, #imm}' for ommitted immediate.
9012 case ARM::LDRSBTii:
9013 case ARM::LDRHTii:
9014 case ARM::LDRSHTii: {
9015 MCInst TmpInst;
9016
9017 if (Inst.getOpcode() == ARM::LDRSBTii)
9018 TmpInst.setOpcode(ARM::LDRSBTi);
9019 else if (Inst.getOpcode() == ARM::LDRHTii)
9020 TmpInst.setOpcode(ARM::LDRHTi);
9021 else if (Inst.getOpcode() == ARM::LDRSHTii)
9022 TmpInst.setOpcode(ARM::LDRSHTi);
9023 TmpInst.addOperand(Op: Inst.getOperand(i: 0));
9024 TmpInst.addOperand(Op: Inst.getOperand(i: 1));
9025 TmpInst.addOperand(Op: Inst.getOperand(i: 1));
9026 TmpInst.addOperand(Op: MCOperand::createImm(Val: 256));
9027 TmpInst.addOperand(Op: Inst.getOperand(i: 2));
9028 Inst = TmpInst;
9029 return true;
9030 }
9031 // Alias for alternate form of 'str{,b}t Rt, [Rn], #imm' instruction.
9032 case ARM::STRT_POST:
9033 case ARM::STRBT_POST: {
9034 const unsigned Opcode =
9035 (Inst.getOpcode() == ARM::STRT_POST) ? ARM::STRT_POST_IMM
9036 : ARM::STRBT_POST_IMM;
9037 MCInst TmpInst;
9038 TmpInst.setOpcode(Opcode);
9039 TmpInst.addOperand(Op: Inst.getOperand(i: 1));
9040 TmpInst.addOperand(Op: Inst.getOperand(i: 0));
9041 TmpInst.addOperand(Op: Inst.getOperand(i: 1));
9042 TmpInst.addOperand(Op: MCOperand::createReg(Reg: 0));
9043 TmpInst.addOperand(Op: MCOperand::createImm(Val: 0));
9044 TmpInst.addOperand(Op: Inst.getOperand(i: 2));
9045 TmpInst.addOperand(Op: Inst.getOperand(i: 3));
9046 Inst = TmpInst;
9047 return true;
9048 }
9049 // Alias for alternate form of 'ADR Rd, #imm' instruction.
9050 case ARM::ADDri: {
9051 if (Inst.getOperand(i: 1).getReg() != ARM::PC || Inst.getOperand(i: 5).getReg() ||
9052 !(Inst.getOperand(i: 2).isExpr() || Inst.getOperand(i: 2).isImm()))
9053 return false;
9054 MCInst TmpInst;
9055 TmpInst.setOpcode(ARM::ADR);
9056 TmpInst.addOperand(Op: Inst.getOperand(i: 0));
9057 if (Inst.getOperand(i: 2).isImm()) {
9058 // Immediate (mod_imm) will be in its encoded form, we must unencode it
9059 // before passing it to the ADR instruction.
9060 unsigned Enc = Inst.getOperand(i: 2).getImm();
9061 TmpInst.addOperand(Op: MCOperand::createImm(
9062 Val: llvm::rotr<uint32_t>(V: Enc & 0xFF, R: (Enc & 0xF00) >> 7)));
9063 } else {
9064 // Turn PC-relative expression into absolute expression.
9065 // Reading PC provides the start of the current instruction + 8 and
9066 // the transform to adr is biased by that.
9067 MCSymbol *Dot = getContext().createTempSymbol();
9068 Out.emitLabel(Symbol: Dot);
9069 const MCExpr *OpExpr = Inst.getOperand(i: 2).getExpr();
9070 const MCExpr *InstPC = MCSymbolRefExpr::create(Symbol: Dot,
9071 Ctx&: getContext());
9072 const MCExpr *Const8 = MCConstantExpr::create(Value: 8, Ctx&: getContext());
9073 const MCExpr *ReadPC = MCBinaryExpr::createAdd(LHS: InstPC, RHS: Const8,
9074 Ctx&: getContext());
9075 const MCExpr *FixupAddr = MCBinaryExpr::createAdd(LHS: ReadPC, RHS: OpExpr,
9076 Ctx&: getContext());
9077 TmpInst.addOperand(Op: MCOperand::createExpr(Val: FixupAddr));
9078 }
9079 TmpInst.addOperand(Op: Inst.getOperand(i: 3));
9080 TmpInst.addOperand(Op: Inst.getOperand(i: 4));
9081 Inst = TmpInst;
9082 return true;
9083 }
9084 // Aliases for imm syntax of LDR instructions.
9085 case ARM::t2LDR_PRE_imm:
9086 case ARM::t2LDR_POST_imm: {
9087 MCInst TmpInst;
9088 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDR_PRE_imm ? ARM::t2LDR_PRE
9089 : ARM::t2LDR_POST);
9090 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Rt
9091 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn_wb
9092 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
9093 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // imm
9094 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // CondCode
9095 TmpInst.addOperand(Op: Inst.getOperand(i: 4));
9096 Inst = TmpInst;
9097 return true;
9098 }
9099 // Aliases for imm syntax of STR instructions.
9100 case ARM::t2STR_PRE_imm:
9101 case ARM::t2STR_POST_imm: {
9102 MCInst TmpInst;
9103 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2STR_PRE_imm ? ARM::t2STR_PRE
9104 : ARM::t2STR_POST);
9105 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn_wb
9106 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Rt
9107 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
9108 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // imm
9109 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // CondCode
9110 TmpInst.addOperand(Op: Inst.getOperand(i: 4));
9111 Inst = TmpInst;
9112 return true;
9113 }
9114 // Aliases for imm syntax of LDRB instructions.
9115 case ARM::t2LDRB_OFFSET_imm: {
9116 MCInst TmpInst;
9117 TmpInst.setOpcode(ARM::t2LDRBi8);
9118 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Rt
9119 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
9120 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // imm
9121 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // CondCode
9122 Inst = TmpInst;
9123 return true;
9124 }
9125 case ARM::t2LDRB_PRE_imm:
9126 case ARM::t2LDRB_POST_imm: {
9127 MCInst TmpInst;
9128 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRB_PRE_imm
9129 ? ARM::t2LDRB_PRE
9130 : ARM::t2LDRB_POST);
9131 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Rt
9132 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn_wb
9133 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
9134 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // imm
9135 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // CondCode
9136 TmpInst.addOperand(Op: Inst.getOperand(i: 4));
9137 Inst = TmpInst;
9138 return true;
9139 }
9140 // Aliases for imm syntax of STRB instructions.
9141 case ARM::t2STRB_OFFSET_imm: {
9142 MCInst TmpInst;
9143 TmpInst.setOpcode(ARM::t2STRBi8);
9144 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Rt
9145 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
9146 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // imm
9147 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // CondCode
9148 Inst = TmpInst;
9149 return true;
9150 }
9151 case ARM::t2STRB_PRE_imm:
9152 case ARM::t2STRB_POST_imm: {
9153 MCInst TmpInst;
9154 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2STRB_PRE_imm
9155 ? ARM::t2STRB_PRE
9156 : ARM::t2STRB_POST);
9157 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn_wb
9158 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Rt
9159 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
9160 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // imm
9161 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // CondCode
9162 TmpInst.addOperand(Op: Inst.getOperand(i: 4));
9163 Inst = TmpInst;
9164 return true;
9165 }
9166 // Aliases for imm syntax of LDRH instructions.
9167 case ARM::t2LDRH_OFFSET_imm: {
9168 MCInst TmpInst;
9169 TmpInst.setOpcode(ARM::t2LDRHi8);
9170 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Rt
9171 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
9172 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // imm
9173 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // CondCode
9174 Inst = TmpInst;
9175 return true;
9176 }
9177 case ARM::t2LDRH_PRE_imm:
9178 case ARM::t2LDRH_POST_imm: {
9179 MCInst TmpInst;
9180 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRH_PRE_imm
9181 ? ARM::t2LDRH_PRE
9182 : ARM::t2LDRH_POST);
9183 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Rt
9184 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn_wb
9185 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
9186 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // imm
9187 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // CondCode
9188 TmpInst.addOperand(Op: Inst.getOperand(i: 4));
9189 Inst = TmpInst;
9190 return true;
9191 }
9192 // Aliases for imm syntax of STRH instructions.
9193 case ARM::t2STRH_OFFSET_imm: {
9194 MCInst TmpInst;
9195 TmpInst.setOpcode(ARM::t2STRHi8);
9196 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Rt
9197 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
9198 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // imm
9199 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // CondCode
9200 Inst = TmpInst;
9201 return true;
9202 }
9203 case ARM::t2STRH_PRE_imm:
9204 case ARM::t2STRH_POST_imm: {
9205 MCInst TmpInst;
9206 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2STRH_PRE_imm
9207 ? ARM::t2STRH_PRE
9208 : ARM::t2STRH_POST);
9209 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn_wb
9210 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Rt
9211 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
9212 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // imm
9213 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // CondCode
9214 TmpInst.addOperand(Op: Inst.getOperand(i: 4));
9215 Inst = TmpInst;
9216 return true;
9217 }
9218 // Aliases for imm syntax of LDRSB instructions.
9219 case ARM::t2LDRSB_OFFSET_imm: {
9220 MCInst TmpInst;
9221 TmpInst.setOpcode(ARM::t2LDRSBi8);
9222 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Rt
9223 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
9224 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // imm
9225 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // CondCode
9226 Inst = TmpInst;
9227 return true;
9228 }
9229 case ARM::t2LDRSB_PRE_imm:
9230 case ARM::t2LDRSB_POST_imm: {
9231 MCInst TmpInst;
9232 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRSB_PRE_imm
9233 ? ARM::t2LDRSB_PRE
9234 : ARM::t2LDRSB_POST);
9235 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Rt
9236 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn_wb
9237 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
9238 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // imm
9239 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // CondCode
9240 TmpInst.addOperand(Op: Inst.getOperand(i: 4));
9241 Inst = TmpInst;
9242 return true;
9243 }
9244 // Aliases for imm syntax of LDRSH instructions.
9245 case ARM::t2LDRSH_OFFSET_imm: {
9246 MCInst TmpInst;
9247 TmpInst.setOpcode(ARM::t2LDRSHi8);
9248 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Rt
9249 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
9250 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // imm
9251 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // CondCode
9252 Inst = TmpInst;
9253 return true;
9254 }
9255 case ARM::t2LDRSH_PRE_imm:
9256 case ARM::t2LDRSH_POST_imm: {
9257 MCInst TmpInst;
9258 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRSH_PRE_imm
9259 ? ARM::t2LDRSH_PRE
9260 : ARM::t2LDRSH_POST);
9261 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Rt
9262 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn_wb
9263 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
9264 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // imm
9265 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // CondCode
9266 TmpInst.addOperand(Op: Inst.getOperand(i: 4));
9267 Inst = TmpInst;
9268 return true;
9269 }
9270 // Aliases for alternate PC+imm syntax of LDR instructions.
9271 case ARM::t2LDRpcrel:
9272 // Select the narrow version if the immediate will fit.
9273 if (Inst.getOperand(i: 1).getImm() > 0 &&
9274 Inst.getOperand(i: 1).getImm() <= 0xff &&
9275 !HasWideQualifier)
9276 Inst.setOpcode(ARM::tLDRpci);
9277 else
9278 Inst.setOpcode(ARM::t2LDRpci);
9279 return true;
9280 case ARM::t2LDRBpcrel:
9281 Inst.setOpcode(ARM::t2LDRBpci);
9282 return true;
9283 case ARM::t2LDRHpcrel:
9284 Inst.setOpcode(ARM::t2LDRHpci);
9285 return true;
9286 case ARM::t2LDRSBpcrel:
9287 Inst.setOpcode(ARM::t2LDRSBpci);
9288 return true;
9289 case ARM::t2LDRSHpcrel:
9290 Inst.setOpcode(ARM::t2LDRSHpci);
9291 return true;
9292 case ARM::LDRConstPool:
9293 case ARM::tLDRConstPool:
9294 case ARM::t2LDRConstPool: {
9295 // Pseudo instruction ldr rt, =immediate is converted to a
9296 // MOV rt, immediate if immediate is known and representable
9297 // otherwise we create a constant pool entry that we load from.
9298 MCInst TmpInst;
9299 if (Inst.getOpcode() == ARM::LDRConstPool)
9300 TmpInst.setOpcode(ARM::LDRi12);
9301 else if (Inst.getOpcode() == ARM::tLDRConstPool)
9302 TmpInst.setOpcode(ARM::tLDRpci);
9303 else if (Inst.getOpcode() == ARM::t2LDRConstPool)
9304 TmpInst.setOpcode(ARM::t2LDRpci);
9305 const ARMOperand &PoolOperand =
9306 static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1]);
9307 const MCExpr *SubExprVal = PoolOperand.getConstantPoolImm();
9308 // If SubExprVal is a constant we may be able to use a MOV
9309 if (isa<MCConstantExpr>(Val: SubExprVal) &&
9310 Inst.getOperand(i: 0).getReg() != ARM::PC &&
9311 Inst.getOperand(i: 0).getReg() != ARM::SP) {
9312 int64_t Value =
9313 (int64_t) (cast<MCConstantExpr>(Val: SubExprVal))->getValue();
9314 bool UseMov = true;
9315 bool MovHasS = true;
9316 if (Inst.getOpcode() == ARM::LDRConstPool) {
9317 // ARM Constant
9318 if (ARM_AM::getSOImmVal(Arg: Value) != -1) {
9319 Value = ARM_AM::getSOImmVal(Arg: Value);
9320 TmpInst.setOpcode(ARM::MOVi);
9321 }
9322 else if (ARM_AM::getSOImmVal(Arg: ~Value) != -1) {
9323 Value = ARM_AM::getSOImmVal(Arg: ~Value);
9324 TmpInst.setOpcode(ARM::MVNi);
9325 }
9326 else if (hasV6T2Ops() &&
9327 Value >=0 && Value < 65536) {
9328 TmpInst.setOpcode(ARM::MOVi16);
9329 MovHasS = false;
9330 }
9331 else
9332 UseMov = false;
9333 }
9334 else {
9335 // Thumb/Thumb2 Constant
9336 if (hasThumb2() &&
9337 ARM_AM::getT2SOImmVal(Arg: Value) != -1)
9338 TmpInst.setOpcode(ARM::t2MOVi);
9339 else if (hasThumb2() &&
9340 ARM_AM::getT2SOImmVal(Arg: ~Value) != -1) {
9341 TmpInst.setOpcode(ARM::t2MVNi);
9342 Value = ~Value;
9343 }
9344 else if (hasV8MBaseline() &&
9345 Value >=0 && Value < 65536) {
9346 TmpInst.setOpcode(ARM::t2MOVi16);
9347 MovHasS = false;
9348 }
9349 else
9350 UseMov = false;
9351 }
9352 if (UseMov) {
9353 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Rt
9354 TmpInst.addOperand(Op: MCOperand::createImm(Val: Value)); // Immediate
9355 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // CondCode
9356 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // CondCode
9357 if (MovHasS)
9358 TmpInst.addOperand(Op: MCOperand::createReg(Reg: 0)); // S
9359 Inst = TmpInst;
9360 return true;
9361 }
9362 }
9363 // No opportunity to use MOV/MVN create constant pool
9364 const MCExpr *CPLoc =
9365 getTargetStreamer().addConstantPoolEntry(SubExprVal,
9366 Loc: PoolOperand.getStartLoc());
9367 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Rt
9368 TmpInst.addOperand(Op: MCOperand::createExpr(Val: CPLoc)); // offset to constpool
9369 if (TmpInst.getOpcode() == ARM::LDRi12)
9370 TmpInst.addOperand(Op: MCOperand::createImm(Val: 0)); // unused offset
9371 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // CondCode
9372 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // CondCode
9373 Inst = TmpInst;
9374 return true;
9375 }
9376 // Handle NEON VST complex aliases.
9377 case ARM::VST1LNdWB_register_Asm_8:
9378 case ARM::VST1LNdWB_register_Asm_16:
9379 case ARM::VST1LNdWB_register_Asm_32: {
9380 MCInst TmpInst;
9381 // Shuffle the operands around so the lane index operand is in the
9382 // right place.
9383 unsigned Spacing;
9384 TmpInst.setOpcode(getRealVSTOpcode(Opc: Inst.getOpcode(), Spacing));
9385 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn_wb
9386 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn
9387 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // alignment
9388 TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // Rm
9389 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
9390 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // lane
9391 TmpInst.addOperand(Op: Inst.getOperand(i: 5)); // CondCode
9392 TmpInst.addOperand(Op: Inst.getOperand(i: 6));
9393 Inst = TmpInst;
9394 return true;
9395 }
9396
9397 case ARM::VST2LNdWB_register_Asm_8:
9398 case ARM::VST2LNdWB_register_Asm_16:
9399 case ARM::VST2LNdWB_register_Asm_32:
9400 case ARM::VST2LNqWB_register_Asm_16:
9401 case ARM::VST2LNqWB_register_Asm_32: {
9402 MCInst TmpInst;
9403 // Shuffle the operands around so the lane index operand is in the
9404 // right place.
9405 unsigned Spacing;
9406 TmpInst.setOpcode(getRealVSTOpcode(Opc: Inst.getOpcode(), Spacing));
9407 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn_wb
9408 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn
9409 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // alignment
9410 TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // Rm
9411 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
9412 TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
9413 Spacing));
9414 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // lane
9415 TmpInst.addOperand(Op: Inst.getOperand(i: 5)); // CondCode
9416 TmpInst.addOperand(Op: Inst.getOperand(i: 6));
9417 Inst = TmpInst;
9418 return true;
9419 }
9420
9421 case ARM::VST3LNdWB_register_Asm_8:
9422 case ARM::VST3LNdWB_register_Asm_16:
9423 case ARM::VST3LNdWB_register_Asm_32:
9424 case ARM::VST3LNqWB_register_Asm_16:
9425 case ARM::VST3LNqWB_register_Asm_32: {
9426 MCInst TmpInst;
9427 // Shuffle the operands around so the lane index operand is in the
9428 // right place.
9429 unsigned Spacing;
9430 TmpInst.setOpcode(getRealVSTOpcode(Opc: Inst.getOpcode(), Spacing));
9431 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn_wb
9432 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn
9433 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // alignment
9434 TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // Rm
9435 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
9436 TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
9437 Spacing));
9438 TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
9439 Spacing * 2));
9440 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // lane
9441 TmpInst.addOperand(Op: Inst.getOperand(i: 5)); // CondCode
9442 TmpInst.addOperand(Op: Inst.getOperand(i: 6));
9443 Inst = TmpInst;
9444 return true;
9445 }
9446
9447 case ARM::VST4LNdWB_register_Asm_8:
9448 case ARM::VST4LNdWB_register_Asm_16:
9449 case ARM::VST4LNdWB_register_Asm_32:
9450 case ARM::VST4LNqWB_register_Asm_16:
9451 case ARM::VST4LNqWB_register_Asm_32: {
9452 MCInst TmpInst;
9453 // Shuffle the operands around so the lane index operand is in the
9454 // right place.
9455 unsigned Spacing;
9456 TmpInst.setOpcode(getRealVSTOpcode(Opc: Inst.getOpcode(), Spacing));
9457 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn_wb
9458 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn
9459 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // alignment
9460 TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // Rm
9461 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
9462 TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
9463 Spacing));
9464 TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
9465 Spacing * 2));
9466 TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
9467 Spacing * 3));
9468 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // lane
9469 TmpInst.addOperand(Op: Inst.getOperand(i: 5)); // CondCode
9470 TmpInst.addOperand(Op: Inst.getOperand(i: 6));
9471 Inst = TmpInst;
9472 return true;
9473 }
9474
9475 case ARM::VST1LNdWB_fixed_Asm_8:
9476 case ARM::VST1LNdWB_fixed_Asm_16:
9477 case ARM::VST1LNdWB_fixed_Asm_32: {
9478 MCInst TmpInst;
9479 // Shuffle the operands around so the lane index operand is in the
9480 // right place.
9481 unsigned Spacing;
9482 TmpInst.setOpcode(getRealVSTOpcode(Opc: Inst.getOpcode(), Spacing));
9483 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn_wb
9484 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn
9485 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // alignment
9486 TmpInst.addOperand(Op: MCOperand::createReg(Reg: 0)); // Rm
9487 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
9488 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // lane
9489 TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // CondCode
9490 TmpInst.addOperand(Op: Inst.getOperand(i: 5));
9491 Inst = TmpInst;
9492 return true;
9493 }
9494
9495 case ARM::VST2LNdWB_fixed_Asm_8:
9496 case ARM::VST2LNdWB_fixed_Asm_16:
9497 case ARM::VST2LNdWB_fixed_Asm_32:
9498 case ARM::VST2LNqWB_fixed_Asm_16:
9499 case ARM::VST2LNqWB_fixed_Asm_32: {
9500 MCInst TmpInst;
9501 // Shuffle the operands around so the lane index operand is in the
9502 // right place.
9503 unsigned Spacing;
9504 TmpInst.setOpcode(getRealVSTOpcode(Opc: Inst.getOpcode(), Spacing));
9505 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn_wb
9506 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn
9507 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // alignment
9508 TmpInst.addOperand(Op: MCOperand::createReg(Reg: 0)); // Rm
9509 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
9510 TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
9511 Spacing));
9512 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // lane
9513 TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // CondCode
9514 TmpInst.addOperand(Op: Inst.getOperand(i: 5));
9515 Inst = TmpInst;
9516 return true;
9517 }
9518
9519 case ARM::VST3LNdWB_fixed_Asm_8:
9520 case ARM::VST3LNdWB_fixed_Asm_16:
9521 case ARM::VST3LNdWB_fixed_Asm_32:
9522 case ARM::VST3LNqWB_fixed_Asm_16:
9523 case ARM::VST3LNqWB_fixed_Asm_32: {
9524 MCInst TmpInst;
9525 // Shuffle the operands around so the lane index operand is in the
9526 // right place.
9527 unsigned Spacing;
9528 TmpInst.setOpcode(getRealVSTOpcode(Opc: Inst.getOpcode(), Spacing));
9529 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn_wb
9530 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn
9531 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // alignment
9532 TmpInst.addOperand(Op: MCOperand::createReg(Reg: 0)); // Rm
9533 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
9534 TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
9535 Spacing));
9536 TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
9537 Spacing * 2));
9538 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // lane
9539 TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // CondCode
9540 TmpInst.addOperand(Op: Inst.getOperand(i: 5));
9541 Inst = TmpInst;
9542 return true;
9543 }
9544
9545 case ARM::VST4LNdWB_fixed_Asm_8:
9546 case ARM::VST4LNdWB_fixed_Asm_16:
9547 case ARM::VST4LNdWB_fixed_Asm_32:
9548 case ARM::VST4LNqWB_fixed_Asm_16:
9549 case ARM::VST4LNqWB_fixed_Asm_32: {
9550 MCInst TmpInst;
9551 // Shuffle the operands around so the lane index operand is in the
9552 // right place.
9553 unsigned Spacing;
9554 TmpInst.setOpcode(getRealVSTOpcode(Opc: Inst.getOpcode(), Spacing));
9555 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn_wb
9556 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn
9557 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // alignment
9558 TmpInst.addOperand(Op: MCOperand::createReg(Reg: 0)); // Rm
9559 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
9560 TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
9561 Spacing));
9562 TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
9563 Spacing * 2));
9564 TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
9565 Spacing * 3));
9566 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // lane
9567 TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // CondCode
9568 TmpInst.addOperand(Op: Inst.getOperand(i: 5));
9569 Inst = TmpInst;
9570 return true;
9571 }
9572
9573 case ARM::VST1LNdAsm_8:
9574 case ARM::VST1LNdAsm_16:
9575 case ARM::VST1LNdAsm_32: {
9576 MCInst TmpInst;
9577 // Shuffle the operands around so the lane index operand is in the
9578 // right place.
9579 unsigned Spacing;
9580 TmpInst.setOpcode(getRealVSTOpcode(Opc: Inst.getOpcode(), Spacing));
9581 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn
9582 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // alignment
9583 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
9584 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // lane
9585 TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // CondCode
9586 TmpInst.addOperand(Op: Inst.getOperand(i: 5));
9587 Inst = TmpInst;
9588 return true;
9589 }
9590
9591 case ARM::VST2LNdAsm_8:
9592 case ARM::VST2LNdAsm_16:
9593 case ARM::VST2LNdAsm_32:
9594 case ARM::VST2LNqAsm_16:
9595 case ARM::VST2LNqAsm_32: {
9596 MCInst TmpInst;
9597 // Shuffle the operands around so the lane index operand is in the
9598 // right place.
9599 unsigned Spacing;
9600 TmpInst.setOpcode(getRealVSTOpcode(Opc: Inst.getOpcode(), Spacing));
9601 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn
9602 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // alignment
9603 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
9604 TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
9605 Spacing));
9606 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // lane
9607 TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // CondCode
9608 TmpInst.addOperand(Op: Inst.getOperand(i: 5));
9609 Inst = TmpInst;
9610 return true;
9611 }
9612
9613 case ARM::VST3LNdAsm_8:
9614 case ARM::VST3LNdAsm_16:
9615 case ARM::VST3LNdAsm_32:
9616 case ARM::VST3LNqAsm_16:
9617 case ARM::VST3LNqAsm_32: {
9618 MCInst TmpInst;
9619 // Shuffle the operands around so the lane index operand is in the
9620 // right place.
9621 unsigned Spacing;
9622 TmpInst.setOpcode(getRealVSTOpcode(Opc: Inst.getOpcode(), Spacing));
9623 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn
9624 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // alignment
9625 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
9626 TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
9627 Spacing));
9628 TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
9629 Spacing * 2));
9630 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // lane
9631 TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // CondCode
9632 TmpInst.addOperand(Op: Inst.getOperand(i: 5));
9633 Inst = TmpInst;
9634 return true;
9635 }
9636
9637 case ARM::VST4LNdAsm_8:
9638 case ARM::VST4LNdAsm_16:
9639 case ARM::VST4LNdAsm_32:
9640 case ARM::VST4LNqAsm_16:
9641 case ARM::VST4LNqAsm_32: {
9642 MCInst TmpInst;
9643 // Shuffle the operands around so the lane index operand is in the
9644 // right place.
9645 unsigned Spacing;
9646 TmpInst.setOpcode(getRealVSTOpcode(Opc: Inst.getOpcode(), Spacing));
9647 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn
9648 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // alignment
9649 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
9650 TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
9651 Spacing));
9652 TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
9653 Spacing * 2));
9654 TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
9655 Spacing * 3));
9656 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // lane
9657 TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // CondCode
9658 TmpInst.addOperand(Op: Inst.getOperand(i: 5));
9659 Inst = TmpInst;
9660 return true;
9661 }
9662
9663 // Handle NEON VLD complex aliases.
9664 case ARM::VLD1LNdWB_register_Asm_8:
9665 case ARM::VLD1LNdWB_register_Asm_16:
9666 case ARM::VLD1LNdWB_register_Asm_32: {
9667 MCInst TmpInst;
9668 // Shuffle the operands around so the lane index operand is in the
9669 // right place.
9670 unsigned Spacing;
9671 TmpInst.setOpcode(getRealVLDOpcode(Opc: Inst.getOpcode(), Spacing));
9672 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
9673 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn_wb
9674 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn
9675 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // alignment
9676 TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // Rm
9677 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Tied operand src (== Vd)
9678 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // lane
9679 TmpInst.addOperand(Op: Inst.getOperand(i: 5)); // CondCode
9680 TmpInst.addOperand(Op: Inst.getOperand(i: 6));
9681 Inst = TmpInst;
9682 return true;
9683 }
9684
9685 case ARM::VLD2LNdWB_register_Asm_8:
9686 case ARM::VLD2LNdWB_register_Asm_16:
9687 case ARM::VLD2LNdWB_register_Asm_32:
9688 case ARM::VLD2LNqWB_register_Asm_16:
9689 case ARM::VLD2LNqWB_register_Asm_32: {
9690 MCInst TmpInst;
9691 // Shuffle the operands around so the lane index operand is in the
9692 // right place.
9693 unsigned Spacing;
9694 TmpInst.setOpcode(getRealVLDOpcode(Opc: Inst.getOpcode(), Spacing));
9695 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
9696 TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
9697 Spacing));
9698 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn_wb
9699 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn
9700 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // alignment
9701 TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // Rm
9702 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Tied operand src (== Vd)
9703 TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
9704 Spacing));
9705 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // lane
9706 TmpInst.addOperand(Op: Inst.getOperand(i: 5)); // CondCode
9707 TmpInst.addOperand(Op: Inst.getOperand(i: 6));
9708 Inst = TmpInst;
9709 return true;
9710 }
9711
9712 case ARM::VLD3LNdWB_register_Asm_8:
9713 case ARM::VLD3LNdWB_register_Asm_16:
9714 case ARM::VLD3LNdWB_register_Asm_32:
9715 case ARM::VLD3LNqWB_register_Asm_16:
9716 case ARM::VLD3LNqWB_register_Asm_32: {
9717 MCInst TmpInst;
9718 // Shuffle the operands around so the lane index operand is in the
9719 // right place.
9720 unsigned Spacing;
9721 TmpInst.setOpcode(getRealVLDOpcode(Opc: Inst.getOpcode(), Spacing));
9722 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
9723 TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
9724 Spacing));
9725 TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
9726 Spacing * 2));
9727 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn_wb
9728 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn
9729 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // alignment
9730 TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // Rm
9731 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Tied operand src (== Vd)
9732 TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
9733 Spacing));
9734 TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
9735 Spacing * 2));
9736 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // lane
9737 TmpInst.addOperand(Op: Inst.getOperand(i: 5)); // CondCode
9738 TmpInst.addOperand(Op: Inst.getOperand(i: 6));
9739 Inst = TmpInst;
9740 return true;
9741 }
9742
  case ARM::VLD4LNdWB_register_Asm_8:
  case ARM::VLD4LNdWB_register_Asm_16:
  case ARM::VLD4LNdWB_register_Asm_32:
  case ARM::VLD4LNqWB_register_Asm_16:
  case ARM::VLD4LNqWB_register_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place. Expand the parser-only pseudo into the real
    // register-writeback VLD4LN: the 2nd-4th list registers are implicit
    // in the pseudo and are synthesized from Vd plus multiples of
    // Spacing (the list's register stride, set by getRealVLDOpcode).
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Opc: Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 2));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 3));
    TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn_wb
    TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // alignment
    TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // Rm
    // Destination registers are tied to identical source registers
    // (a lane load only replaces one lane of each register).
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Tied operand src (== Vd)
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 2));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 3));
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // lane
    TmpInst.addOperand(Op: Inst.getOperand(i: 5)); // CondCode
    TmpInst.addOperand(Op: Inst.getOperand(i: 6));
    Inst = TmpInst;
    return true;
  }
9777
  case ARM::VLD1LNdWB_fixed_Asm_8:
  case ARM::VLD1LNdWB_fixed_Asm_16:
  case ARM::VLD1LNdWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place. "Fixed" writeback (post-increment by the access size)
    // is encoded as a real instruction with Rm == 0 (no register offset).
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Opc: Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
    TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn_wb
    TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // alignment
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: 0)); // Rm
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Tied operand src (== Vd)
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // lane
    TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // CondCode
    TmpInst.addOperand(Op: Inst.getOperand(i: 5));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD2LNdWB_fixed_Asm_8:
  case ARM::VLD2LNdWB_fixed_Asm_16:
  case ARM::VLD2LNdWB_fixed_Asm_32:
  case ARM::VLD2LNqWB_fixed_Asm_16:
  case ARM::VLD2LNqWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place. The 2nd list register is implicit in the pseudo and
    // synthesized as Vd + Spacing.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Opc: Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing));
    TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn_wb
    TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // alignment
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: 0)); // Rm
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Tied operand src (== Vd)
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing));
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // lane
    TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // CondCode
    TmpInst.addOperand(Op: Inst.getOperand(i: 5));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD3LNdWB_fixed_Asm_8:
  case ARM::VLD3LNdWB_fixed_Asm_16:
  case ARM::VLD3LNdWB_fixed_Asm_32:
  case ARM::VLD3LNqWB_fixed_Asm_16:
  case ARM::VLD3LNqWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place. 2nd/3rd list registers are Vd + Spacing, Vd + 2*Spacing.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Opc: Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 2));
    TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn_wb
    TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // alignment
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: 0)); // Rm
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Tied operand src (== Vd)
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 2));
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // lane
    TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // CondCode
    TmpInst.addOperand(Op: Inst.getOperand(i: 5));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD4LNdWB_fixed_Asm_8:
  case ARM::VLD4LNdWB_fixed_Asm_16:
  case ARM::VLD4LNdWB_fixed_Asm_32:
  case ARM::VLD4LNqWB_fixed_Asm_16:
  case ARM::VLD4LNqWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place. 2nd-4th list registers are Vd + N*Spacing.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Opc: Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 2));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 3));
    TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn_wb
    TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // alignment
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: 0)); // Rm
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Tied operand src (== Vd)
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 2));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 3));
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // lane
    TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // CondCode
    TmpInst.addOperand(Op: Inst.getOperand(i: 5));
    Inst = TmpInst;
    return true;
  }
9891
  case ARM::VLD1LNdAsm_8:
  case ARM::VLD1LNdAsm_16:
  case ARM::VLD1LNdAsm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place. No-writeback variant: no Rn_wb / Rm operands.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Opc: Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
    TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // alignment
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Tied operand src (== Vd)
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // lane
    TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // CondCode
    TmpInst.addOperand(Op: Inst.getOperand(i: 5));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD2LNdAsm_8:
  case ARM::VLD2LNdAsm_16:
  case ARM::VLD2LNdAsm_32:
  case ARM::VLD2LNqAsm_16:
  case ARM::VLD2LNqAsm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place. 2nd list register is synthesized as Vd + Spacing.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Opc: Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing));
    TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // alignment
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Tied operand src (== Vd)
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing));
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // lane
    TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // CondCode
    TmpInst.addOperand(Op: Inst.getOperand(i: 5));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD3LNdAsm_8:
  case ARM::VLD3LNdAsm_16:
  case ARM::VLD3LNdAsm_32:
  case ARM::VLD3LNqAsm_16:
  case ARM::VLD3LNqAsm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place. 2nd/3rd list registers are Vd + Spacing, Vd + 2*Spacing.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Opc: Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 2));
    TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // alignment
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Tied operand src (== Vd)
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 2));
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // lane
    TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // CondCode
    TmpInst.addOperand(Op: Inst.getOperand(i: 5));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD4LNdAsm_8:
  case ARM::VLD4LNdAsm_16:
  case ARM::VLD4LNdAsm_32:
  case ARM::VLD4LNqAsm_16:
  case ARM::VLD4LNqAsm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place. 2nd-4th list registers are Vd + N*Spacing.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Opc: Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 2));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 3));
    TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // alignment
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Tied operand src (== Vd)
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 2));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 3));
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // lane
    TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // CondCode
    TmpInst.addOperand(Op: Inst.getOperand(i: 5));
    Inst = TmpInst;
    return true;
  }
9997
  // VLD3DUP single 3-element structure to all lanes instructions.
  // The parser-only pseudos carry just the first list register; the 2nd
  // and 3rd are synthesized from Vd plus Spacing (register stride of the
  // list, set by getRealVLDOpcode). DUP loads replicate to all lanes, so
  // there are no tied source operands and no lane index.
  case ARM::VLD3DUPdAsm_8:
  case ARM::VLD3DUPdAsm_16:
  case ARM::VLD3DUPdAsm_32:
  case ARM::VLD3DUPqAsm_8:
  case ARM::VLD3DUPqAsm_16:
  case ARM::VLD3DUPqAsm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Opc: Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 2));
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // alignment
    TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // CondCode
    TmpInst.addOperand(Op: Inst.getOperand(i: 4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD3DUPdWB_fixed_Asm_8:
  case ARM::VLD3DUPdWB_fixed_Asm_16:
  case ARM::VLD3DUPdWB_fixed_Asm_32:
  case ARM::VLD3DUPqWB_fixed_Asm_8:
  case ARM::VLD3DUPqWB_fixed_Asm_16:
  case ARM::VLD3DUPqWB_fixed_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Opc: Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 2));
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // alignment
    // Fixed writeback (post-increment by access size) is Rm == 0.
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: 0)); // Rm
    TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // CondCode
    TmpInst.addOperand(Op: Inst.getOperand(i: 4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD3DUPdWB_register_Asm_8:
  case ARM::VLD3DUPdWB_register_Asm_16:
  case ARM::VLD3DUPdWB_register_Asm_32:
  case ARM::VLD3DUPqWB_register_Asm_8:
  case ARM::VLD3DUPqWB_register_Asm_16:
  case ARM::VLD3DUPqWB_register_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Opc: Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 2));
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // alignment
    TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // Rm
    TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // CondCode
    TmpInst.addOperand(Op: Inst.getOperand(i: 5));
    Inst = TmpInst;
    return true;
  }
10068
  // VLD3 multiple 3-element structure instructions.
  // Same expansion scheme as VLD3DUP: synthesize the 2nd/3rd list
  // registers from Vd + Spacing / Vd + 2*Spacing.
  case ARM::VLD3dAsm_8:
  case ARM::VLD3dAsm_16:
  case ARM::VLD3dAsm_32:
  case ARM::VLD3qAsm_8:
  case ARM::VLD3qAsm_16:
  case ARM::VLD3qAsm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Opc: Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 2));
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // alignment
    TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // CondCode
    TmpInst.addOperand(Op: Inst.getOperand(i: 4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD3dWB_fixed_Asm_8:
  case ARM::VLD3dWB_fixed_Asm_16:
  case ARM::VLD3dWB_fixed_Asm_32:
  case ARM::VLD3qWB_fixed_Asm_8:
  case ARM::VLD3qWB_fixed_Asm_16:
  case ARM::VLD3qWB_fixed_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Opc: Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 2));
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // alignment
    // Fixed writeback (post-increment by access size) is Rm == 0.
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: 0)); // Rm
    TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // CondCode
    TmpInst.addOperand(Op: Inst.getOperand(i: 4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD3dWB_register_Asm_8:
  case ARM::VLD3dWB_register_Asm_16:
  case ARM::VLD3dWB_register_Asm_32:
  case ARM::VLD3qWB_register_Asm_8:
  case ARM::VLD3qWB_register_Asm_16:
  case ARM::VLD3qWB_register_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Opc: Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 2));
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // alignment
    TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // Rm
    TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // CondCode
    TmpInst.addOperand(Op: Inst.getOperand(i: 5));
    Inst = TmpInst;
    return true;
  }
10139
  // VLD4DUP single 4-element structure to all lanes instructions.
  // (Same expansion scheme as VLD3DUP, with a 4th synthesized list
  // register at Vd + 3*Spacing.)
  case ARM::VLD4DUPdAsm_8:
  case ARM::VLD4DUPdAsm_16:
  case ARM::VLD4DUPdAsm_32:
  case ARM::VLD4DUPqAsm_8:
  case ARM::VLD4DUPqAsm_16:
  case ARM::VLD4DUPqAsm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Opc: Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 2));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 3));
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // alignment
    TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // CondCode
    TmpInst.addOperand(Op: Inst.getOperand(i: 4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD4DUPdWB_fixed_Asm_8:
  case ARM::VLD4DUPdWB_fixed_Asm_16:
  case ARM::VLD4DUPdWB_fixed_Asm_32:
  case ARM::VLD4DUPqWB_fixed_Asm_8:
  case ARM::VLD4DUPqWB_fixed_Asm_16:
  case ARM::VLD4DUPqWB_fixed_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Opc: Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 2));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 3));
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // alignment
    // Fixed writeback (post-increment by access size) is Rm == 0.
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: 0)); // Rm
    TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // CondCode
    TmpInst.addOperand(Op: Inst.getOperand(i: 4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD4DUPdWB_register_Asm_8:
  case ARM::VLD4DUPdWB_register_Asm_16:
  case ARM::VLD4DUPdWB_register_Asm_32:
  case ARM::VLD4DUPqWB_register_Asm_8:
  case ARM::VLD4DUPqWB_register_Asm_16:
  case ARM::VLD4DUPqWB_register_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Opc: Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 2));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 3));
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // alignment
    TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // Rm
    TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // CondCode
    TmpInst.addOperand(Op: Inst.getOperand(i: 5));
    Inst = TmpInst;
    return true;
  }
10216
  // VLD4 multiple 4-element structure instructions.
  // Same expansion scheme as VLD3, with a 4th synthesized list register
  // at Vd + 3*Spacing.
  case ARM::VLD4dAsm_8:
  case ARM::VLD4dAsm_16:
  case ARM::VLD4dAsm_32:
  case ARM::VLD4qAsm_8:
  case ARM::VLD4qAsm_16:
  case ARM::VLD4qAsm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Opc: Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 2));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 3));
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // alignment
    TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // CondCode
    TmpInst.addOperand(Op: Inst.getOperand(i: 4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD4dWB_fixed_Asm_8:
  case ARM::VLD4dWB_fixed_Asm_16:
  case ARM::VLD4dWB_fixed_Asm_32:
  case ARM::VLD4qWB_fixed_Asm_8:
  case ARM::VLD4qWB_fixed_Asm_16:
  case ARM::VLD4qWB_fixed_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Opc: Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 2));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 3));
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // alignment
    // Fixed writeback (post-increment by access size) is Rm == 0.
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: 0)); // Rm
    TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // CondCode
    TmpInst.addOperand(Op: Inst.getOperand(i: 4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD4dWB_register_Asm_8:
  case ARM::VLD4dWB_register_Asm_16:
  case ARM::VLD4dWB_register_Asm_32:
  case ARM::VLD4qWB_register_Asm_8:
  case ARM::VLD4qWB_register_Asm_16:
  case ARM::VLD4qWB_register_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Opc: Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 2));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 3));
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // alignment
    TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // Rm
    TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // CondCode
    TmpInst.addOperand(Op: Inst.getOperand(i: 5));
    Inst = TmpInst;
    return true;
  }
10293
  // VST3 multiple 3-element structure instructions.
  // Stores place the address operands (Rn/alignment, plus writeback and
  // Rm where applicable) before the register list — the opposite order
  // from the VLD expansions above.
  case ARM::VST3dAsm_8:
  case ARM::VST3dAsm_16:
  case ARM::VST3dAsm_32:
  case ARM::VST3qAsm_8:
  case ARM::VST3qAsm_16:
  case ARM::VST3qAsm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Opc: Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // alignment
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 2));
    TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // CondCode
    TmpInst.addOperand(Op: Inst.getOperand(i: 4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VST3dWB_fixed_Asm_8:
  case ARM::VST3dWB_fixed_Asm_16:
  case ARM::VST3dWB_fixed_Asm_32:
  case ARM::VST3qWB_fixed_Asm_8:
  case ARM::VST3qWB_fixed_Asm_16:
  case ARM::VST3qWB_fixed_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Opc: Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // alignment
    // Fixed writeback (post-increment by access size) is Rm == 0.
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: 0)); // Rm
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 2));
    TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // CondCode
    TmpInst.addOperand(Op: Inst.getOperand(i: 4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VST3dWB_register_Asm_8:
  case ARM::VST3dWB_register_Asm_16:
  case ARM::VST3dWB_register_Asm_32:
  case ARM::VST3qWB_register_Asm_8:
  case ARM::VST3qWB_register_Asm_16:
  case ARM::VST3qWB_register_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Opc: Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // alignment
    TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // Rm
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 2));
    TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // CondCode
    TmpInst.addOperand(Op: Inst.getOperand(i: 5));
    Inst = TmpInst;
    return true;
  }
10364
  // VST4 multiple 4-element structure instructions.
  // Same expansion scheme as VST3, with a 4th synthesized list register
  // at Vd + 3*Spacing.
  case ARM::VST4dAsm_8:
  case ARM::VST4dAsm_16:
  case ARM::VST4dAsm_32:
  case ARM::VST4qAsm_8:
  case ARM::VST4qAsm_16:
  case ARM::VST4qAsm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Opc: Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // alignment
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 2));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 3));
    TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // CondCode
    TmpInst.addOperand(Op: Inst.getOperand(i: 4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VST4dWB_fixed_Asm_8:
  case ARM::VST4dWB_fixed_Asm_16:
  case ARM::VST4dWB_fixed_Asm_32:
  case ARM::VST4qWB_fixed_Asm_8:
  case ARM::VST4qWB_fixed_Asm_16:
  case ARM::VST4qWB_fixed_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Opc: Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // alignment
    // Fixed writeback (post-increment by access size) is Rm == 0.
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: 0)); // Rm
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 2));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 3));
    TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // CondCode
    TmpInst.addOperand(Op: Inst.getOperand(i: 4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VST4dWB_register_Asm_8:
  case ARM::VST4dWB_register_Asm_16:
  case ARM::VST4dWB_register_Asm_32:
  case ARM::VST4qWB_register_Asm_8:
  case ARM::VST4qWB_register_Asm_16:
  case ARM::VST4qWB_register_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Opc: Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // alignment
    TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // Rm
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Vd
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 2));
    TmpInst.addOperand(Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg() +
                                              Spacing * 3));
    TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // CondCode
    TmpInst.addOperand(Op: Inst.getOperand(i: 5));
    Inst = TmpInst;
    return true;
  }
10441
  // Handle encoding choice for the shift-immediate instructions.
  case ARM::t2LSLri:
  case ARM::t2LSRri:
  case ARM::t2ASRri:
    // Prefer the narrow 16-bit Thumb1 encoding when it is usable:
    // both registers must be low registers, no explicit ".w" width
    // qualifier may have been given, and the requested flag behavior
    // must match the Thumb1 encoding's fixed behavior (the 16-bit
    // shift sets flags outside an IT block and leaves them alone
    // inside one — operand 5 is the CC-out register).
    if (isARMLowRegister(Reg: Inst.getOperand(i: 0).getReg()) &&
        isARMLowRegister(Reg: Inst.getOperand(i: 1).getReg()) &&
        Inst.getOperand(i: 5).getReg() ==
            (inITBlock() ? ARM::NoRegister : ARM::CPSR) &&
        !HasWideQualifier) {
      unsigned NewOpc;
      switch (Inst.getOpcode()) {
      default: llvm_unreachable("unexpected opcode");
      case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
      case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
      case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
      }
      // The Thumb1 operands aren't in the same order. Awesome, eh?
      // Thumb1 puts CC-out (operand 5) right after Rd instead of last.
      MCInst TmpInst;
      TmpInst.setOpcode(NewOpc);
      TmpInst.addOperand(Op: Inst.getOperand(i: 0));
      TmpInst.addOperand(Op: Inst.getOperand(i: 5));
      TmpInst.addOperand(Op: Inst.getOperand(i: 1));
      TmpInst.addOperand(Op: Inst.getOperand(i: 2));
      TmpInst.addOperand(Op: Inst.getOperand(i: 3));
      TmpInst.addOperand(Op: Inst.getOperand(i: 4));
      Inst = TmpInst;
      return true;
    }
    return false;
10471
  // Handle the Thumb2 mode MOV complex aliases.
  case ARM::t2MOVsr:
  case ARM::t2MOVSsr: {
    // Which instruction to expand to depends on the CCOut operand and
    // whether we're in an IT block if the register operands are low
    // registers. The MOV-with-register-shift alias becomes a plain
    // shift instruction; a narrow (16-bit) shift is only possible when
    // all registers are low, Rd == Rn (Thumb1 shifts are two-address),
    // the flag-setting form matches the IT-block context, and no ".w"
    // qualifier was given.
    bool isNarrow = false;
    if (isARMLowRegister(Reg: Inst.getOperand(i: 0).getReg()) &&
        isARMLowRegister(Reg: Inst.getOperand(i: 1).getReg()) &&
        isARMLowRegister(Reg: Inst.getOperand(i: 2).getReg()) &&
        Inst.getOperand(i: 0).getReg() == Inst.getOperand(i: 1).getReg() &&
        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr) &&
        !HasWideQualifier)
      isNarrow = true;
    MCInst TmpInst;
    unsigned newOpc;
    // Pick the concrete shift opcode from the shift kind encoded in
    // the source-operand immediate.
    switch(ARM_AM::getSORegShOp(Op: Inst.getOperand(i: 3).getImm())) {
    default: llvm_unreachable("unexpected opcode!");
    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
    case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR : ARM::t2RORrr; break;
    }
    TmpInst.setOpcode(newOpc);
    TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Rd
    // The CC-out operand sits after Rd in the narrow encodings but
    // last in the wide ones; CPSR only for the flag-setting t2MOVSsr.
    if (isNarrow)
      TmpInst.addOperand(Op: MCOperand::createReg(
          Reg: Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : ARM::NoRegister));
    TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
    TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rm
    TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // CondCode
    TmpInst.addOperand(Op: Inst.getOperand(i: 5));
    if (!isNarrow)
      TmpInst.addOperand(Op: MCOperand::createReg(
          Reg: Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : ARM::NoRegister));
    Inst = TmpInst;
    return true;
  }
10510 case ARM::t2MOVsi:
10511 case ARM::t2MOVSsi: {
10512 // Which instruction to expand to depends on the CCOut operand and
10513 // whether we're in an IT block if the register operands are low
10514 // registers.
10515 bool isNarrow = false;
10516 if (isARMLowRegister(Reg: Inst.getOperand(i: 0).getReg()) &&
10517 isARMLowRegister(Reg: Inst.getOperand(i: 1).getReg()) &&
10518 inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi) &&
10519 !HasWideQualifier)
10520 isNarrow = true;
10521 MCInst TmpInst;
10522 unsigned newOpc;
10523 unsigned Shift = ARM_AM::getSORegShOp(Op: Inst.getOperand(i: 2).getImm());
10524 unsigned Amount = ARM_AM::getSORegOffset(Op: Inst.getOperand(i: 2).getImm());
10525 bool isMov = false;
10526 // MOV rd, rm, LSL #0 is actually a MOV instruction
10527 if (Shift == ARM_AM::lsl && Amount == 0) {
10528 isMov = true;
10529 // The 16-bit encoding of MOV rd, rm, LSL #N is explicitly encoding T2 of
10530 // MOV (register) in the ARMv8-A and ARMv8-M manuals, and immediate 0 is
10531 // unpredictable in an IT block so the 32-bit encoding T3 has to be used
10532 // instead.
10533 if (inITBlock()) {
10534 isNarrow = false;
10535 }
10536 newOpc = isNarrow ? ARM::tMOVSr : ARM::t2MOVr;
10537 } else {
10538 switch(Shift) {
10539 default: llvm_unreachable("unexpected opcode!");
10540 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
10541 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
10542 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
10543 case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
10544 case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
10545 }
10546 }
10547 if (Amount == 32) Amount = 0;
10548 TmpInst.setOpcode(newOpc);
10549 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Rd
10550 if (isNarrow && !isMov)
10551 TmpInst.addOperand(Op: MCOperand::createReg(
10552 Reg: Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : ARM::NoRegister));
10553 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
10554 if (newOpc != ARM::t2RRX && !isMov)
10555 TmpInst.addOperand(Op: MCOperand::createImm(Val: Amount));
10556 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // CondCode
10557 TmpInst.addOperand(Op: Inst.getOperand(i: 4));
10558 if (!isNarrow)
10559 TmpInst.addOperand(Op: MCOperand::createReg(
10560 Reg: Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : ARM::NoRegister));
10561 Inst = TmpInst;
10562 return true;
10563 }
10564 // Handle the ARM mode MOV complex aliases.
10565 case ARM::ASRr:
10566 case ARM::LSRr:
10567 case ARM::LSLr:
10568 case ARM::RORr: {
10569 ARM_AM::ShiftOpc ShiftTy;
10570 switch(Inst.getOpcode()) {
10571 default: llvm_unreachable("unexpected opcode!");
10572 case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
10573 case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
10574 case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
10575 case ARM::RORr: ShiftTy = ARM_AM::ror; break;
10576 }
10577 unsigned Shifter = ARM_AM::getSORegOpc(ShOp: ShiftTy, Imm: 0);
10578 MCInst TmpInst;
10579 TmpInst.setOpcode(ARM::MOVsr);
10580 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Rd
10581 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
10582 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // Rm
10583 TmpInst.addOperand(Op: MCOperand::createImm(Val: Shifter)); // Shift value and ty
10584 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // CondCode
10585 TmpInst.addOperand(Op: Inst.getOperand(i: 4));
10586 TmpInst.addOperand(Op: Inst.getOperand(i: 5)); // cc_out
10587 Inst = TmpInst;
10588 return true;
10589 }
10590 case ARM::ASRi:
10591 case ARM::LSRi:
10592 case ARM::LSLi:
10593 case ARM::RORi: {
10594 ARM_AM::ShiftOpc ShiftTy;
10595 switch(Inst.getOpcode()) {
10596 default: llvm_unreachable("unexpected opcode!");
10597 case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
10598 case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
10599 case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
10600 case ARM::RORi: ShiftTy = ARM_AM::ror; break;
10601 }
10602 // A shift by zero is a plain MOVr, not a MOVsi.
10603 unsigned Amt = Inst.getOperand(i: 2).getImm();
10604 unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
10605 // A shift by 32 should be encoded as 0 when permitted
10606 if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr))
10607 Amt = 0;
10608 unsigned Shifter = ARM_AM::getSORegOpc(ShOp: ShiftTy, Imm: Amt);
10609 MCInst TmpInst;
10610 TmpInst.setOpcode(Opc);
10611 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Rd
10612 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
10613 if (Opc == ARM::MOVsi)
10614 TmpInst.addOperand(Op: MCOperand::createImm(Val: Shifter)); // Shift value and ty
10615 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // CondCode
10616 TmpInst.addOperand(Op: Inst.getOperand(i: 4));
10617 TmpInst.addOperand(Op: Inst.getOperand(i: 5)); // cc_out
10618 Inst = TmpInst;
10619 return true;
10620 }
10621 case ARM::RRXi: {
10622 unsigned Shifter = ARM_AM::getSORegOpc(ShOp: ARM_AM::rrx, Imm: 0);
10623 MCInst TmpInst;
10624 TmpInst.setOpcode(ARM::MOVsi);
10625 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Rd
10626 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
10627 TmpInst.addOperand(Op: MCOperand::createImm(Val: Shifter)); // Shift value and ty
10628 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // CondCode
10629 TmpInst.addOperand(Op: Inst.getOperand(i: 3));
10630 TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // cc_out
10631 Inst = TmpInst;
10632 return true;
10633 }
10634 case ARM::t2LDMIA_UPD: {
10635 // If this is a load of a single register, then we should use
10636 // a post-indexed LDR instruction instead, per the ARM ARM.
10637 if (Inst.getNumOperands() != 5)
10638 return false;
10639 MCInst TmpInst;
10640 TmpInst.setOpcode(ARM::t2LDR_POST);
10641 TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // Rt
10642 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Rn_wb
10643 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
10644 TmpInst.addOperand(Op: MCOperand::createImm(Val: 4));
10645 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // CondCode
10646 TmpInst.addOperand(Op: Inst.getOperand(i: 3));
10647 Inst = TmpInst;
10648 return true;
10649 }
10650 case ARM::t2STMDB_UPD: {
10651 // If this is a store of a single register, then we should use
10652 // a pre-indexed STR instruction instead, per the ARM ARM.
10653 if (Inst.getNumOperands() != 5)
10654 return false;
10655 MCInst TmpInst;
10656 TmpInst.setOpcode(ARM::t2STR_PRE);
10657 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Rn_wb
10658 TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // Rt
10659 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
10660 TmpInst.addOperand(Op: MCOperand::createImm(Val: -4));
10661 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // CondCode
10662 TmpInst.addOperand(Op: Inst.getOperand(i: 3));
10663 Inst = TmpInst;
10664 return true;
10665 }
10666 case ARM::LDMIA_UPD:
10667 // If this is a load of a single register via a 'pop', then we should use
10668 // a post-indexed LDR instruction instead, per the ARM ARM.
10669 if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "pop" &&
10670 Inst.getNumOperands() == 5) {
10671 MCInst TmpInst;
10672 TmpInst.setOpcode(ARM::LDR_POST_IMM);
10673 TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // Rt
10674 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Rn_wb
10675 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // Rn
10676 TmpInst.addOperand(Op: MCOperand::createReg(Reg: 0)); // am2offset
10677 TmpInst.addOperand(Op: MCOperand::createImm(Val: 4));
10678 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // CondCode
10679 TmpInst.addOperand(Op: Inst.getOperand(i: 3));
10680 Inst = TmpInst;
10681 return true;
10682 }
10683 break;
10684 case ARM::STMDB_UPD:
10685 // If this is a store of a single register via a 'push', then we should use
10686 // a pre-indexed STR instruction instead, per the ARM ARM.
10687 if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "push" &&
10688 Inst.getNumOperands() == 5) {
10689 MCInst TmpInst;
10690 TmpInst.setOpcode(ARM::STR_PRE_IMM);
10691 TmpInst.addOperand(Op: Inst.getOperand(i: 0)); // Rn_wb
10692 TmpInst.addOperand(Op: Inst.getOperand(i: 4)); // Rt
10693 TmpInst.addOperand(Op: Inst.getOperand(i: 1)); // addrmode_imm12
10694 TmpInst.addOperand(Op: MCOperand::createImm(Val: -4));
10695 TmpInst.addOperand(Op: Inst.getOperand(i: 2)); // CondCode
10696 TmpInst.addOperand(Op: Inst.getOperand(i: 3));
10697 Inst = TmpInst;
10698 }
10699 break;
10700 case ARM::t2ADDri12:
10701 case ARM::t2SUBri12:
10702 case ARM::t2ADDspImm12:
10703 case ARM::t2SUBspImm12: {
10704 // If the immediate fits for encoding T3 and the generic
10705 // mnemonic was used, encoding T3 is preferred.
10706 const StringRef Token = static_cast<ARMOperand &>(*Operands[0]).getToken();
10707 if ((Token != "add" && Token != "sub") ||
10708 ARM_AM::getT2SOImmVal(Arg: Inst.getOperand(i: 2).getImm()) == -1)
10709 break;
10710 switch (Inst.getOpcode()) {
10711 case ARM::t2ADDri12:
10712 Inst.setOpcode(ARM::t2ADDri);
10713 break;
10714 case ARM::t2SUBri12:
10715 Inst.setOpcode(ARM::t2SUBri);
10716 break;
10717 case ARM::t2ADDspImm12:
10718 Inst.setOpcode(ARM::t2ADDspImm);
10719 break;
10720 case ARM::t2SUBspImm12:
10721 Inst.setOpcode(ARM::t2SUBspImm);
10722 break;
10723 }
10724
10725 Inst.addOperand(Op: MCOperand::createReg(Reg: 0)); // cc_out
10726 return true;
10727 }
10728 case ARM::tADDi8:
10729 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
10730 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
10731 // to encoding T2 if <Rd> is specified and encoding T2 is preferred
10732 // to encoding T1 if <Rd> is omitted."
10733 if (Inst.getOperand(i: 3).isImm() &&
10734 (unsigned)Inst.getOperand(i: 3).getImm() < 8 &&
10735 Operands.size() == MnemonicOpsEndInd + 3) {
10736 Inst.setOpcode(ARM::tADDi3);
10737 return true;
10738 }
10739 break;
10740 case ARM::tSUBi8:
10741 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
10742 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
10743 // to encoding T2 if <Rd> is specified and encoding T2 is preferred
10744 // to encoding T1 if <Rd> is omitted."
10745 if ((unsigned)Inst.getOperand(i: 3).getImm() < 8 &&
10746 Operands.size() == MnemonicOpsEndInd + 3) {
10747 Inst.setOpcode(ARM::tSUBi3);
10748 return true;
10749 }
10750 break;
10751 case ARM::t2ADDri:
10752 case ARM::t2SUBri: {
10753 // If the destination and first source operand are the same, and
10754 // the flags are compatible with the current IT status, use encoding T2
10755 // instead of T3. For compatibility with the system 'as'. Make sure the
10756 // wide encoding wasn't explicit.
10757 if (HasWideQualifier)
10758 break; // source code has asked for the 32-bit instruction
10759 if (Inst.getOperand(i: 0).getReg() != Inst.getOperand(i: 1).getReg())
10760 break; // tADDi8 can't take different input and output registers
10761 if (!isARMLowRegister(Reg: Inst.getOperand(i: 0).getReg()))
10762 break; // high register that tADDi8 can't access
10763 if (Inst.getOperand(i: 5).getReg() !=
10764 (inITBlock() ? ARM::NoRegister : ARM::CPSR))
10765 break; // flag-modification would require overriding the IT state
10766 if (Inst.getOperand(i: 2).isImm()) {
10767 if ((unsigned)Inst.getOperand(i: 2).getImm() > 255)
10768 break; // large immediate that tADDi8 can't contain
10769 } else {
10770 int i = (Operands[MnemonicOpsEndInd + 1]->isImm())
10771 ? MnemonicOpsEndInd + 1
10772 : MnemonicOpsEndInd + 2;
10773 MCParsedAsmOperand &Op = *Operands[i];
10774 if (isARMMCExpr(MCOp&: Op) && !isThumbI8Relocation(MCOp&: Op))
10775 break; // a type of non-immediate that tADDi8 can't represent
10776 }
10777 MCInst TmpInst;
10778 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
10779 ARM::tADDi8 : ARM::tSUBi8);
10780 TmpInst.addOperand(Op: Inst.getOperand(i: 0));
10781 TmpInst.addOperand(Op: Inst.getOperand(i: 5));
10782 TmpInst.addOperand(Op: Inst.getOperand(i: 0));
10783 TmpInst.addOperand(Op: Inst.getOperand(i: 2));
10784 TmpInst.addOperand(Op: Inst.getOperand(i: 3));
10785 TmpInst.addOperand(Op: Inst.getOperand(i: 4));
10786 Inst = TmpInst;
10787 return true;
10788 }
10789 case ARM::t2ADDspImm:
10790 case ARM::t2SUBspImm: {
10791 // Prefer T1 encoding if possible
10792 if (Inst.getOperand(i: 5).getReg() || HasWideQualifier)
10793 break;
10794 unsigned V = Inst.getOperand(i: 2).getImm();
10795 if (V & 3 || V > ((1 << 7) - 1) << 2)
10796 break;
10797 MCInst TmpInst;
10798 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDspImm ? ARM::tADDspi
10799 : ARM::tSUBspi);
10800 TmpInst.addOperand(Op: MCOperand::createReg(Reg: ARM::SP)); // destination reg
10801 TmpInst.addOperand(Op: MCOperand::createReg(Reg: ARM::SP)); // source reg
10802 TmpInst.addOperand(Op: MCOperand::createImm(Val: V / 4)); // immediate
10803 TmpInst.addOperand(Op: Inst.getOperand(i: 3)); // pred
10804 TmpInst.addOperand(Op: Inst.getOperand(i: 4));
10805 Inst = TmpInst;
10806 return true;
10807 }
10808 case ARM::t2ADDrr: {
10809 // If the destination and first source operand are the same, and
10810 // there's no setting of the flags, use encoding T2 instead of T3.
10811 // Note that this is only for ADD, not SUB. This mirrors the system
10812 // 'as' behaviour. Also take advantage of ADD being commutative.
10813 // Make sure the wide encoding wasn't explicit.
10814 bool Swap = false;
10815 auto DestReg = Inst.getOperand(i: 0).getReg();
10816 bool Transform = DestReg == Inst.getOperand(i: 1).getReg();
10817 if (!Transform && DestReg == Inst.getOperand(i: 2).getReg()) {
10818 Transform = true;
10819 Swap = true;
10820 }
10821 if (!Transform || Inst.getOperand(i: 5).getReg() || HasWideQualifier)
10822 break;
10823 MCInst TmpInst;
10824 TmpInst.setOpcode(ARM::tADDhirr);
10825 TmpInst.addOperand(Op: Inst.getOperand(i: 0));
10826 TmpInst.addOperand(Op: Inst.getOperand(i: 0));
10827 TmpInst.addOperand(Op: Inst.getOperand(i: Swap ? 1 : 2));
10828 TmpInst.addOperand(Op: Inst.getOperand(i: 3));
10829 TmpInst.addOperand(Op: Inst.getOperand(i: 4));
10830 Inst = TmpInst;
10831 return true;
10832 }
10833 case ARM::tADDrSP:
10834 // If the non-SP source operand and the destination operand are not the
10835 // same, we need to use the 32-bit encoding if it's available.
10836 if (Inst.getOperand(i: 0).getReg() != Inst.getOperand(i: 2).getReg()) {
10837 Inst.setOpcode(ARM::t2ADDrr);
10838 Inst.addOperand(Op: MCOperand::createReg(Reg: 0)); // cc_out
10839 return true;
10840 }
10841 break;
10842 case ARM::tB:
10843 // A Thumb conditional branch outside of an IT block is a tBcc.
10844 if (Inst.getOperand(i: 1).getImm() != ARMCC::AL && !inITBlock()) {
10845 Inst.setOpcode(ARM::tBcc);
10846 return true;
10847 }
10848 break;
10849 case ARM::t2B:
10850 // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
10851 if (Inst.getOperand(i: 1).getImm() != ARMCC::AL && !inITBlock()){
10852 Inst.setOpcode(ARM::t2Bcc);
10853 return true;
10854 }
10855 break;
10856 case ARM::t2Bcc:
10857 // If the conditional is AL or we're in an IT block, we really want t2B.
10858 if (Inst.getOperand(i: 1).getImm() == ARMCC::AL || inITBlock()) {
10859 Inst.setOpcode(ARM::t2B);
10860 return true;
10861 }
10862 break;
10863 case ARM::tBcc:
10864 // If the conditional is AL, we really want tB.
10865 if (Inst.getOperand(i: 1).getImm() == ARMCC::AL) {
10866 Inst.setOpcode(ARM::tB);
10867 return true;
10868 }
10869 break;
10870 case ARM::tLDMIA: {
10871 // If the register list contains any high registers, or if the writeback
10872 // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
10873 // instead if we're in Thumb2. Otherwise, this should have generated
10874 // an error in validateInstruction().
10875 MCRegister Rn = Inst.getOperand(i: 0).getReg();
10876 bool hasWritebackToken =
10877 (static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
10878 .isToken() &&
10879 static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
10880 .getToken() == "!");
10881 bool listContainsBase;
10882 if (checkLowRegisterList(Inst, OpNo: 3, Reg: Rn, HiReg: MCRegister(), containsReg&: listContainsBase) ||
10883 (!listContainsBase && !hasWritebackToken) ||
10884 (listContainsBase && hasWritebackToken)) {
10885 // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
10886 assert(isThumbTwo());
10887 Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
10888 // If we're switching to the updating version, we need to insert
10889 // the writeback tied operand.
10890 if (hasWritebackToken)
10891 Inst.insert(I: Inst.begin(),
10892 Op: MCOperand::createReg(Reg: Inst.getOperand(i: 0).getReg()));
10893 return true;
10894 }
10895 break;
10896 }
10897 case ARM::tSTMIA_UPD: {
10898 // If the register list contains any high registers, we need to use
10899 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
10900 // should have generated an error in validateInstruction().
10901 MCRegister Rn = Inst.getOperand(i: 0).getReg();
10902 bool listContainsBase;
10903 if (checkLowRegisterList(Inst, OpNo: 4, Reg: Rn, HiReg: MCRegister(), containsReg&: listContainsBase)) {
10904 // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
10905 assert(isThumbTwo());
10906 Inst.setOpcode(ARM::t2STMIA_UPD);
10907 return true;
10908 }
10909 break;
10910 }
10911 case ARM::tPOP: {
10912 bool listContainsBase;
10913 // If the register list contains any high registers, we need to use
10914 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
10915 // should have generated an error in validateInstruction().
10916 if (!checkLowRegisterList(Inst, OpNo: 2, Reg: MCRegister(), HiReg: ARM::PC, containsReg&: listContainsBase))
10917 return false;
10918 assert(isThumbTwo());
10919 Inst.setOpcode(ARM::t2LDMIA_UPD);
10920 // Add the base register and writeback operands.
10921 Inst.insert(I: Inst.begin(), Op: MCOperand::createReg(Reg: ARM::SP));
10922 Inst.insert(I: Inst.begin(), Op: MCOperand::createReg(Reg: ARM::SP));
10923 return true;
10924 }
10925 case ARM::tPUSH: {
10926 bool listContainsBase;
10927 if (!checkLowRegisterList(Inst, OpNo: 2, Reg: MCRegister(), HiReg: ARM::LR, containsReg&: listContainsBase))
10928 return false;
10929 assert(isThumbTwo());
10930 Inst.setOpcode(ARM::t2STMDB_UPD);
10931 // Add the base register and writeback operands.
10932 Inst.insert(I: Inst.begin(), Op: MCOperand::createReg(Reg: ARM::SP));
10933 Inst.insert(I: Inst.begin(), Op: MCOperand::createReg(Reg: ARM::SP));
10934 return true;
10935 }
10936 case ARM::t2MOVi:
10937 // If we can use the 16-bit encoding and the user didn't explicitly
10938 // request the 32-bit variant, transform it here.
10939 if (isARMLowRegister(Reg: Inst.getOperand(i: 0).getReg()) &&
10940 (Inst.getOperand(i: 1).isImm() &&
10941 (unsigned)Inst.getOperand(i: 1).getImm() <= 255) &&
10942 Inst.getOperand(i: 4).getReg() ==
10943 (inITBlock() ? ARM::NoRegister : ARM::CPSR) &&
10944 !HasWideQualifier) {
10945 // The operands aren't in the same order for tMOVi8...
10946 MCInst TmpInst;
10947 TmpInst.setOpcode(ARM::tMOVi8);
10948 TmpInst.addOperand(Op: Inst.getOperand(i: 0));
10949 TmpInst.addOperand(Op: Inst.getOperand(i: 4));
10950 TmpInst.addOperand(Op: Inst.getOperand(i: 1));
10951 TmpInst.addOperand(Op: Inst.getOperand(i: 2));
10952 TmpInst.addOperand(Op: Inst.getOperand(i: 3));
10953 Inst = TmpInst;
10954 return true;
10955 }
10956 break;
10957
10958 case ARM::t2MOVr:
10959 // If we can use the 16-bit encoding and the user didn't explicitly
10960 // request the 32-bit variant, transform it here.
10961 if (isARMLowRegister(Reg: Inst.getOperand(i: 0).getReg()) &&
10962 isARMLowRegister(Reg: Inst.getOperand(i: 1).getReg()) &&
10963 Inst.getOperand(i: 2).getImm() == ARMCC::AL &&
10964 Inst.getOperand(i: 4).getReg() == ARM::CPSR &&
10965 !HasWideQualifier) {
10966 // The operands aren't the same for tMOV[S]r... (no cc_out)
10967 MCInst TmpInst;
10968 unsigned Op = Inst.getOperand(i: 4).getReg() ? ARM::tMOVSr : ARM::tMOVr;
10969 TmpInst.setOpcode(Op);
10970 TmpInst.addOperand(Op: Inst.getOperand(i: 0));
10971 TmpInst.addOperand(Op: Inst.getOperand(i: 1));
10972 if (Op == ARM::tMOVr) {
10973 TmpInst.addOperand(Op: Inst.getOperand(i: 2));
10974 TmpInst.addOperand(Op: Inst.getOperand(i: 3));
10975 }
10976 Inst = TmpInst;
10977 return true;
10978 }
10979 break;
10980
10981 case ARM::t2SXTH:
10982 case ARM::t2SXTB:
10983 case ARM::t2UXTH:
10984 case ARM::t2UXTB:
10985 // If we can use the 16-bit encoding and the user didn't explicitly
10986 // request the 32-bit variant, transform it here.
10987 if (isARMLowRegister(Reg: Inst.getOperand(i: 0).getReg()) &&
10988 isARMLowRegister(Reg: Inst.getOperand(i: 1).getReg()) &&
10989 Inst.getOperand(i: 2).getImm() == 0 &&
10990 !HasWideQualifier) {
10991 unsigned NewOpc;
10992 switch (Inst.getOpcode()) {
10993 default: llvm_unreachable("Illegal opcode!");
10994 case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
10995 case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
10996 case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
10997 case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
10998 }
10999 // The operands aren't the same for thumb1 (no rotate operand).
11000 MCInst TmpInst;
11001 TmpInst.setOpcode(NewOpc);
11002 TmpInst.addOperand(Op: Inst.getOperand(i: 0));
11003 TmpInst.addOperand(Op: Inst.getOperand(i: 1));
11004 TmpInst.addOperand(Op: Inst.getOperand(i: 3));
11005 TmpInst.addOperand(Op: Inst.getOperand(i: 4));
11006 Inst = TmpInst;
11007 return true;
11008 }
11009 break;
11010
11011 case ARM::MOVsi: {
11012 ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Op: Inst.getOperand(i: 2).getImm());
11013 // rrx shifts and asr/lsr of #32 is encoded as 0
11014 if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
11015 return false;
11016 if (ARM_AM::getSORegOffset(Op: Inst.getOperand(i: 2).getImm()) == 0) {
11017 // Shifting by zero is accepted as a vanilla 'MOVr'
11018 MCInst TmpInst;
11019 TmpInst.setOpcode(ARM::MOVr);
11020 TmpInst.addOperand(Op: Inst.getOperand(i: 0));
11021 TmpInst.addOperand(Op: Inst.getOperand(i: 1));
11022 TmpInst.addOperand(Op: Inst.getOperand(i: 3));
11023 TmpInst.addOperand(Op: Inst.getOperand(i: 4));
11024 TmpInst.addOperand(Op: Inst.getOperand(i: 5));
11025 Inst = TmpInst;
11026 return true;
11027 }
11028 return false;
11029 }
11030 case ARM::ANDrsi:
11031 case ARM::ORRrsi:
11032 case ARM::EORrsi:
11033 case ARM::BICrsi:
11034 case ARM::SUBrsi:
11035 case ARM::ADDrsi: {
11036 unsigned newOpc;
11037 ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Op: Inst.getOperand(i: 3).getImm());
11038 if (SOpc == ARM_AM::rrx) return false;
11039 switch (Inst.getOpcode()) {
11040 default: llvm_unreachable("unexpected opcode!");
11041 case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
11042 case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
11043 case ARM::EORrsi: newOpc = ARM::EORrr; break;
11044 case ARM::BICrsi: newOpc = ARM::BICrr; break;
11045 case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
11046 case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
11047 }
11048 // If the shift is by zero, use the non-shifted instruction definition.
11049 // The exception is for right shifts, where 0 == 32
11050 if (ARM_AM::getSORegOffset(Op: Inst.getOperand(i: 3).getImm()) == 0 &&
11051 !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) {
11052 MCInst TmpInst;
11053 TmpInst.setOpcode(newOpc);
11054 TmpInst.addOperand(Op: Inst.getOperand(i: 0));
11055 TmpInst.addOperand(Op: Inst.getOperand(i: 1));
11056 TmpInst.addOperand(Op: Inst.getOperand(i: 2));
11057 TmpInst.addOperand(Op: Inst.getOperand(i: 4));
11058 TmpInst.addOperand(Op: Inst.getOperand(i: 5));
11059 TmpInst.addOperand(Op: Inst.getOperand(i: 6));
11060 Inst = TmpInst;
11061 return true;
11062 }
11063 return false;
11064 }
11065 case ARM::ITasm:
11066 case ARM::t2IT: {
11067 // Set up the IT block state according to the IT instruction we just
11068 // matched.
11069 assert(!inITBlock() && "nested IT blocks?!");
11070 startExplicitITBlock(Cond: ARMCC::CondCodes(Inst.getOperand(i: 0).getImm()),
11071 Mask: Inst.getOperand(i: 1).getImm());
11072 break;
11073 }
11074 case ARM::t2LSLrr:
11075 case ARM::t2LSRrr:
11076 case ARM::t2ASRrr:
11077 case ARM::t2SBCrr:
11078 case ARM::t2RORrr:
11079 case ARM::t2BICrr:
11080 // Assemblers should use the narrow encodings of these instructions when permissible.
11081 if ((isARMLowRegister(Reg: Inst.getOperand(i: 1).getReg()) &&
11082 isARMLowRegister(Reg: Inst.getOperand(i: 2).getReg())) &&
11083 Inst.getOperand(i: 0).getReg() == Inst.getOperand(i: 1).getReg() &&
11084 Inst.getOperand(i: 5).getReg() ==
11085 (inITBlock() ? ARM::NoRegister : ARM::CPSR) &&
11086 !HasWideQualifier) {
11087 unsigned NewOpc;
11088 switch (Inst.getOpcode()) {
11089 default: llvm_unreachable("unexpected opcode");
11090 case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break;
11091 case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break;
11092 case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break;
11093 case ARM::t2SBCrr: NewOpc = ARM::tSBC; break;
11094 case ARM::t2RORrr: NewOpc = ARM::tROR; break;
11095 case ARM::t2BICrr: NewOpc = ARM::tBIC; break;
11096 }
11097 MCInst TmpInst;
11098 TmpInst.setOpcode(NewOpc);
11099 TmpInst.addOperand(Op: Inst.getOperand(i: 0));
11100 TmpInst.addOperand(Op: Inst.getOperand(i: 5));
11101 TmpInst.addOperand(Op: Inst.getOperand(i: 1));
11102 TmpInst.addOperand(Op: Inst.getOperand(i: 2));
11103 TmpInst.addOperand(Op: Inst.getOperand(i: 3));
11104 TmpInst.addOperand(Op: Inst.getOperand(i: 4));
11105 Inst = TmpInst;
11106 return true;
11107 }
11108 return false;
11109
11110 case ARM::t2ANDrr:
11111 case ARM::t2EORrr:
11112 case ARM::t2ADCrr:
11113 case ARM::t2ORRrr:
11114 // Assemblers should use the narrow encodings of these instructions when permissible.
11115 // These instructions are special in that they are commutable, so shorter encodings
11116 // are available more often.
11117 if ((isARMLowRegister(Reg: Inst.getOperand(i: 1).getReg()) &&
11118 isARMLowRegister(Reg: Inst.getOperand(i: 2).getReg())) &&
11119 (Inst.getOperand(i: 0).getReg() == Inst.getOperand(i: 1).getReg() ||
11120 Inst.getOperand(i: 0).getReg() == Inst.getOperand(i: 2).getReg()) &&
11121 Inst.getOperand(i: 5).getReg() ==
11122 (inITBlock() ? ARM::NoRegister : ARM::CPSR) &&
11123 !HasWideQualifier) {
11124 unsigned NewOpc;
11125 switch (Inst.getOpcode()) {
11126 default: llvm_unreachable("unexpected opcode");
11127 case ARM::t2ADCrr: NewOpc = ARM::tADC; break;
11128 case ARM::t2ANDrr: NewOpc = ARM::tAND; break;
11129 case ARM::t2EORrr: NewOpc = ARM::tEOR; break;
11130 case ARM::t2ORRrr: NewOpc = ARM::tORR; break;
11131 }
11132 MCInst TmpInst;
11133 TmpInst.setOpcode(NewOpc);
11134 TmpInst.addOperand(Op: Inst.getOperand(i: 0));
11135 TmpInst.addOperand(Op: Inst.getOperand(i: 5));
11136 if (Inst.getOperand(i: 0).getReg() == Inst.getOperand(i: 1).getReg()) {
11137 TmpInst.addOperand(Op: Inst.getOperand(i: 1));
11138 TmpInst.addOperand(Op: Inst.getOperand(i: 2));
11139 } else {
11140 TmpInst.addOperand(Op: Inst.getOperand(i: 2));
11141 TmpInst.addOperand(Op: Inst.getOperand(i: 1));
11142 }
11143 TmpInst.addOperand(Op: Inst.getOperand(i: 3));
11144 TmpInst.addOperand(Op: Inst.getOperand(i: 4));
11145 Inst = TmpInst;
11146 return true;
11147 }
11148 return false;
11149 case ARM::MVE_VPST:
11150 case ARM::MVE_VPTv16i8:
11151 case ARM::MVE_VPTv8i16:
11152 case ARM::MVE_VPTv4i32:
11153 case ARM::MVE_VPTv16u8:
11154 case ARM::MVE_VPTv8u16:
11155 case ARM::MVE_VPTv4u32:
11156 case ARM::MVE_VPTv16s8:
11157 case ARM::MVE_VPTv8s16:
11158 case ARM::MVE_VPTv4s32:
11159 case ARM::MVE_VPTv4f32:
11160 case ARM::MVE_VPTv8f16:
11161 case ARM::MVE_VPTv16i8r:
11162 case ARM::MVE_VPTv8i16r:
11163 case ARM::MVE_VPTv4i32r:
11164 case ARM::MVE_VPTv16u8r:
11165 case ARM::MVE_VPTv8u16r:
11166 case ARM::MVE_VPTv4u32r:
11167 case ARM::MVE_VPTv16s8r:
11168 case ARM::MVE_VPTv8s16r:
11169 case ARM::MVE_VPTv4s32r:
11170 case ARM::MVE_VPTv4f32r:
11171 case ARM::MVE_VPTv8f16r: {
11172 assert(!inVPTBlock() && "Nested VPT blocks are not allowed");
11173 MCOperand &MO = Inst.getOperand(i: 0);
11174 VPTState.Mask = MO.getImm();
11175 VPTState.CurPosition = 0;
11176 break;
11177 }
11178 }
11179 return false;
11180}
11181
11182unsigned
11183ARMAsmParser::checkEarlyTargetMatchPredicate(MCInst &Inst,
11184 const OperandVector &Operands) {
11185 unsigned Opc = Inst.getOpcode();
11186 switch (Opc) {
11187 // Prevent the mov r8 r8 encoding for nop being selected when the v6/thumb 2
11188 // encoding is available.
11189 case ARM::tMOVr: {
11190 if (Operands[0]->isToken() &&
11191 static_cast<ARMOperand &>(*Operands[0]).getToken() == "nop" &&
11192 ((isThumb() && !isThumbOne()) || hasV6MOps())) {
11193 return Match_MnemonicFail;
11194 }
11195 }
11196 [[fallthrough]];
11197 default:
11198 return Match_Success;
11199 }
11200}
11201
11202unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
11203 // 16-bit thumb arithmetic instructions either require or preclude the 'S'
11204 // suffix depending on whether they're in an IT block or not.
11205 unsigned Opc = Inst.getOpcode();
11206 const MCInstrDesc &MCID = MII.get(Opcode: Opc);
11207 if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
11208 assert(MCID.hasOptionalDef() &&
11209 "optionally flag setting instruction missing optional def operand");
11210 assert(MCID.NumOperands == Inst.getNumOperands() &&
11211 "operand count mismatch!");
11212 bool IsCPSR = false;
11213 // Check if the instruction has CPSR set.
11214 for (unsigned OpNo = 0; OpNo < MCID.NumOperands; ++OpNo) {
11215 if (MCID.operands()[OpNo].isOptionalDef() &&
11216 Inst.getOperand(i: OpNo).isReg() &&
11217 Inst.getOperand(i: OpNo).getReg() == ARM::CPSR)
11218 IsCPSR = true;
11219 }
11220
11221 // If we're parsing Thumb1, reject it completely.
11222 if (isThumbOne() && !IsCPSR)
11223 return Match_RequiresFlagSetting;
11224 // If we're parsing Thumb2, which form is legal depends on whether we're
11225 // in an IT block.
11226 if (isThumbTwo() && !IsCPSR && !inITBlock())
11227 return Match_RequiresITBlock;
11228 if (isThumbTwo() && IsCPSR && inITBlock())
11229 return Match_RequiresNotITBlock;
11230 // LSL with zero immediate is not allowed in an IT block
11231 if (Opc == ARM::tLSLri && Inst.getOperand(i: 3).getImm() == 0 && inITBlock())
11232 return Match_RequiresNotITBlock;
11233 } else if (isThumbOne()) {
11234 // Some high-register supporting Thumb1 encodings only allow both registers
11235 // to be from r0-r7 when in Thumb2.
11236 if (Opc == ARM::tADDhirr && !hasV6MOps() &&
11237 isARMLowRegister(Reg: Inst.getOperand(i: 1).getReg()) &&
11238 isARMLowRegister(Reg: Inst.getOperand(i: 2).getReg()))
11239 return Match_RequiresThumb2;
11240 // Others only require ARMv6 or later.
11241 else if (Opc == ARM::tMOVr && !hasV6Ops() &&
11242 isARMLowRegister(Reg: Inst.getOperand(i: 0).getReg()) &&
11243 isARMLowRegister(Reg: Inst.getOperand(i: 1).getReg()))
11244 return Match_RequiresV6;
11245 }
11246
11247 // Before ARMv8 the rules for when SP is allowed in t2MOVr are more complex
11248 // than the loop below can handle, so it uses the GPRnopc register class and
11249 // we do SP handling here.
11250 if (Opc == ARM::t2MOVr && !hasV8Ops())
11251 {
11252 // SP as both source and destination is not allowed
11253 if (Inst.getOperand(i: 0).getReg() == ARM::SP &&
11254 Inst.getOperand(i: 1).getReg() == ARM::SP)
11255 return Match_RequiresV8;
11256 // When flags-setting SP as either source or destination is not allowed
11257 if (Inst.getOperand(i: 4).getReg() == ARM::CPSR &&
11258 (Inst.getOperand(i: 0).getReg() == ARM::SP ||
11259 Inst.getOperand(i: 1).getReg() == ARM::SP))
11260 return Match_RequiresV8;
11261 }
11262
11263 switch (Inst.getOpcode()) {
11264 case ARM::VMRS:
11265 case ARM::VMSR:
11266 case ARM::VMRS_FPCXTS:
11267 case ARM::VMRS_FPCXTNS:
11268 case ARM::VMSR_FPCXTS:
11269 case ARM::VMSR_FPCXTNS:
11270 case ARM::VMRS_FPSCR_NZCVQC:
11271 case ARM::VMSR_FPSCR_NZCVQC:
11272 case ARM::FMSTAT:
11273 case ARM::VMRS_VPR:
11274 case ARM::VMRS_P0:
11275 case ARM::VMSR_VPR:
11276 case ARM::VMSR_P0:
11277 // Use of SP for VMRS/VMSR is only allowed in ARM mode with the exception of
11278 // ARMv8-A.
11279 if (Inst.getOperand(i: 0).isReg() && Inst.getOperand(i: 0).getReg() == ARM::SP &&
11280 (isThumb() && !hasV8Ops()))
11281 return Match_InvalidOperand;
11282 break;
11283 case ARM::t2TBB:
11284 case ARM::t2TBH:
11285 // Rn = sp is only allowed with ARMv8-A
11286 if (!hasV8Ops() && (Inst.getOperand(i: 0).getReg() == ARM::SP))
11287 return Match_RequiresV8;
11288 break;
11289 case ARM::tMUL:
11290 // The second source operand must be the same register as the destination
11291 // operand.
11292 // FIXME: Ideally this would be handled by ARMGenAsmMatcher and
11293 // emitAsmTiedOperandConstraints.
11294 if (Inst.getOperand(i: 0).getReg() != Inst.getOperand(i: 3).getReg())
11295 return Match_InvalidTiedOperand;
11296 break;
11297 default:
11298 break;
11299 }
11300
11301 for (unsigned I = 0; I < MCID.NumOperands; ++I)
11302 if (MCID.operands()[I].RegClass == ARM::rGPRRegClassID) {
11303 // rGPRRegClass excludes PC, and also excluded SP before ARMv8
11304 const auto &Op = Inst.getOperand(i: I);
11305 if (!Op.isReg()) {
11306 // This can happen in awkward cases with tied operands, e.g. a
11307 // writeback load/store with a complex addressing mode in
11308 // which there's an output operand corresponding to the
11309 // updated written-back base register: the Tablegen-generated
11310 // AsmMatcher will have written a placeholder operand to that
11311 // slot in the form of an immediate 0, because it can't
11312 // generate the register part of the complex addressing-mode
11313 // operand ahead of time.
11314 continue;
11315 }
11316
11317 MCRegister Reg = Op.getReg();
11318 if ((Reg == ARM::SP) && !hasV8Ops())
11319 return Match_RequiresV8;
11320 else if (Reg == ARM::PC)
11321 return Match_InvalidOperand;
11322 }
11323
11324 return Match_Success;
11325}
11326
namespace llvm {

// Specialization for MCInst of the CPSR-liveness query used by the Thumb
// encoding checks. When parsing textual assembly there is no liveness
// information available, so conservatively report CPSR as dead.
template <> inline bool IsCPSRDead<MCInst>(const MCInst *Instr) {
  return true; // In an assembly source, no need to second-guess
}

} // end namespace llvm
11334
// Returns true if Inst is unpredictable if it is in an IT block, but is not
// the last instruction in the block.
bool ARMAsmParser::isITBlockTerminator(MCInst &Inst) const {
  const MCInstrDesc &MCID = MII.get(Opcode: Inst.getOpcode());

  // All branch & call instructions terminate IT blocks with the exception of
  // SVC.
  if (MCID.isTerminator() || (MCID.isCall() && Inst.getOpcode() != ARM::tSVC) ||
      MCID.isReturn() || MCID.isBranch() || MCID.isIndirectBranch())
    return true;

  // Any arithmetic instruction which writes to the PC also terminates the IT
  // block.
  if (MCID.hasDefOfPhysReg(MI: Inst, Reg: ARM::PC, RI: *MRI))
    return true;

  // Everything else may continue an IT block.
  return false;
}
11353
/// Match a single parsed instruction, possibly creating or extending an
/// implicit IT block in Thumb2 mode. On Match_Success, \p EmitInITBlock tells
/// the caller whether the instruction must be queued inside the pending
/// (implicit) IT block rather than emitted directly. Three strategies are
/// tried in order: extend the current implicit IT block, match outside any IT
/// block, and finally open a fresh implicit IT block.
unsigned ARMAsmParser::MatchInstruction(OperandVector &Operands, MCInst &Inst,
                                        SmallVectorImpl<NearMissInfo> &NearMisses,
                                        bool MatchingInlineAsm,
                                        bool &EmitInITBlock,
                                        MCStreamer &Out) {
  // If we can't use an implicit IT block here, just match as normal.
  if (inExplicitITBlock() || !isThumbTwo() || !useImplicitITThumb())
    return MatchInstructionImpl(Operands, Inst, NearMisses: &NearMisses, matchingInlineAsm: MatchingInlineAsm);

  // Try to match the instruction in an extension of the current IT block (if
  // there is one).
  if (inImplicitITBlock()) {
    extendImplicitITBlock(Cond: ITState.Cond);
    if (MatchInstructionImpl(Operands, Inst, NearMisses: nullptr, matchingInlineAsm: MatchingInlineAsm) ==
        Match_Success) {
      // The match succeeded, but we still have to check that the instruction is
      // valid in this implicit IT block.
      const MCInstrDesc &MCID = MII.get(Opcode: Inst.getOpcode());
      if (MCID.isPredicable()) {
        ARMCC::CondCodes InstCond =
            (ARMCC::CondCodes)Inst.getOperand(i: MCID.findFirstPredOperandIdx())
                .getImm();
        ARMCC::CondCodes ITCond = currentITCond();
        if (InstCond == ITCond) {
          // Same condition as the block: emit as a 'T' (then) slot.
          EmitInITBlock = true;
          return Match_Success;
        } else if (InstCond == ARMCC::getOppositeCondition(CC: ITCond)) {
          // Opposite condition: flip the IT mask bit so this becomes an
          // 'E' (else) slot.
          invertCurrentITCondition();
          EmitInITBlock = true;
          return Match_Success;
        }
      }
    }
    // Could not be folded into the block; undo the speculative extension.
    rewindImplicitITPosition();
  }

  // Finish the current IT block, and try to match outside any IT block.
  flushPendingInstructions(Out);
  unsigned PlainMatchResult =
      MatchInstructionImpl(Operands, Inst, NearMisses: &NearMisses, matchingInlineAsm: MatchingInlineAsm);
  if (PlainMatchResult == Match_Success) {
    const MCInstrDesc &MCID = MII.get(Opcode: Inst.getOpcode());
    if (MCID.isPredicable()) {
      ARMCC::CondCodes InstCond =
          (ARMCC::CondCodes)Inst.getOperand(i: MCID.findFirstPredOperandIdx())
              .getImm();
      // Some forms of the branch instruction have their own condition code
      // fields, so can be conditionally executed without an IT block.
      if (Inst.getOpcode() == ARM::tBcc || Inst.getOpcode() == ARM::t2Bcc) {
        EmitInITBlock = false;
        return Match_Success;
      }
      if (InstCond == ARMCC::AL) {
        // Unconditional instructions never need an IT block.
        EmitInITBlock = false;
        return Match_Success;
      }
    } else {
      EmitInITBlock = false;
      return Match_Success;
    }
  }

  // Try to match in a new IT block. The matcher doesn't check the actual
  // condition, so we create an IT block with a dummy condition, and fix it up
  // once we know the actual condition.
  startImplicitITBlock();
  if (MatchInstructionImpl(Operands, Inst, NearMisses: nullptr, matchingInlineAsm: MatchingInlineAsm) ==
      Match_Success) {
    const MCInstrDesc &MCID = MII.get(Opcode: Inst.getOpcode());
    if (MCID.isPredicable()) {
      ITState.Cond =
          (ARMCC::CondCodes)Inst.getOperand(i: MCID.findFirstPredOperandIdx())
              .getImm();
      EmitInITBlock = true;
      return Match_Success;
    }
  }
  discardImplicitITBlock();

  // If none of these succeed, return the error we got when trying to match
  // outside any IT blocks.
  EmitInITBlock = false;
  return PlainMatchResult;
}
11438
11439static std::string ARMMnemonicSpellCheck(StringRef S, const FeatureBitset &FBS,
11440 unsigned VariantID = 0);
11441
11442static const char *getSubtargetFeatureName(uint64_t Val);
/// Top-level match-and-emit hook called by the generic assembler driver.
/// Returns true on error (after reporting a diagnostic), false on success.
bool ARMAsmParser::matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                           OperandVector &Operands,
                                           MCStreamer &Out, uint64_t &ErrorInfo,
                                           bool MatchingInlineAsm) {
  MCInst Inst;
  unsigned MatchResult;
  // Set by MatchInstruction when the instruction must be queued as part of a
  // pending implicit IT block instead of being emitted immediately.
  bool PendConditionalInstruction = false;

  SmallVector<NearMissInfo, 4> NearMisses;
  MatchResult = MatchInstruction(Operands, Inst, NearMisses, MatchingInlineAsm,
                                 EmitInITBlock&: PendConditionalInstruction, Out);

  // Find the number of operands that are part of the Mnemonic (LHS).
  unsigned MnemonicOpsEndInd = getMnemonicOpsEndInd(Operands);

  switch (MatchResult) {
  case Match_Success:
    LLVM_DEBUG(dbgs() << "Parsed as: ";
               Inst.dump_pretty(dbgs(), MII.getName(Inst.getOpcode()));
               dbgs() << "\n");

    // Context sensitive operand constraints aren't handled by the matcher,
    // so check them here.
    if (validateInstruction(Inst, Operands, MnemonicOpsEndInd)) {
      // Still progress the IT block, otherwise one wrong condition causes
      // nasty cascading errors.
      forwardITPosition();
      forwardVPTPosition();
      return true;
    }

    {
      // Some instructions need post-processing to, for example, tweak which
      // encoding is selected. Loop on it while changes happen so the
      // individual transformations can chain off each other. E.g.,
      // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
      while (processInstruction(Inst, Operands, MnemonicOpsEndInd, Out))
        LLVM_DEBUG(dbgs() << "Changed to: ";
                   Inst.dump_pretty(dbgs(), MII.getName(Inst.getOpcode()));
                   dbgs() << "\n");
    }

    // Only move forward at the very end so that everything in validate
    // and process gets a consistent answer about whether we're in an IT
    // block.
    forwardITPosition();
    forwardVPTPosition();

    // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
    // doesn't actually encode.
    if (Inst.getOpcode() == ARM::ITasm)
      return false;

    Inst.setLoc(IDLoc);
    if (PendConditionalInstruction) {
      // Queue the instruction; flush once the implicit IT block can't grow.
      PendingConditionalInsts.push_back(Elt: Inst);
      if (isITBlockFull() || isITBlockTerminator(Inst))
        flushPendingInstructions(Out);
    } else {
      Out.emitInstruction(Inst, STI: getSTI());
    }
    return false;
  case Match_NearMisses:
    ReportNearMisses(NearMisses, IDLoc, Operands);
    return true;
  case Match_MnemonicFail: {
    // Unknown mnemonic: offer a spelling suggestion if a close one exists.
    FeatureBitset FBS = ComputeAvailableFeatures(FB: getSTI().getFeatureBits());
    std::string Suggestion = ARMMnemonicSpellCheck(
      S: ((ARMOperand &)*Operands[0]).getToken(), FBS);
    return Error(L: IDLoc, Msg: "invalid instruction" + Suggestion,
                 Range: ((ARMOperand &)*Operands[0]).getLocRange());
  }
  }

  llvm_unreachable("Implement any new match types added!");
}
11519
/// ParseDirective parses the arm specific directives
/// Returns false when the directive was recognized and handled (even if it
/// produced a diagnostic), true to let the generic parser try it.
bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
  const MCContext::Environment Format = getContext().getObjectFileType();
  bool IsMachO = Format == MCContext::IsMachO;
  bool IsCOFF = Format == MCContext::IsCOFF;

  // Directives are matched case-insensitively.
  std::string IDVal = DirectiveID.getIdentifier().lower();
  // Directives available in every object-file format.
  if (IDVal == ".word")
    parseLiteralValues(Size: 4, L: DirectiveID.getLoc());
  else if (IDVal == ".short" || IDVal == ".hword")
    parseLiteralValues(Size: 2, L: DirectiveID.getLoc());
  else if (IDVal == ".thumb")
    parseDirectiveThumb(L: DirectiveID.getLoc());
  else if (IDVal == ".arm")
    parseDirectiveARM(L: DirectiveID.getLoc());
  else if (IDVal == ".thumb_func")
    parseDirectiveThumbFunc(L: DirectiveID.getLoc());
  else if (IDVal == ".code")
    parseDirectiveCode(L: DirectiveID.getLoc());
  else if (IDVal == ".syntax")
    parseDirectiveSyntax(L: DirectiveID.getLoc());
  else if (IDVal == ".unreq")
    parseDirectiveUnreq(L: DirectiveID.getLoc());
  else if (IDVal == ".fnend")
    parseDirectiveFnEnd(L: DirectiveID.getLoc());
  else if (IDVal == ".cantunwind")
    parseDirectiveCantUnwind(L: DirectiveID.getLoc());
  else if (IDVal == ".personality")
    parseDirectivePersonality(L: DirectiveID.getLoc());
  else if (IDVal == ".handlerdata")
    parseDirectiveHandlerData(L: DirectiveID.getLoc());
  else if (IDVal == ".setfp")
    parseDirectiveSetFP(L: DirectiveID.getLoc());
  else if (IDVal == ".pad")
    parseDirectivePad(L: DirectiveID.getLoc());
  else if (IDVal == ".save")
    parseDirectiveRegSave(L: DirectiveID.getLoc(), IsVector: false);
  else if (IDVal == ".vsave")
    parseDirectiveRegSave(L: DirectiveID.getLoc(), IsVector: true);
  else if (IDVal == ".ltorg" || IDVal == ".pool")
    parseDirectiveLtorg(L: DirectiveID.getLoc());
  else if (IDVal == ".even")
    parseDirectiveEven(L: DirectiveID.getLoc());
  else if (IDVal == ".personalityindex")
    parseDirectivePersonalityIndex(L: DirectiveID.getLoc());
  else if (IDVal == ".unwind_raw")
    parseDirectiveUnwindRaw(L: DirectiveID.getLoc());
  else if (IDVal == ".movsp")
    parseDirectiveMovSP(L: DirectiveID.getLoc());
  else if (IDVal == ".arch_extension")
    parseDirectiveArchExtension(L: DirectiveID.getLoc());
  else if (IDVal == ".align")
    return parseDirectiveAlign(L: DirectiveID.getLoc()); // Use Generic on failure.
  else if (IDVal == ".thumb_set")
    parseDirectiveThumbSet(L: DirectiveID.getLoc());
  else if (IDVal == ".inst")
    parseDirectiveInst(L: DirectiveID.getLoc());
  else if (IDVal == ".inst.n")
    parseDirectiveInst(L: DirectiveID.getLoc(), Suffix: 'n');
  else if (IDVal == ".inst.w")
    parseDirectiveInst(L: DirectiveID.getLoc(), Suffix: 'w');
  else if (!IsMachO && !IsCOFF) {
    // ELF-only directives (build attributes, TLS sequences, etc.).
    if (IDVal == ".arch")
      parseDirectiveArch(L: DirectiveID.getLoc());
    else if (IDVal == ".cpu")
      parseDirectiveCPU(L: DirectiveID.getLoc());
    else if (IDVal == ".eabi_attribute")
      parseDirectiveEabiAttr(L: DirectiveID.getLoc());
    else if (IDVal == ".fpu")
      parseDirectiveFPU(L: DirectiveID.getLoc());
    else if (IDVal == ".fnstart")
      parseDirectiveFnStart(L: DirectiveID.getLoc());
    else if (IDVal == ".object_arch")
      parseDirectiveObjectArch(L: DirectiveID.getLoc());
    else if (IDVal == ".tlsdescseq")
      parseDirectiveTLSDescSeq(L: DirectiveID.getLoc());
    else
      return true;
  } else if (IsCOFF) {
    // COFF-only SEH unwind directives.
    if (IDVal == ".seh_stackalloc")
      parseDirectiveSEHAllocStack(L: DirectiveID.getLoc(), /*Wide=*/false);
    else if (IDVal == ".seh_stackalloc_w")
      parseDirectiveSEHAllocStack(L: DirectiveID.getLoc(), /*Wide=*/true);
    else if (IDVal == ".seh_save_regs")
      parseDirectiveSEHSaveRegs(L: DirectiveID.getLoc(), /*Wide=*/false);
    else if (IDVal == ".seh_save_regs_w")
      parseDirectiveSEHSaveRegs(L: DirectiveID.getLoc(), /*Wide=*/true);
    else if (IDVal == ".seh_save_sp")
      parseDirectiveSEHSaveSP(L: DirectiveID.getLoc());
    else if (IDVal == ".seh_save_fregs")
      parseDirectiveSEHSaveFRegs(L: DirectiveID.getLoc());
    else if (IDVal == ".seh_save_lr")
      parseDirectiveSEHSaveLR(L: DirectiveID.getLoc());
    else if (IDVal == ".seh_endprologue")
      parseDirectiveSEHPrologEnd(L: DirectiveID.getLoc(), /*Fragment=*/false);
    else if (IDVal == ".seh_endprologue_fragment")
      parseDirectiveSEHPrologEnd(L: DirectiveID.getLoc(), /*Fragment=*/true);
    else if (IDVal == ".seh_nop")
      parseDirectiveSEHNop(L: DirectiveID.getLoc(), /*Wide=*/false);
    else if (IDVal == ".seh_nop_w")
      parseDirectiveSEHNop(L: DirectiveID.getLoc(), /*Wide=*/true);
    else if (IDVal == ".seh_startepilogue")
      parseDirectiveSEHEpilogStart(L: DirectiveID.getLoc(), /*Condition=*/false);
    else if (IDVal == ".seh_startepilogue_cond")
      parseDirectiveSEHEpilogStart(L: DirectiveID.getLoc(), /*Condition=*/true);
    else if (IDVal == ".seh_endepilogue")
      parseDirectiveSEHEpilogEnd(L: DirectiveID.getLoc());
    else if (IDVal == ".seh_custom")
      parseDirectiveSEHCustom(L: DirectiveID.getLoc());
    else
      return true;
  } else
    return true;
  return false;
}
11635
11636/// parseLiteralValues
11637/// ::= .hword expression [, expression]*
11638/// ::= .short expression [, expression]*
11639/// ::= .word expression [, expression]*
11640bool ARMAsmParser::parseLiteralValues(unsigned Size, SMLoc L) {
11641 auto parseOne = [&]() -> bool {
11642 const MCExpr *Value;
11643 if (getParser().parseExpression(Res&: Value))
11644 return true;
11645 getParser().getStreamer().emitValue(Value, Size, Loc: L);
11646 return false;
11647 };
11648 return (parseMany(parseOne));
11649}
11650
11651/// parseDirectiveThumb
11652/// ::= .thumb
11653bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
11654 if (parseEOL() || check(P: !hasThumb(), Loc: L, Msg: "target does not support Thumb mode"))
11655 return true;
11656
11657 if (!isThumb())
11658 SwitchMode();
11659
11660 getTargetStreamer().emitCode16();
11661 getParser().getStreamer().emitCodeAlignment(Alignment: Align(2), STI: &getSTI(), MaxBytesToEmit: 0);
11662 return false;
11663}
11664
11665/// parseDirectiveARM
11666/// ::= .arm
11667bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
11668 if (parseEOL() || check(P: !hasARM(), Loc: L, Msg: "target does not support ARM mode"))
11669 return true;
11670
11671 if (isThumb())
11672 SwitchMode();
11673 getTargetStreamer().emitCode32();
11674 getParser().getStreamer().emitCodeAlignment(Alignment: Align(4), STI: &getSTI(), MaxBytesToEmit: 0);
11675 return false;
11676}
11677
// Hook invoked just before a label is emitted; Symbol/IDLoc are unused here.
void ARMAsmParser::doBeforeLabelEmit(MCSymbol *Symbol, SMLoc IDLoc) {
  // We need to flush the current implicit IT block on a label, because it is
  // not legal to branch into an IT block.
  flushPendingInstructions(Out&: getStreamer());
}
11683
11684void ARMAsmParser::onLabelParsed(MCSymbol *Symbol) {
11685 if (NextSymbolIsThumb) {
11686 getTargetStreamer().emitThumbFunc(Symbol);
11687 NextSymbolIsThumb = false;
11688 }
11689}
11690
11691/// parseDirectiveThumbFunc
11692/// ::= .thumbfunc symbol_name
11693bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
11694 MCAsmParser &Parser = getParser();
11695 const auto Format = getContext().getObjectFileType();
11696 bool IsMachO = Format == MCContext::IsMachO;
11697
11698 // Darwin asm has (optionally) function name after .thumb_func direction
11699 // ELF doesn't
11700
11701 if (IsMachO) {
11702 if (Parser.getTok().is(K: AsmToken::Identifier) ||
11703 Parser.getTok().is(K: AsmToken::String)) {
11704 MCSymbol *Func = getParser().getContext().getOrCreateSymbol(
11705 Name: Parser.getTok().getIdentifier());
11706 getTargetStreamer().emitThumbFunc(Symbol: Func);
11707 Parser.Lex();
11708 if (parseEOL())
11709 return true;
11710 return false;
11711 }
11712 }
11713
11714 if (parseEOL())
11715 return true;
11716
11717 // .thumb_func implies .thumb
11718 if (!isThumb())
11719 SwitchMode();
11720
11721 getTargetStreamer().emitCode16();
11722
11723 NextSymbolIsThumb = true;
11724 return false;
11725}
11726
11727/// parseDirectiveSyntax
11728/// ::= .syntax unified | divided
11729bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
11730 MCAsmParser &Parser = getParser();
11731 const AsmToken &Tok = Parser.getTok();
11732 if (Tok.isNot(K: AsmToken::Identifier)) {
11733 Error(L, Msg: "unexpected token in .syntax directive");
11734 return false;
11735 }
11736
11737 StringRef Mode = Tok.getString();
11738 Parser.Lex();
11739 if (check(P: Mode == "divided" || Mode == "DIVIDED", Loc: L,
11740 Msg: "'.syntax divided' arm assembly not supported") ||
11741 check(P: Mode != "unified" && Mode != "UNIFIED", Loc: L,
11742 Msg: "unrecognized syntax mode in .syntax directive") ||
11743 parseEOL())
11744 return true;
11745
11746 // TODO tell the MC streamer the mode
11747 // getParser().getStreamer().Emit???();
11748 return false;
11749}
11750
11751/// parseDirectiveCode
11752/// ::= .code 16 | 32
11753bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
11754 MCAsmParser &Parser = getParser();
11755 const AsmToken &Tok = Parser.getTok();
11756 if (Tok.isNot(K: AsmToken::Integer))
11757 return Error(L, Msg: "unexpected token in .code directive");
11758 int64_t Val = Parser.getTok().getIntVal();
11759 if (Val != 16 && Val != 32) {
11760 Error(L, Msg: "invalid operand to .code directive");
11761 return false;
11762 }
11763 Parser.Lex();
11764
11765 if (parseEOL())
11766 return true;
11767
11768 if (Val == 16) {
11769 if (!hasThumb())
11770 return Error(L, Msg: "target does not support Thumb mode");
11771
11772 if (!isThumb())
11773 SwitchMode();
11774 getTargetStreamer().emitCode16();
11775 } else {
11776 if (!hasARM())
11777 return Error(L, Msg: "target does not support ARM mode");
11778
11779 if (isThumb())
11780 SwitchMode();
11781 getTargetStreamer().emitCode32();
11782 }
11783
11784 return false;
11785}
11786
11787/// parseDirectiveReq
11788/// ::= name .req registername
11789bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
11790 MCAsmParser &Parser = getParser();
11791 Parser.Lex(); // Eat the '.req' token.
11792 MCRegister Reg;
11793 SMLoc SRegLoc, ERegLoc;
11794 const bool parseResult = parseRegister(Reg, StartLoc&: SRegLoc, EndLoc&: ERegLoc);
11795 if (check(P: parseResult, Loc: SRegLoc, Msg: "register name expected") || parseEOL())
11796 return true;
11797
11798 if (RegisterReqs.insert(KV: std::make_pair(x&: Name, y&: Reg)).first->second != Reg)
11799 return Error(L: SRegLoc,
11800 Msg: "redefinition of '" + Name + "' does not match original.");
11801
11802 return false;
11803}
11804
11805/// parseDirectiveUneq
11806/// ::= .unreq registername
11807bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
11808 MCAsmParser &Parser = getParser();
11809 if (Parser.getTok().isNot(K: AsmToken::Identifier))
11810 return Error(L, Msg: "unexpected input in .unreq directive.");
11811 RegisterReqs.erase(Key: Parser.getTok().getIdentifier().lower());
11812 Parser.Lex(); // Eat the identifier.
11813 return parseEOL();
11814}
11815
// After changing arch/CPU, try to put the ARM/Thumb mode back to what it was
// before, if supported by the new target, or emit mapping symbols for the mode
// switch.
void ARMAsmParser::FixModeAfterArchChange(bool WasThumb, SMLoc Loc) {
  // Nothing to do if the arch change did not flip the current mode.
  if (WasThumb != isThumb()) {
    if (WasThumb && hasThumb()) {
      // Stay in Thumb mode
      SwitchMode();
    } else if (!WasThumb && hasARM()) {
      // Stay in ARM mode
      SwitchMode();
    } else {
      // Mode switch forced, because the new arch doesn't support the old mode.
      if (isThumb())
        getTargetStreamer().emitCode16();
      else
        getTargetStreamer().emitCode32();
      // Warn about the implicit mode switch. GAS does not switch modes here,
      // but instead stays in the old mode, reporting an error on any following
      // instructions as the mode does not exist on the target.
      Warning(L: Loc, Msg: Twine("new target does not support ") +
                       (WasThumb ? "thumb" : "arm") + " mode, switching to " +
                       (!WasThumb ? "thumb" : "arm") + " mode");
    }
  }
}
11842
11843/// parseDirectiveArch
11844/// ::= .arch token
11845bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
11846 StringRef Arch = getParser().parseStringToEndOfStatement().trim();
11847 ARM::ArchKind ID = ARM::parseArch(Arch);
11848
11849 if (ID == ARM::ArchKind::INVALID)
11850 return Error(L, Msg: "Unknown arch name");
11851
11852 bool WasThumb = isThumb();
11853 MCSubtargetInfo &STI = copySTI();
11854 STI.setDefaultFeatures(CPU: "", /*TuneCPU*/ "",
11855 FS: ("+" + ARM::getArchName(AK: ID)).str());
11856 setAvailableFeatures(ComputeAvailableFeatures(FB: STI.getFeatureBits()));
11857 FixModeAfterArchChange(WasThumb, Loc: L);
11858
11859 getTargetStreamer().emitArch(Arch: ID);
11860 return false;
11861}
11862
/// parseDirectiveEabiAttr
///  ::= .eabi_attribute int, int [, "str"]
///  ::= .eabi_attribute Tag_name, int [, "str"]
bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
  MCAsmParser &Parser = getParser();
  int64_t Tag;
  SMLoc TagLoc;
  TagLoc = Parser.getTok().getLoc();
  // The tag may be given symbolically (Tag_name) or as a numeric constant.
  if (Parser.getTok().is(K: AsmToken::Identifier)) {
    StringRef Name = Parser.getTok().getIdentifier();
    std::optional<unsigned> Ret = ELFAttrs::attrTypeFromString(
        tag: Name, tagNameMap: ARMBuildAttrs::getARMAttributeTags());
    if (!Ret) {
      // Report the problem but return false so parsing continues.
      Error(L: TagLoc, Msg: "attribute name not recognised: " + Name);
      return false;
    }
    Tag = *Ret;
    Parser.Lex();
  } else {
    const MCExpr *AttrExpr;

    TagLoc = Parser.getTok().getLoc();
    if (Parser.parseExpression(Res&: AttrExpr))
      return true;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: AttrExpr);
    if (check(P: !CE, Loc: TagLoc, Msg: "expected numeric constant"))
      return true;

    Tag = CE->getValue();
  }

  if (Parser.parseComma())
    return true;

  StringRef StringValue = "";
  bool IsStringValue = false;

  int64_t IntegerValue = 0;
  bool IsIntegerValue = false;

  // Decide the value kind from the tag: CPU names are strings, compatibility
  // takes both an integer and a string; otherwise the EABI convention is that
  // tags < 32 and even tags are integers, odd tags >= 32 are strings.
  if (Tag == ARMBuildAttrs::CPU_raw_name || Tag == ARMBuildAttrs::CPU_name)
    IsStringValue = true;
  else if (Tag == ARMBuildAttrs::compatibility) {
    IsStringValue = true;
    IsIntegerValue = true;
  } else if (Tag < 32 || Tag % 2 == 0)
    IsIntegerValue = true;
  else if (Tag % 2 == 1)
    IsStringValue = true;
  else
    llvm_unreachable("invalid tag type");

  if (IsIntegerValue) {
    const MCExpr *ValueExpr;
    SMLoc ValueExprLoc = Parser.getTok().getLoc();
    if (Parser.parseExpression(Res&: ValueExpr))
      return true;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: ValueExpr);
    if (!CE)
      return Error(L: ValueExprLoc, Msg: "expected numeric constant");
    IntegerValue = CE->getValue();
  }

  // The compatibility tag has the form: int, "string".
  if (Tag == ARMBuildAttrs::compatibility) {
    if (Parser.parseComma())
      return true;
  }

  std::string EscapedValue;
  if (IsStringValue) {
    if (Parser.getTok().isNot(K: AsmToken::String))
      return Error(L: Parser.getTok().getLoc(), Msg: "bad string constant");

    // also_compatible_with may embed escape sequences; decode them here.
    if (Tag == ARMBuildAttrs::also_compatible_with) {
      if (Parser.parseEscapedString(Data&: EscapedValue))
        return Error(L: Parser.getTok().getLoc(), Msg: "bad escaped string constant");

      StringValue = EscapedValue;
    } else {
      StringValue = Parser.getTok().getStringContents();
      Parser.Lex();
    }
  }

  if (Parser.parseEOL())
    return true;

  if (IsIntegerValue && IsStringValue) {
    assert(Tag == ARMBuildAttrs::compatibility);
    getTargetStreamer().emitIntTextAttribute(Attribute: Tag, IntValue: IntegerValue, StringValue);
  } else if (IsIntegerValue)
    getTargetStreamer().emitAttribute(Attribute: Tag, Value: IntegerValue);
  else if (IsStringValue)
    getTargetStreamer().emitTextAttribute(Attribute: Tag, String: StringValue);
  return false;
}
11961
11962/// parseDirectiveCPU
11963/// ::= .cpu str
11964bool ARMAsmParser::parseDirectiveCPU(SMLoc L) {
11965 StringRef CPU = getParser().parseStringToEndOfStatement().trim();
11966 getTargetStreamer().emitTextAttribute(Attribute: ARMBuildAttrs::CPU_name, String: CPU);
11967
11968 // FIXME: This is using table-gen data, but should be moved to
11969 // ARMTargetParser once that is table-gen'd.
11970 if (!getSTI().isCPUStringValid(CPU))
11971 return Error(L, Msg: "Unknown CPU name");
11972
11973 bool WasThumb = isThumb();
11974 MCSubtargetInfo &STI = copySTI();
11975 STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, FS: "");
11976 setAvailableFeatures(ComputeAvailableFeatures(FB: STI.getFeatureBits()));
11977 FixModeAfterArchChange(WasThumb, Loc: L);
11978
11979 return false;
11980}
11981
11982/// parseDirectiveFPU
11983/// ::= .fpu str
11984bool ARMAsmParser::parseDirectiveFPU(SMLoc L) {
11985 SMLoc FPUNameLoc = getTok().getLoc();
11986 StringRef FPU = getParser().parseStringToEndOfStatement().trim();
11987
11988 ARM::FPUKind ID = ARM::parseFPU(FPU);
11989 std::vector<StringRef> Features;
11990 if (!ARM::getFPUFeatures(FPUKind: ID, Features))
11991 return Error(L: FPUNameLoc, Msg: "Unknown FPU name");
11992
11993 MCSubtargetInfo &STI = copySTI();
11994 for (auto Feature : Features)
11995 STI.ApplyFeatureFlag(FS: Feature);
11996 setAvailableFeatures(ComputeAvailableFeatures(FB: STI.getFeatureBits()));
11997
11998 getTargetStreamer().emitFPU(FPU: ID);
11999 return false;
12000}
12001
12002/// parseDirectiveFnStart
12003/// ::= .fnstart
12004bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) {
12005 if (parseEOL())
12006 return true;
12007
12008 if (UC.hasFnStart()) {
12009 Error(L, Msg: ".fnstart starts before the end of previous one");
12010 UC.emitFnStartLocNotes();
12011 return true;
12012 }
12013
12014 // Reset the unwind directives parser state
12015 UC.reset();
12016
12017 getTargetStreamer().emitFnStart();
12018
12019 UC.recordFnStart(L);
12020 return false;
12021}
12022
12023/// parseDirectiveFnEnd
12024/// ::= .fnend
12025bool ARMAsmParser::parseDirectiveFnEnd(SMLoc L) {
12026 if (parseEOL())
12027 return true;
12028 // Check the ordering of unwind directives
12029 if (!UC.hasFnStart())
12030 return Error(L, Msg: ".fnstart must precede .fnend directive");
12031
12032 // Reset the unwind directives parser state
12033 getTargetStreamer().emitFnEnd();
12034
12035 UC.reset();
12036 return false;
12037}
12038
12039/// parseDirectiveCantUnwind
12040/// ::= .cantunwind
12041bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) {
12042 if (parseEOL())
12043 return true;
12044
12045 UC.recordCantUnwind(L);
12046 // Check the ordering of unwind directives
12047 if (check(P: !UC.hasFnStart(), Loc: L, Msg: ".fnstart must precede .cantunwind directive"))
12048 return true;
12049
12050 if (UC.hasHandlerData()) {
12051 Error(L, Msg: ".cantunwind can't be used with .handlerdata directive");
12052 UC.emitHandlerDataLocNotes();
12053 return true;
12054 }
12055 if (UC.hasPersonality()) {
12056 Error(L, Msg: ".cantunwind can't be used with .personality directive");
12057 UC.emitPersonalityLocNotes();
12058 return true;
12059 }
12060
12061 getTargetStreamer().emitCantUnwind();
12062 return false;
12063}
12064
/// parseDirectivePersonality
///  ::= .personality name
bool ARMAsmParser::parseDirectivePersonality(SMLoc L) {
  MCAsmParser &Parser = getParser();
  // Snapshot this before recordPersonality() below marks one as present.
  bool HasExistingPersonality = UC.hasPersonality();

  // Parse the name of the personality routine
  if (Parser.getTok().isNot(K: AsmToken::Identifier))
    return Error(L, Msg: "unexpected input in .personality directive.");
  StringRef Name(Parser.getTok().getIdentifier());
  Parser.Lex();

  if (parseEOL())
    return true;

  UC.recordPersonality(L);

  // Check the ordering of unwind directives
  if (!UC.hasFnStart())
    return Error(L, Msg: ".fnstart must precede .personality directive");
  if (UC.cantUnwind()) {
    Error(L, Msg: ".personality can't be used with .cantunwind directive");
    UC.emitCantUnwindLocNotes();
    return true;
  }
  if (UC.hasHandlerData()) {
    Error(L, Msg: ".personality must precede .handlerdata directive");
    UC.emitHandlerDataLocNotes();
    return true;
  }
  if (HasExistingPersonality) {
    Error(L, Msg: "multiple personality directives");
    UC.emitPersonalityLocNotes();
    return true;
  }

  // All checks passed: emit the personality routine reference.
  MCSymbol *PR = getParser().getContext().getOrCreateSymbol(Name);
  getTargetStreamer().emitPersonality(Personality: PR);
  return false;
}
12105
12106/// parseDirectiveHandlerData
12107/// ::= .handlerdata
12108bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) {
12109 if (parseEOL())
12110 return true;
12111
12112 UC.recordHandlerData(L);
12113 // Check the ordering of unwind directives
12114 if (!UC.hasFnStart())
12115 return Error(L, Msg: ".fnstart must precede .personality directive");
12116 if (UC.cantUnwind()) {
12117 Error(L, Msg: ".handlerdata can't be used with .cantunwind directive");
12118 UC.emitCantUnwindLocNotes();
12119 return true;
12120 }
12121
12122 getTargetStreamer().emitHandlerData();
12123 return false;
12124}
12125
/// parseDirectiveSetFP
///  ::= .setfp fpreg, spreg [, offset]
bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) {
  MCAsmParser &Parser = getParser();
  // Check the ordering of unwind directives
  if (check(P: !UC.hasFnStart(), Loc: L, Msg: ".fnstart must precede .setfp directive") ||
      check(P: UC.hasHandlerData(), Loc: L,
            Msg: ".setfp must precede .handlerdata directive"))
    return true;

  // Parse fpreg
  SMLoc FPRegLoc = Parser.getTok().getLoc();
  MCRegister FPReg = tryParseRegister();

  if (check(P: !FPReg, Loc: FPRegLoc, Msg: "frame pointer register expected") ||
      Parser.parseComma())
    return true;

  // Parse spreg; only SP or the previously-recorded FP register is accepted.
  SMLoc SPRegLoc = Parser.getTok().getLoc();
  MCRegister SPReg = tryParseRegister();
  if (check(P: !SPReg, Loc: SPRegLoc, Msg: "stack pointer register expected") ||
      check(P: SPReg != ARM::SP && SPReg != UC.getFPReg(), Loc: SPRegLoc,
            Msg: "register should be either $sp or the latest fp register"))
    return true;

  // Update the frame pointer register
  UC.saveFPReg(Reg: FPReg);

  // Parse offset (optional, introduced by a comma and '#' or '$').
  int64_t Offset = 0;
  if (Parser.parseOptionalToken(T: AsmToken::Comma)) {
    if (Parser.getTok().isNot(K: AsmToken::Hash) &&
        Parser.getTok().isNot(K: AsmToken::Dollar))
      return Error(L: Parser.getTok().getLoc(), Msg: "'#' expected");
    Parser.Lex(); // skip hash token.

    const MCExpr *OffsetExpr;
    SMLoc ExLoc = Parser.getTok().getLoc();
    SMLoc EndLoc;
    if (getParser().parseExpression(Res&: OffsetExpr, EndLoc))
      return Error(L: ExLoc, Msg: "malformed setfp offset");
    // The offset must fold to a compile-time constant.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: OffsetExpr);
    if (check(P: !CE, Loc: ExLoc, Msg: "setfp offset must be an immediate"))
      return true;
    Offset = CE->getValue();
  }

  if (Parser.parseEOL())
    return true;

  getTargetStreamer().emitSetFP(FpReg: FPReg, SpReg: SPReg, Offset);
  return false;
}
12180
12181/// parseDirectivePad
12182/// ::= .pad offset
12183bool ARMAsmParser::parseDirectivePad(SMLoc L) {
12184 MCAsmParser &Parser = getParser();
12185 // Check the ordering of unwind directives
12186 if (!UC.hasFnStart())
12187 return Error(L, Msg: ".fnstart must precede .pad directive");
12188 if (UC.hasHandlerData())
12189 return Error(L, Msg: ".pad must precede .handlerdata directive");
12190
12191 // Parse the offset
12192 if (Parser.getTok().isNot(K: AsmToken::Hash) &&
12193 Parser.getTok().isNot(K: AsmToken::Dollar))
12194 return Error(L: Parser.getTok().getLoc(), Msg: "'#' expected");
12195 Parser.Lex(); // skip hash token.
12196
12197 const MCExpr *OffsetExpr;
12198 SMLoc ExLoc = Parser.getTok().getLoc();
12199 SMLoc EndLoc;
12200 if (getParser().parseExpression(Res&: OffsetExpr, EndLoc))
12201 return Error(L: ExLoc, Msg: "malformed pad offset");
12202 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: OffsetExpr);
12203 if (!CE)
12204 return Error(L: ExLoc, Msg: "pad offset must be an immediate");
12205
12206 if (parseEOL())
12207 return true;
12208
12209 getTargetStreamer().emitPad(Offset: CE->getValue());
12210 return false;
12211}
12212
/// parseDirectiveRegSave
/// ::= .save { registers }
/// ::= .vsave { registers }
///
/// Records callee-saved registers for EHABI unwinding. \p IsVector
/// selects .vsave (DPR list) vs .save (GPR list). Returns true on error
/// (diagnostic already issued).
bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) {
  // Check the ordering of unwind directives
  if (!UC.hasFnStart())
    return Error(L, Msg: ".fnstart must precede .save or .vsave directives");
  if (UC.hasHandlerData())
    return Error(L, Msg: ".save or .vsave must precede .handlerdata directive");

  // RAII object to make sure parsed operands are deleted.
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;

  // Parse the register list
  // (EnforceOrder: registers must appear in ascending order; AllowRAAC
  // permits ra_auth_code in the list).
  if (parseRegisterList(Operands, EnforceOrder: true, AllowRAAC: true) || parseEOL())
    return true;
  // The parsed list is the single operand produced above.
  ARMOperand &Op = (ARMOperand &)*Operands[0];
  if (!IsVector && !Op.isRegList())
    return Error(L, Msg: ".save expects GPR registers");
  if (IsVector && !Op.isDPRRegList())
    return Error(L, Msg: ".vsave expects DPR registers");

  getTargetStreamer().emitRegSave(RegList: Op.getRegList(), isVector: IsVector);
  return false;
}
12238
/// parseDirectiveInst
/// ::= .inst opcode [, ...]
/// ::= .inst.n opcode [, ...]
/// ::= .inst.w opcode [, ...]
///
/// Emits raw instruction encodings. In Thumb mode, .inst.n is 16-bit and
/// .inst.w is 32-bit; plain .inst guesses the size from the opcode value.
/// In ARM mode, only suffix-less .inst (32-bit) is accepted. Each emitted
/// value also advances any active IT/VPT block. Returns true on error.
bool ARMAsmParser::parseDirectiveInst(SMLoc Loc, char Suffix) {
  // Width in bytes: 4 by default, 2 for .inst.n, 0 meaning "guess" for
  // suffix-less .inst in Thumb mode.
  int Width = 4;

  if (isThumb()) {
    switch (Suffix) {
    case 'n':
      Width = 2;
      break;
    case 'w':
      break;
    default:
      // No suffix: defer the size decision to each operand's value.
      Width = 0;
      break;
    }
  } else {
    if (Suffix)
      return Error(L: Loc, Msg: "width suffixes are invalid in ARM mode");
  }

  // Parse and emit a single opcode value from the comma-separated list.
  auto parseOne = [&]() -> bool {
    const MCExpr *Expr;
    if (getParser().parseExpression(Res&: Expr))
      return true;
    const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Val: Expr);
    if (!Value) {
      return Error(L: Loc, Msg: "expected constant expression");
    }

    char CurSuffix = Suffix;
    switch (Width) {
    case 2:
      // Must fit in a 16-bit halfword.
      if (Value->getValue() > 0xffff)
        return Error(L: Loc, Msg: "inst.n operand is too big, use inst.w instead");
      break;
    case 4:
      // Must fit in 32 bits.
      if (Value->getValue() > 0xffffffff)
        return Error(L: Loc, Msg: StringRef(Suffix ? "inst.w" : "inst") +
                         " operand is too big");
      break;
    case 0:
      // Thumb mode, no width indicated. Guess from the opcode, if possible.
      if (Value->getValue() < 0xe800)
        CurSuffix = 'n';
      else if (Value->getValue() >= 0xe8000000)
        CurSuffix = 'w';
      else
        // Values in [0xe800, 0xe8000000) are ambiguous between a 16-bit
        // encoding and the first half of a 32-bit one.
        return Error(L: Loc, Msg: "cannot determine Thumb instruction size, "
                             "use inst.n/inst.w instead");
      break;
    default:
      llvm_unreachable("only supported widths are 2 and 4");
    }

    getTargetStreamer().emitInst(Inst: Value->getValue(), Suffix: CurSuffix);
    // Raw encodings still count as instructions for IT/VPT bookkeeping.
    forwardITPosition();
    forwardVPTPosition();
    return false;
  };

  // At least one operand is required.
  if (parseOptionalToken(T: AsmToken::EndOfStatement))
    return Error(L: Loc, Msg: "expected expression following directive");
  if (parseMany(parseOne))
    return true;
  return false;
}
12308
12309/// parseDirectiveLtorg
12310/// ::= .ltorg | .pool
12311bool ARMAsmParser::parseDirectiveLtorg(SMLoc L) {
12312 if (parseEOL())
12313 return true;
12314 getTargetStreamer().emitCurrentConstantPool();
12315 return false;
12316}
12317
/// parseDirectiveEven
/// ::= .even
///
/// Aligns the current location to a 2-byte boundary, using code alignment
/// (NOP padding) in code sections and zero padding otherwise.
/// Returns true on error (diagnostic already issued).
bool ARMAsmParser::parseDirectiveEven(SMLoc L) {
  const MCSection *Section = getStreamer().getCurrentSectionOnly();

  // The directive takes no operands.
  if (parseEOL())
    return true;

  // If no section is active yet, set up the default sections so we have
  // somewhere to emit the alignment.
  if (!Section) {
    getStreamer().initSections(NoExecStack: false, STI: getSTI());
    Section = getStreamer().getCurrentSectionOnly();
  }

  assert(Section && "must have section to emit alignment");
  // Code sections pad with instruction-shaped fill; data sections with zeros.
  if (Section->useCodeAlign())
    getStreamer().emitCodeAlignment(Alignment: Align(2), STI: &getSTI());
  else
    getStreamer().emitValueToAlignment(Alignment: Align(2));

  return false;
}
12337
/// parseDirectivePersonalityIndex
/// ::= .personalityindex index
///
/// Selects one of the predefined EHABI personality routines by index
/// (valid range [0, NUM_PERSONALITY_INDEX)). Mutually exclusive with
/// .personality, .cantunwind, and must precede .handlerdata.
/// Returns true on error (diagnostic already issued).
bool ARMAsmParser::parseDirectivePersonalityIndex(SMLoc L) {
  MCAsmParser &Parser = getParser();
  // Capture whether a personality was already set BEFORE recording this
  // one, so we can diagnose duplicates below.
  bool HasExistingPersonality = UC.hasPersonality();

  const MCExpr *IndexExpression;
  SMLoc IndexLoc = Parser.getTok().getLoc();
  if (Parser.parseExpression(Res&: IndexExpression) || parseEOL()) {
    return true;
  }

  // Record the location so later duplicate directives can reference it.
  UC.recordPersonalityIndex(L);

  // Check the ordering of unwind directives.
  if (!UC.hasFnStart()) {
    return Error(L, Msg: ".fnstart must precede .personalityindex directive");
  }
  if (UC.cantUnwind()) {
    Error(L, Msg: ".personalityindex cannot be used with .cantunwind");
    UC.emitCantUnwindLocNotes();
    return true;
  }
  if (UC.hasHandlerData()) {
    Error(L, Msg: ".personalityindex must precede .handlerdata directive");
    UC.emitHandlerDataLocNotes();
    return true;
  }
  if (HasExistingPersonality) {
    Error(L, Msg: "multiple personality directives");
    UC.emitPersonalityLocNotes();
    return true;
  }

  // The index must fold to a constant within the defined range.
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: IndexExpression);
  if (!CE)
    return Error(L: IndexLoc, Msg: "index must be a constant number");
  if (CE->getValue() < 0 || CE->getValue() >= ARM::EHABI::NUM_PERSONALITY_INDEX)
    return Error(L: IndexLoc,
                 Msg: "personality routine index should be in range [0-3]");

  getTargetStreamer().emitPersonalityIndex(Index: CE->getValue());
  return false;
}
12381
/// parseDirectiveUnwindRaw
/// ::= .unwind_raw offset, opcode [, opcode...]
///
/// Emits raw EHABI unwind opcodes with an associated stack offset. Each
/// opcode must be a constant byte (0-255); at least one is required.
/// Returns true on error (diagnostic already issued).
bool ARMAsmParser::parseDirectiveUnwindRaw(SMLoc L) {
  MCAsmParser &Parser = getParser();
  int64_t StackOffset;
  const MCExpr *OffsetExpr;
  SMLoc OffsetLoc = getLexer().getLoc();

  // Only valid inside a .fnstart region.
  if (!UC.hasFnStart())
    return Error(L, Msg: ".fnstart must precede .unwind_raw directives");
  if (getParser().parseExpression(Res&: OffsetExpr))
    return Error(L: OffsetLoc, Msg: "expected expression");

  // The stack offset must fold to a constant.
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: OffsetExpr);
  if (!CE)
    return Error(L: OffsetLoc, Msg: "offset must be a constant");

  StackOffset = CE->getValue();

  if (Parser.parseComma())
    return true;

  SmallVector<uint8_t, 16> Opcodes;

  // Parse one opcode byte from the comma-separated list.
  auto parseOne = [&]() -> bool {
    const MCExpr *OE = nullptr;
    SMLoc OpcodeLoc = getLexer().getLoc();
    // An immediate end-of-statement here means a trailing comma with no
    // following opcode.
    if (check(P: getLexer().is(K: AsmToken::EndOfStatement) ||
                 Parser.parseExpression(Res&: OE),
              Loc: OpcodeLoc, Msg: "expected opcode expression"))
      return true;
    const MCConstantExpr *OC = dyn_cast<MCConstantExpr>(Val: OE);
    if (!OC)
      return Error(L: OpcodeLoc, Msg: "opcode value must be a constant");
    const int64_t Opcode = OC->getValue();
    // Each opcode must fit in a single byte.
    if (Opcode & ~0xff)
      return Error(L: OpcodeLoc, Msg: "invalid opcode");
    Opcodes.push_back(Elt: uint8_t(Opcode));
    return false;
  };

  // Must have at least 1 element
  SMLoc OpcodeLoc = getLexer().getLoc();
  if (parseOptionalToken(T: AsmToken::EndOfStatement))
    return Error(L: OpcodeLoc, Msg: "expected opcode expression");
  if (parseMany(parseOne))
    return true;

  getTargetStreamer().emitUnwindRaw(StackOffset, Opcodes);
  return false;
}
12433
/// parseDirectiveTLSDescSeq
/// ::= .tlsdescseq tls-variable
///
/// Annotates the following instructions as a TLS descriptor sequence for
/// the named TLS variable. Returns true on error (diagnostic already
/// issued).
bool ARMAsmParser::parseDirectiveTLSDescSeq(SMLoc L) {
  MCAsmParser &Parser = getParser();

  // The operand must be a bare identifier naming the TLS variable.
  if (getLexer().isNot(K: AsmToken::Identifier))
    return TokError(Msg: "expected variable after '.tlsdescseq' directive");

  // Build a TLSDESCSEQ-specified reference to the (possibly new) symbol.
  auto *Sym = getContext().getOrCreateSymbol(Name: Parser.getTok().getIdentifier());
  const auto *SRE =
      MCSymbolRefExpr::create(Symbol: Sym, specifier: ARM::S_TLSDESCSEQ, Ctx&: getContext());
  Lex();

  if (parseEOL())
    return true;

  getTargetStreamer().annotateTLSDescriptorSequence(SRE);
  return false;
}
12453
/// parseDirectiveMovSP
/// ::= .movsp reg [, #offset]
///
/// Records that sp has been moved into \p reg (plus an optional constant
/// offset) for unwinding purposes; the named register becomes the new
/// frame pointer. Only valid while sp is still the FP register.
/// Returns true on error (diagnostic already issued).
bool ARMAsmParser::parseDirectiveMovSP(SMLoc L) {
  MCAsmParser &Parser = getParser();
  // Check the ordering of unwind directives.
  if (!UC.hasFnStart())
    return Error(L, Msg: ".fnstart must precede .movsp directives");
  // .movsp only makes sense while sp is the current frame pointer; after
  // .setfp (or a prior .movsp) it is rejected.
  if (UC.getFPReg() != ARM::SP)
    return Error(L, Msg: "unexpected .movsp directive");

  SMLoc SPRegLoc = Parser.getTok().getLoc();
  MCRegister SPReg = tryParseRegister();
  if (!SPReg)
    return Error(L: SPRegLoc, Msg: "register expected");
  if (SPReg == ARM::SP || SPReg == ARM::PC)
    return Error(L: SPRegLoc, Msg: "sp and pc are not permitted in .movsp directive");

  // Optional constant offset, introduced by ','. Defaults to 0.
  int64_t Offset = 0;
  if (Parser.parseOptionalToken(T: AsmToken::Comma)) {
    if (Parser.parseToken(T: AsmToken::Hash, Msg: "expected #constant"))
      return true;

    const MCExpr *OffsetExpr;
    SMLoc OffsetLoc = Parser.getTok().getLoc();

    if (Parser.parseExpression(Res&: OffsetExpr))
      return Error(L: OffsetLoc, Msg: "malformed offset expression");

    // The offset must fold to a constant.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: OffsetExpr);
    if (!CE)
      return Error(L: OffsetLoc, Msg: "offset must be an immediate constant");

    Offset = CE->getValue();
  }

  if (parseEOL())
    return true;

  getTargetStreamer().emitMovSP(Reg: SPReg, Offset);
  // The named register is now tracked as the frame pointer.
  UC.saveFPReg(Reg: SPReg);

  return false;
}
12496
12497/// parseDirectiveObjectArch
12498/// ::= .object_arch name
12499bool ARMAsmParser::parseDirectiveObjectArch(SMLoc L) {
12500 MCAsmParser &Parser = getParser();
12501 if (getLexer().isNot(K: AsmToken::Identifier))
12502 return Error(L: getLexer().getLoc(), Msg: "unexpected token");
12503
12504 StringRef Arch = Parser.getTok().getString();
12505 SMLoc ArchLoc = Parser.getTok().getLoc();
12506 Lex();
12507
12508 ARM::ArchKind ID = ARM::parseArch(Arch);
12509
12510 if (ID == ARM::ArchKind::INVALID)
12511 return Error(L: ArchLoc, Msg: "unknown architecture '" + Arch + "'");
12512 if (parseToken(T: AsmToken::EndOfStatement))
12513 return true;
12514
12515 getTargetStreamer().emitObjectArch(Arch: ID);
12516 return false;
12517}
12518
/// parseDirectiveAlign
/// ::= .align
///
/// Handles only the operand-less form, which on ARM means 4-byte
/// alignment. Returning true here (when operands are present) makes the
/// generic directive parser take over. Code sections pad with NOP-style
/// fill, data sections with zeros.
bool ARMAsmParser::parseDirectiveAlign(SMLoc L) {
  // NOTE: if this is not the end of the statement, fall back to the target
  // agnostic handling for this directive which will correctly handle this.
  if (parseOptionalToken(T: AsmToken::EndOfStatement)) {
    // '.align' is target specifically handled to mean 2**2 byte alignment.
    const MCSection *Section = getStreamer().getCurrentSectionOnly();
    assert(Section && "must have section to emit alignment");
    if (Section->useCodeAlign())
      getStreamer().emitCodeAlignment(Alignment: Align(4), STI: &getSTI(), MaxBytesToEmit: 0);
    else
      getStreamer().emitValueToAlignment(Alignment: Align(4), Value: 0, ValueSize: 1, MaxBytesToEmit: 0);
    return false;
  }
  // Operands present: defer to the generic handler.
  return true;
}
12536
/// parseDirectiveThumbSet
/// ::= .thumb_set name, value
///
/// Like .set, but additionally marks the symbol as a Thumb function via
/// the target streamer. Redefinition is permitted. Returns true on error
/// (diagnostic already issued).
bool ARMAsmParser::parseDirectiveThumbSet(SMLoc L) {
  MCAsmParser &Parser = getParser();

  StringRef Name;
  if (check(P: Parser.parseIdentifier(Res&: Name),
            Msg: "expected identifier after '.thumb_set'") ||
      Parser.parseComma())
    return true;

  MCSymbol *Sym;
  const MCExpr *Value;
  // Reuses the generic .set parsing/assignment machinery.
  if (MCParserUtils::parseAssignmentExpression(Name, /* allow_redef */ true,
                                               Parser, Symbol&: Sym, Value))
    return true;

  getTargetStreamer().emitThumbSet(Symbol: Sym, Value);
  return false;
}
12557
12558/// parseDirectiveSEHAllocStack
12559/// ::= .seh_stackalloc
12560/// ::= .seh_stackalloc_w
12561bool ARMAsmParser::parseDirectiveSEHAllocStack(SMLoc L, bool Wide) {
12562 int64_t Size;
12563 if (parseImmExpr(Out&: Size))
12564 return true;
12565 getTargetStreamer().emitARMWinCFIAllocStack(Size, Wide);
12566 return false;
12567}
12568
12569/// parseDirectiveSEHSaveRegs
12570/// ::= .seh_save_regs
12571/// ::= .seh_save_regs_w
12572bool ARMAsmParser::parseDirectiveSEHSaveRegs(SMLoc L, bool Wide) {
12573 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;
12574
12575 if (parseRegisterList(Operands) || parseEOL())
12576 return true;
12577 ARMOperand &Op = (ARMOperand &)*Operands[0];
12578 if (!Op.isRegList())
12579 return Error(L, Msg: ".seh_save_regs{_w} expects GPR registers");
12580 const SmallVectorImpl<MCRegister> &RegList = Op.getRegList();
12581 uint32_t Mask = 0;
12582 for (size_t i = 0; i < RegList.size(); ++i) {
12583 unsigned Reg = MRI->getEncodingValue(Reg: RegList[i]);
12584 if (Reg == 15) // pc -> lr
12585 Reg = 14;
12586 if (Reg == 13)
12587 return Error(L, Msg: ".seh_save_regs{_w} can't include SP");
12588 assert(Reg < 16U && "Register out of range");
12589 unsigned Bit = (1u << Reg);
12590 Mask |= Bit;
12591 }
12592 if (!Wide && (Mask & 0x1f00) != 0)
12593 return Error(L,
12594 Msg: ".seh_save_regs cannot save R8-R12, needs .seh_save_regs_w");
12595 getTargetStreamer().emitARMWinCFISaveRegMask(Mask, Wide);
12596 return false;
12597}
12598
/// parseDirectiveSEHSaveSP
/// ::= .seh_save_sp
///
/// Windows SEH: records that sp was saved into the named GPR. The
/// register must not be sp or pc. Returns true on error (diagnostic
/// already issued).
bool ARMAsmParser::parseDirectiveSEHSaveSP(SMLoc L) {
  MCRegister Reg = tryParseRegister();
  if (!Reg || !MRI->getRegClass(i: ARM::GPRRegClassID).contains(Reg))
    return Error(L, Msg: "expected GPR");
  // Encoding values 13 (sp) and 15 (pc, i.e. > 14) are not valid targets.
  unsigned Index = MRI->getEncodingValue(Reg);
  if (Index > 14 || Index == 13)
    return Error(L, Msg: "invalid register for .seh_save_sp");
  getTargetStreamer().emitARMWinCFISaveSP(Reg: Index);
  return false;
}
12611
/// parseDirectiveSEHSaveFRegs
/// ::= .seh_save_fregs
///
/// Windows SEH: records a DPR register-list save. The list must be a
/// non-empty contiguous range lying entirely within d0-d15 or within
/// d16-d31. Returns true on error (diagnostic already issued).
bool ARMAsmParser::parseDirectiveSEHSaveFRegs(SMLoc L) {
  // RAII object to make sure parsed operands are deleted.
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;

  if (parseRegisterList(Operands) || parseEOL())
    return true;
  ARMOperand &Op = (ARMOperand &)*Operands[0];
  if (!Op.isDPRRegList())
    return Error(L, Msg: ".seh_save_fregs expects DPR registers");
  const SmallVectorImpl<MCRegister> &RegList = Op.getRegList();
  // Build a bitmask of D-register numbers from the parsed list.
  uint32_t Mask = 0;
  for (size_t i = 0; i < RegList.size(); ++i) {
    unsigned Reg = MRI->getEncodingValue(Reg: RegList[i]);
    assert(Reg < 32U && "Register out of range");
    unsigned Bit = (1u << Reg);
    Mask |= Bit;
  }

  if (Mask == 0)
    return Error(L, Msg: ".seh_save_fregs missing registers");

  // Find the lowest register, shifting the mask down as we go so that
  // bit 0 corresponds to `First` afterwards.
  unsigned First = 0;
  while ((Mask & 1) == 0) {
    First++;
    Mask >>= 1;
  }
  // A contiguous run starting at bit 0 means Mask is of the form 2^k - 1,
  // i.e. Mask+1 is a power of two; any gap makes (Mask+1) & Mask nonzero.
  if (((Mask + 1) & Mask) != 0)
    return Error(L,
                 Msg: ".seh_save_fregs must take a contiguous range of registers");
  // Walk up from First to find the highest register in the run.
  unsigned Last = First;
  while ((Mask & 2) != 0) {
    Last++;
    Mask >>= 1;
  }
  // The encoding cannot express a range straddling the d15/d16 boundary.
  if (First < 16 && Last >= 16)
    return Error(L, Msg: ".seh_save_fregs must be all d0-d15 or d16-d31");
  getTargetStreamer().emitARMWinCFISaveFRegs(First, Last);
  return false;
}
12652
12653/// parseDirectiveSEHSaveLR
12654/// ::= .seh_save_lr
12655bool ARMAsmParser::parseDirectiveSEHSaveLR(SMLoc L) {
12656 int64_t Offset;
12657 if (parseImmExpr(Out&: Offset))
12658 return true;
12659 getTargetStreamer().emitARMWinCFISaveLR(Offset);
12660 return false;
12661}
12662
12663/// parseDirectiveSEHPrologEnd
12664/// ::= .seh_endprologue
12665/// ::= .seh_endprologue_fragment
12666bool ARMAsmParser::parseDirectiveSEHPrologEnd(SMLoc L, bool Fragment) {
12667 getTargetStreamer().emitARMWinCFIPrologEnd(Fragment);
12668 return false;
12669}
12670
12671/// parseDirectiveSEHNop
12672/// ::= .seh_nop
12673/// ::= .seh_nop_w
12674bool ARMAsmParser::parseDirectiveSEHNop(SMLoc L, bool Wide) {
12675 getTargetStreamer().emitARMWinCFINop(Wide);
12676 return false;
12677}
12678
12679/// parseDirectiveSEHEpilogStart
12680/// ::= .seh_startepilogue
12681/// ::= .seh_startepilogue_cond
12682bool ARMAsmParser::parseDirectiveSEHEpilogStart(SMLoc L, bool Condition) {
12683 unsigned CC = ARMCC::AL;
12684 if (Condition) {
12685 MCAsmParser &Parser = getParser();
12686 SMLoc S = Parser.getTok().getLoc();
12687 const AsmToken &Tok = Parser.getTok();
12688 if (!Tok.is(K: AsmToken::Identifier))
12689 return Error(L: S, Msg: ".seh_startepilogue_cond missing condition");
12690 CC = ARMCondCodeFromString(CC: Tok.getString());
12691 if (CC == ~0U)
12692 return Error(L: S, Msg: "invalid condition");
12693 Parser.Lex(); // Eat the token.
12694 }
12695
12696 getTargetStreamer().emitARMWinCFIEpilogStart(Condition: CC);
12697 return false;
12698}
12699
12700/// parseDirectiveSEHEpilogEnd
12701/// ::= .seh_endepilogue
12702bool ARMAsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
12703 getTargetStreamer().emitARMWinCFIEpilogEnd();
12704 return false;
12705}
12706
/// parseDirectiveSEHCustom
/// ::= .seh_custom
///
/// Windows SEH: emits a custom unwind opcode built from 1-4 byte
/// operands, packed big-endian into a single 32-bit value. Returns true
/// on error (diagnostic already issued).
bool ARMAsmParser::parseDirectiveSEHCustom(SMLoc L) {
  unsigned Opcode = 0;
  do {
    int64_t Byte;
    if (parseImmExpr(Out&: Byte))
      return true;
    // Each operand must be a single unsigned byte.
    if (Byte > 0xff || Byte < 0)
      return Error(L, Msg: "Invalid byte value in .seh_custom");
    // At most 4 bytes fit; a nonzero top byte means a 5th would overflow.
    if (Opcode > 0x00ffffff)
      return Error(L, Msg: "Too many bytes in .seh_custom");
    // Store the bytes as one big endian number in Opcode. In a multi byte
    // opcode sequence, the first byte can't be zero.
    Opcode = (Opcode << 8) | Byte;
  } while (parseOptionalToken(T: AsmToken::Comma));
  getTargetStreamer().emitARMWinCFICustom(Opcode);
  return false;
}
12726
12727/// Force static initialization.
12728extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void LLVMInitializeARMAsmParser() {
12729 RegisterMCAsmParser<ARMAsmParser> X(getTheARMLETarget());
12730 RegisterMCAsmParser<ARMAsmParser> Y(getTheARMBETarget());
12731 RegisterMCAsmParser<ARMAsmParser> A(getTheThumbLETarget());
12732 RegisterMCAsmParser<ARMAsmParser> B(getTheThumbBETarget());
12733}
12734
12735#define GET_REGISTER_MATCHER
12736#define GET_SUBTARGET_FEATURE_NAME
12737#define GET_MATCHER_IMPLEMENTATION
12738#define GET_MNEMONIC_SPELL_CHECKER
12739#include "ARMGenAsmMatcher.inc"
12740
12741// Some diagnostics need to vary with subtarget features, so they are handled
12742// here. For example, the DPR class has either 16 or 32 registers, depending
12743// on the FPU available.
12744const char *
12745ARMAsmParser::getCustomOperandDiag(ARMMatchResultTy MatchError) {
12746 switch (MatchError) {
12747 // rGPR contains sp starting with ARMv8.
12748 case Match_rGPR:
12749 return hasV8Ops() ? "operand must be a register in range [r0, r14]"
12750 : "operand must be a register in range [r0, r12] or r14";
12751 // DPR contains 16 registers for some FPUs, and 32 for others.
12752 case Match_DPR:
12753 return hasD32() ? "operand must be a register in range [d0, d31]"
12754 : "operand must be a register in range [d0, d15]";
12755 case Match_DPR_RegList:
12756 return hasD32() ? "operand must be a list of registers in range [d0, d31]"
12757 : "operand must be a list of registers in range [d0, d15]";
12758
12759 // For all other diags, use the static string from tablegen.
12760 default:
12761 return getMatchKindDiag(MatchResult: MatchError);
12762 }
12763}
12764
// Process the list of near-misses, throwing away ones we don't want to report
// to the user, and converting the rest to a source location and string that
// should be reported.
//
// \param NearMissesIn  Raw near-miss records from the matcher.
// \param NearMissesOut Filtered, user-presentable messages (appended to).
// \param IDLoc         Location of the instruction mnemonic.
// \param Operands      The parsed operands of the instruction.
void
ARMAsmParser::FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
                               SmallVectorImpl<NearMissMessage> &NearMissesOut,
                               SMLoc IDLoc, OperandVector &Operands) {
  // TODO: If operand didn't match, sub in a dummy one and run target
  // predicate, so that we can avoid reporting near-misses that are invalid?
  // TODO: Many operand types dont have SuperClasses set, so we report
  // redundant ones.
  // TODO: Some operands are superclasses of registers (e.g.
  // MCK_RegShiftedImm), we don't have any way to represent that currently.
  // TODO: This is not all ARM-specific, can some of it be factored out?

  // Record some information about near-misses that we have already seen, so
  // that we can avoid reporting redundant ones. For example, if there are
  // variants of an instruction that take 8- and 16-bit immediates, we want
  // to only report the widest one.
  // Key: operand index; value: match class already reported for it.
  std::multimap<unsigned, unsigned> OperandMissesSeen;
  SmallSet<FeatureBitset, 4> FeatureMissesSeen;
  bool ReportedTooFewOperands = false;

  unsigned MnemonicOpsEndInd = getMnemonicOpsEndInd(Operands);

  // Process the near-misses in reverse order, so that we see more general ones
  // first, and so can avoid emitting more specific ones.
  for (NearMissInfo &I : reverse(C&: NearMissesIn)) {
    switch (I.getKind()) {
    case NearMissInfo::NearMissOperand: {
      SMLoc OperandLoc =
          ((ARMOperand &)*Operands[I.getOperandIndex()]).getStartLoc();
      const char *OperandDiag =
          getCustomOperandDiag(MatchError: (ARMMatchResultTy)I.getOperandError());

      // If we have already emitted a message for a superclass, don't also report
      // the sub-class. We consider all operand classes that we don't have a
      // specialised diagnostic for to be equal for the propose of this check,
      // so that we don't report the generic error multiple times on the same
      // operand.
      unsigned DupCheckMatchClass = OperandDiag ? I.getOperandClass() : ~0U;
      auto PrevReports = OperandMissesSeen.equal_range(x: I.getOperandIndex());
      if (std::any_of(first: PrevReports.first, last: PrevReports.second,
                      pred: [DupCheckMatchClass](
                          const std::pair<unsigned, unsigned> Pair) {
                        if (DupCheckMatchClass == ~0U || Pair.second == ~0U)
                          return Pair.second == DupCheckMatchClass;
                        else
                          return isSubclass(A: (MatchClassKind)DupCheckMatchClass,
                                            B: (MatchClassKind)Pair.second);
                      }))
        break;
      OperandMissesSeen.insert(
          x: std::make_pair(x: I.getOperandIndex(), y&: DupCheckMatchClass));

      NearMissMessage Message;
      Message.Loc = OperandLoc;
      if (OperandDiag) {
        Message.Message = OperandDiag;
      } else if (I.getOperandClass() == InvalidMatchClass) {
        Message.Message = "too many operands for instruction";
      } else {
        Message.Message = "invalid operand for instruction";
        LLVM_DEBUG(
            dbgs() << "Missing diagnostic string for operand class "
                   << getMatchClassName((MatchClassKind)I.getOperandClass())
                   << I.getOperandClass() << ", error " << I.getOperandError()
                   << ", opcode " << MII.getName(I.getOpcode()) << "\n");
      }
      NearMissesOut.emplace_back(Args&: Message);
      break;
    }
    case NearMissInfo::NearMissFeature: {
      const FeatureBitset &MissingFeatures = I.getFeatures();
      // Don't report the same set of features twice.
      if (FeatureMissesSeen.count(V: MissingFeatures))
        break;
      FeatureMissesSeen.insert(V: MissingFeatures);

      // Special case: don't report a feature set which includes arm-mode for
      // targets that don't have ARM mode.
      if (MissingFeatures.test(I: Feature_IsARMBit) && !hasARM())
        break;
      // Don't report any near-misses that both require switching instruction
      // set, and adding other subtarget features.
      if (isThumb() && MissingFeatures.test(I: Feature_IsARMBit) &&
          MissingFeatures.count() > 1)
        break;
      if (!isThumb() && MissingFeatures.test(I: Feature_IsThumbBit) &&
          MissingFeatures.count() > 1)
        break;
      // Likewise for Thumb2, ignoring the Thumb bit itself when counting
      // the extra features.
      if (!isThumb() && MissingFeatures.test(I: Feature_IsThumb2Bit) &&
          (MissingFeatures & ~FeatureBitset({Feature_IsThumb2Bit,
                                             Feature_IsThumbBit})).any())
        break;
      // M-profile targets never have NEON; suggesting it would be noise.
      if (isMClass() && MissingFeatures.test(I: Feature_HasNEONBit))
        break;

      NearMissMessage Message;
      Message.Loc = IDLoc;
      raw_svector_ostream OS(Message.Message);

      OS << "instruction requires:";
      for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i)
        if (MissingFeatures.test(I: i))
          OS << ' ' << getSubtargetFeatureName(Val: i);

      NearMissesOut.emplace_back(Args&: Message);

      break;
    }
    case NearMissInfo::NearMissPredicate: {
      NearMissMessage Message;
      Message.Loc = IDLoc;
      // Map each target-predicate failure onto a human-readable message.
      switch (I.getPredicateError()) {
      case Match_RequiresNotITBlock:
        Message.Message = "flag setting instruction only valid outside IT block";
        break;
      case Match_RequiresITBlock:
        Message.Message = "instruction only valid inside IT block";
        break;
      case Match_RequiresV6:
        Message.Message = "instruction variant requires ARMv6 or later";
        break;
      case Match_RequiresThumb2:
        Message.Message = "instruction variant requires Thumb2";
        break;
      case Match_RequiresV8:
        Message.Message = "instruction variant requires ARMv8 or later";
        break;
      case Match_RequiresFlagSetting:
        Message.Message = "no flag-preserving variant of this instruction available";
        break;
      case Match_InvalidTiedOperand: {
        ARMOperand &Op = static_cast<ARMOperand &>(*Operands[0]);
        if (Op.isToken() && Op.getToken() == "mul") {
          Message.Message = "destination register must match a source register";
          // Point at the first operand after the mnemonic.
          Message.Loc = Operands[MnemonicOpsEndInd]->getStartLoc();
        } else {
          llvm_unreachable("Match_InvalidTiedOperand only used for tMUL.");
        }
        break;
      }
      case Match_InvalidOperand:
        Message.Message = "invalid operand for instruction";
        break;
      default:
        llvm_unreachable("Unhandled target predicate error");
        break;
      }
      NearMissesOut.emplace_back(Args&: Message);
      break;
    }
    case NearMissInfo::NearMissTooFewOperands: {
      // Only report this once, anchored after the last operand given.
      if (!ReportedTooFewOperands) {
        SMLoc EndLoc = ((ARMOperand &)*Operands.back()).getEndLoc();
        NearMissesOut.emplace_back(Args: NearMissMessage{
            .Loc: EndLoc, .Message: StringRef("too few operands for instruction")});
        ReportedTooFewOperands = true;
      }
      break;
    }
    case NearMissInfo::NoNearMiss:
      // This should never leave the matcher.
      llvm_unreachable("not a near-miss");
      break;
    }
  }
}
12934
12935void ARMAsmParser::ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses,
12936 SMLoc IDLoc, OperandVector &Operands) {
12937 SmallVector<NearMissMessage, 4> Messages;
12938 FilterNearMisses(NearMissesIn&: NearMisses, NearMissesOut&: Messages, IDLoc, Operands);
12939
12940 if (Messages.size() == 0) {
12941 // No near-misses were found, so the best we can do is "invalid
12942 // instruction".
12943 Error(L: IDLoc, Msg: "invalid instruction");
12944 } else if (Messages.size() == 1) {
12945 // One near miss was found, report it as the sole error.
12946 Error(L: Messages[0].Loc, Msg: Messages[0].Message);
12947 } else {
12948 // More than one near miss, so report a generic "invalid instruction"
12949 // error, followed by notes for each of the near-misses.
12950 Error(L: IDLoc, Msg: "invalid instruction, any one of the following would fix this:");
12951 for (auto &M : Messages) {
12952 Note(L: M.Loc, Msg: M.Message);
12953 }
12954 }
12955}
12956
/// Enable -- or disable, when \p Name carries a leading "no" -- the named
/// architecture extension on the current subtarget, emitting any diagnostic
/// at \p ExtLoc.
///
/// Return contract (note the asymmetry): returns true when the extension was
/// handled, *including* the error paths, because Error() itself returns true
/// after emitting the diagnostic. Returns false only when ARM::parseArchExt
/// recognised the name but it has no entry in the table below, letting the
/// caller report it as unknown.
bool ARMAsmParser::enableArchExtFeature(StringRef Name, SMLoc &ExtLoc) {
  // FIXME: This structure should be moved inside ARMTargetParser
  // when we start to table-generate them, and we can use the ARM
  // flags below, that were generated by table-gen.
  static const struct {
    const uint64_t Kind;           // AEK_* extension identifier bit(s).
    const FeatureBitset ArchCheck; // Features the base architecture must have.
    const FeatureBitset Features;  // Subtarget features toggled by the extension.
  } Extensions[] = {
      {.Kind: ARM::AEK_CRC, .ArchCheck: {Feature_HasV8Bit}, .Features: {ARM::FeatureCRC}},
      {.Kind: ARM::AEK_AES,
       .ArchCheck: {Feature_HasV8Bit},
       .Features: {ARM::FeatureAES, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
      {.Kind: ARM::AEK_SHA2,
       .ArchCheck: {Feature_HasV8Bit},
       .Features: {ARM::FeatureSHA2, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
      {.Kind: ARM::AEK_CRYPTO,
       .ArchCheck: {Feature_HasV8Bit},
       .Features: {ARM::FeatureCrypto, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
      // Several extension bits can map to a single feature, as here: DSP, MVE
      // and FP together select MVE with floating point.
      {.Kind: (ARM::AEK_DSP | ARM::AEK_MVE | ARM::AEK_FP),
       .ArchCheck: {Feature_HasV8_1MMainlineBit},
       .Features: {ARM::HasMVEFloatOps}},
      {.Kind: ARM::AEK_FP,
       .ArchCheck: {Feature_HasV8Bit},
       .Features: {ARM::FeatureVFP2_SP, ARM::FeatureFPARMv8}},
      {.Kind: (ARM::AEK_HWDIVTHUMB | ARM::AEK_HWDIVARM),
       .ArchCheck: {Feature_HasV7Bit, Feature_IsNotMClassBit},
       .Features: {ARM::FeatureHWDivThumb, ARM::FeatureHWDivARM}},
      {.Kind: ARM::AEK_MP,
       .ArchCheck: {Feature_HasV7Bit, Feature_IsNotMClassBit},
       .Features: {ARM::FeatureMP}},
      {.Kind: ARM::AEK_SIMD,
       .ArchCheck: {Feature_HasV8Bit},
       .Features: {ARM::FeatureNEON, ARM::FeatureVFP2_SP, ARM::FeatureFPARMv8}},
      {.Kind: ARM::AEK_SEC, .ArchCheck: {Feature_HasV6KBit}, .Features: {ARM::FeatureTrustZone}},
      // FIXME: Only available in A-class, isel not predicated
      {.Kind: ARM::AEK_VIRT, .ArchCheck: {Feature_HasV7Bit}, .Features: {ARM::FeatureVirtualization}},
      {.Kind: ARM::AEK_FP16,
       .ArchCheck: {Feature_HasV8_2aBit},
       .Features: {ARM::FeatureFPARMv8, ARM::FeatureFullFP16}},
      {.Kind: ARM::AEK_RAS, .ArchCheck: {Feature_HasV8Bit}, .Features: {ARM::FeatureRAS}},
      {.Kind: ARM::AEK_LOB, .ArchCheck: {Feature_HasV8_1MMainlineBit}, .Features: {ARM::FeatureLOB}},
      {.Kind: ARM::AEK_PACBTI, .ArchCheck: {Feature_HasV8_1MMainlineBit}, .Features: {ARM::FeaturePACBTI}},
      // FIXME: Unsupported extensions. An empty Features set marks a name we
      // recognise but reject with "unsupported architectural extension".
      {.Kind: ARM::AEK_OS, .ArchCheck: {}, .Features: {}},
      {.Kind: ARM::AEK_IWMMXT, .ArchCheck: {}, .Features: {}},
      {.Kind: ARM::AEK_IWMMXT2, .ArchCheck: {}, .Features: {}},
      {.Kind: ARM::AEK_MAVERICK, .ArchCheck: {}, .Features: {}},
      {.Kind: ARM::AEK_XSCALE, .ArchCheck: {}, .Features: {}},
  };
  // A leading "no" (matched case-insensitively) means disable rather than
  // enable; it is stripped before looking the name up.
  bool EnableFeature = !Name.consume_front_insensitive(Prefix: "no");
  uint64_t FeatureKind = ARM::parseArchExt(ArchExt: Name);
  if (FeatureKind == ARM::AEK_INVALID)
    return Error(L: ExtLoc, Msg: "unknown architectural extension: " + Name);

  for (const auto &Extension : Extensions) {
    if (Extension.Kind != FeatureKind)
      continue;

    if (Extension.Features.none())
      return Error(L: ExtLoc, Msg: "unsupported architectural extension: " + Name);

    // The current base architecture must provide every bit in ArchCheck.
    if ((getAvailableFeatures() & Extension.ArchCheck) != Extension.ArchCheck)
      return Error(L: ExtLoc, Msg: "architectural extension '" + Name +
                   "' is not "
                   "allowed for the current base architecture");

    // Toggle the features on a writable copy of the subtarget info, then
    // recompute the cached available-features mask used by the matcher.
    MCSubtargetInfo &STI = copySTI();
    if (EnableFeature) {
      STI.SetFeatureBitsTransitively(Extension.Features);
    } else {
      STI.ClearFeatureBitsTransitively(FB: Extension.Features);
    }
    FeatureBitset Features = ComputeAvailableFeatures(FB: STI.getFeatureBits());
    setAvailableFeatures(Features);
    return true;
  }
  return false;
}
13036
/// parseDirectiveArchExtension
/// ::= .arch_extension [no]feature
///
/// Returns true (with a diagnostic emitted) on failure, false on success.
bool ARMAsmParser::parseDirectiveArchExtension(SMLoc L) {

  MCAsmParser &Parser = getParser();

  // The extension name must be a bare identifier token.
  if (getLexer().isNot(K: AsmToken::Identifier))
    return Error(L: getLexer().getLoc(), Msg: "expected architecture extension name");

  StringRef Name = Parser.getTok().getString();
  SMLoc ExtLoc = Parser.getTok().getLoc();
  Lex();

  if (parseEOL())
    return true;

  // "nocrypto" also clears the component extensions SHA2 and AES, mirroring
  // the table entry where "crypto" enables the combined crypto features.
  // Best-effort: the return values are deliberately ignored here.
  if (Name == "nocrypto") {
    enableArchExtFeature(Name: "nosha2", ExtLoc);
    enableArchExtFeature(Name: "noaes", ExtLoc);
  }

  // enableArchExtFeature returns true both on success and after reporting
  // its own diagnostic, so returning false here never swallows an error;
  // false from it means the name was not in its extension table.
  if (enableArchExtFeature(Name, ExtLoc))
    return false;

  return Error(L: ExtLoc, Msg: "unknown architectural extension: " + Name);
}
13063
13064// Define this matcher function after the auto-generated include so we
13065// have the match class enum definitions.
13066unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
13067 unsigned Kind) {
13068 ARMOperand &Op = static_cast<ARMOperand &>(AsmOp);
13069 // If the kind is a token for a literal immediate, check if our asm
13070 // operand matches. This is for InstAliases which have a fixed-value
13071 // immediate in the syntax.
13072 switch (Kind) {
13073 default: break;
13074 case MCK__HASH_0:
13075 if (Op.isImm())
13076 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Op.getImm()))
13077 if (CE->getValue() == 0)
13078 return Match_Success;
13079 break;
13080 case MCK__HASH_8:
13081 if (Op.isImm())
13082 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Op.getImm()))
13083 if (CE->getValue() == 8)
13084 return Match_Success;
13085 break;
13086 case MCK__HASH_16:
13087 if (Op.isImm())
13088 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Val: Op.getImm()))
13089 if (CE->getValue() == 16)
13090 return Match_Success;
13091 break;
13092 case MCK_ModImm:
13093 if (Op.isImm()) {
13094 const MCExpr *SOExpr = Op.getImm();
13095 int64_t Value;
13096 if (!SOExpr->evaluateAsAbsolute(Res&: Value))
13097 return Match_Success;
13098 assert((Value >= std::numeric_limits<int32_t>::min() &&
13099 Value <= std::numeric_limits<uint32_t>::max()) &&
13100 "expression value must be representable in 32 bits");
13101 }
13102 break;
13103 case MCK_rGPR:
13104 if (hasV8Ops() && Op.isReg() && Op.getReg() == ARM::SP)
13105 return Match_Success;
13106 return Match_rGPR;
13107 }
13108 return Match_InvalidOperand;
13109}
13110
13111bool ARMAsmParser::isMnemonicVPTPredicable(StringRef Mnemonic,
13112 StringRef ExtraToken) {
13113 if (!hasMVE())
13114 return false;
13115
13116 if (MS.isVPTPredicableCDEInstr(Mnemonic) ||
13117 (Mnemonic.starts_with(Prefix: "vldrh") && Mnemonic != "vldrhi") ||
13118 (Mnemonic.starts_with(Prefix: "vmov") &&
13119 !(ExtraToken == ".f16" || ExtraToken == ".32" || ExtraToken == ".16" ||
13120 ExtraToken == ".8")) ||
13121 (Mnemonic.starts_with(Prefix: "vrint") && Mnemonic != "vrintr") ||
13122 (Mnemonic.starts_with(Prefix: "vstrh") && Mnemonic != "vstrhi"))
13123 return true;
13124
13125 const char *predicable_prefixes[] = {
13126 "vabav", "vabd", "vabs", "vadc", "vadd",
13127 "vaddlv", "vaddv", "vand", "vbic", "vbrsr",
13128 "vcadd", "vcls", "vclz", "vcmla", "vcmp",
13129 "vcmul", "vctp", "vcvt", "vddup", "vdup",
13130 "vdwdup", "veor", "vfma", "vfmas", "vfms",
13131 "vhadd", "vhcadd", "vhsub", "vidup", "viwdup",
13132 "vldrb", "vldrd", "vldrw", "vmax", "vmaxa",
13133 "vmaxav", "vmaxnm", "vmaxnma", "vmaxnmav", "vmaxnmv",
13134 "vmaxv", "vmin", "vminav", "vminnm", "vminnmav",
13135 "vminnmv", "vminv", "vmla", "vmladav", "vmlaldav",
13136 "vmlalv", "vmlas", "vmlav", "vmlsdav", "vmlsldav",
13137 "vmovlb", "vmovlt", "vmovnb", "vmovnt", "vmul",
13138 "vmvn", "vneg", "vorn", "vorr", "vpnot",
13139 "vpsel", "vqabs", "vqadd", "vqdmladh", "vqdmlah",
13140 "vqdmlash", "vqdmlsdh", "vqdmulh", "vqdmull", "vqmovn",
13141 "vqmovun", "vqneg", "vqrdmladh", "vqrdmlah", "vqrdmlash",
13142 "vqrdmlsdh", "vqrdmulh", "vqrshl", "vqrshrn", "vqrshrun",
13143 "vqshl", "vqshrn", "vqshrun", "vqsub", "vrev16",
13144 "vrev32", "vrev64", "vrhadd", "vrmlaldavh", "vrmlalvh",
13145 "vrmlsldavh", "vrmulh", "vrshl", "vrshr", "vrshrn",
13146 "vsbc", "vshl", "vshlc", "vshll", "vshr",
13147 "vshrn", "vsli", "vsri", "vstrb", "vstrd",
13148 "vstrw", "vsub"};
13149
13150 return any_of(Range&: predicable_prefixes, P: [&Mnemonic](const char *prefix) {
13151 return Mnemonic.starts_with(Prefix: prefix);
13152 });
13153}
13154
13155std::unique_ptr<ARMOperand> ARMAsmParser::defaultCondCodeOp() {
13156 return ARMOperand::CreateCondCode(CC: ARMCC::AL, S: SMLoc(), Parser&: *this);
13157}
13158
13159std::unique_ptr<ARMOperand> ARMAsmParser::defaultCCOutOp() {
13160 return ARMOperand::CreateCCOut(Reg: 0, S: SMLoc(), Parser&: *this);
13161}
13162
13163std::unique_ptr<ARMOperand> ARMAsmParser::defaultVPTPredOp() {
13164 return ARMOperand::CreateVPTPred(CC: ARMVCC::None, S: SMLoc(), Parser&: *this);
13165}
13166