1//===- X86Operand.h - Parsed X86 machine instruction ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#ifndef LLVM_LIB_TARGET_X86_ASMPARSER_X86OPERAND_H
10#define LLVM_LIB_TARGET_X86_ASMPARSER_X86OPERAND_H
11
12#include "MCTargetDesc/X86IntelInstPrinter.h"
13#include "MCTargetDesc/X86MCTargetDesc.h"
14#include "X86AsmParserCommon.h"
15#include "llvm/ADT/STLExtras.h"
16#include "llvm/ADT/StringRef.h"
17#include "llvm/MC/MCExpr.h"
18#include "llvm/MC/MCInst.h"
19#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
20#include "llvm/MC/MCRegisterInfo.h"
21#include "llvm/MC/MCSymbol.h"
22#include "llvm/Support/Casting.h"
23#include "llvm/Support/SMLoc.h"
24#include <cassert>
25#include <memory>
26
27namespace llvm {
28
/// X86Operand - Instances of this class represent a parsed X86 machine
/// instruction.
struct X86Operand final : public MCParsedAsmOperand {
  // Discriminates the anonymous union below; only the accessors matching the
  // active Kind may be used (enforced by asserts).
  enum KindTy { Token, Register, Immediate, Memory, Prefix, DXRegister } Kind;

  // Source range covered by this operand.
  SMLoc StartLoc, EndLoc;
  // Location of an MS-style "offset" operator applied to this operand, if any.
  SMLoc OffsetOfLoc;
  // Symbol name for operands that reference an inline-asm identifier.
  StringRef SymName;
  // Opaque frontend declaration pointer for inline-asm operands.
  void *OpDecl;
  // True when the operand was spelled with an address-of semantic in inline asm.
  bool AddressOf;

  /// This used for inline asm which may specify base reg and index reg for
  /// MemOp. e.g. ARR[eax + ecx*4], so no extra reg can be used for MemOp.
  bool UseUpRegs = false;

  // Raw token text (not owned; points into the source buffer).
  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct RegOp {
    MCRegister RegNo;
  };

  // Bitmask of parsed instruction prefixes.
  struct PrefOp {
    unsigned Prefixes;
  };

  struct ImmOp {
    const MCExpr *Val;
    // True when the immediate refers to a local (non-global) inline-asm symbol.
    bool LocalRef;
  };

  // Full x86 memory reference: SegReg:[BaseReg + IndexReg*Scale + Disp].
  struct MemOp {
    MCRegister SegReg;
    const MCExpr *Disp;
    MCRegister BaseReg;
    // Base register to encode when BaseReg is absent (e.g. RIP-relative forms).
    MCRegister DefaultBaseReg;
    MCRegister IndexReg;
    unsigned Scale;
    // Operand size in bits; 0 means unsized.
    unsigned Size;
    // Address-mode size in bits: 16, 32, or 64.
    unsigned ModeSize;

    /// If the memory operand is unsized and there are multiple instruction
    /// matches, prefer the one with this size.
    unsigned FrontendSize;

    /// If false, then this operand must be a memory operand for an indirect
    /// branch instruction. Otherwise, this operand may belong to either a
    /// direct or indirect branch instruction.
    bool MaybeDirectBranchDest;
  };

  // Payload; the active member is selected by Kind.
  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct ImmOp Imm;
    struct MemOp Mem;
    struct PrefOp Pref;
  };

  // Note: the union payload is NOT initialized here; the Create* factories
  // below fill in the member matching the Kind they construct.
  X86Operand(KindTy K, SMLoc Start, SMLoc End)
      : Kind(K), StartLoc(Start), EndLoc(End), OpDecl(nullptr),
        AddressOf(false) {}

  StringRef getSymName() override { return SymName; }
  void *getOpDecl() override { return OpDecl; }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }

  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  /// getLocRange - Get the range between the first and last token of this
  /// operand.
  SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }

  /// getOffsetOfLoc - Get the location of the offset operator.
  SMLoc getOffsetOfLoc() const override { return OffsetOfLoc; }

  // Debug dump of the operand in a compact "Kind:value" form.
  void print(raw_ostream &OS, const MCAsmInfo &) const override {
    // Prints constant immediates by value and symbol references by name;
    // other expression kinds (and a constant value of 0) print nothing.
    auto PrintImmValue = [&](const MCExpr *Val, const char *VName) {
      if (Val->getKind() == MCExpr::Constant) {
        if (auto Imm = cast<MCConstantExpr>(Val)->getValue())
          OS << VName << Imm;
      } else if (Val->getKind() == MCExpr::SymbolRef) {
        if (auto *SRE = dyn_cast<MCSymbolRefExpr>(Val)) {
          const MCSymbol &Sym = SRE->getSymbol();
          if (const char *SymNameStr = Sym.getName().data())
            OS << VName << SymNameStr;
        }
      }
    };

    switch (Kind) {
    case Token:
      OS << Tok.Data;
      break;
    case Register:
      OS << "Reg:" << X86IntelInstPrinter::getRegisterName(Reg.RegNo);
      break;
    case DXRegister:
      OS << "DXReg";
      break;
    case Immediate:
      PrintImmValue(Imm.Val, "Imm:");
      break;
    case Prefix:
      OS << "Prefix:" << Pref.Prefixes;
      break;
    case Memory:
      OS << "Memory: ModeSize=" << Mem.ModeSize;
      if (Mem.Size)
        OS << ",Size=" << Mem.Size;
      if (Mem.BaseReg)
        OS << ",BaseReg=" << X86IntelInstPrinter::getRegisterName(Mem.BaseReg);
      if (Mem.IndexReg)
        OS << ",IndexReg="
           << X86IntelInstPrinter::getRegisterName(Mem.IndexReg);
      if (Mem.Scale)
        OS << ",Scale=" << Mem.Scale;
      if (Mem.Disp)
        PrintImmValue(Mem.Disp, ",Disp=");
      if (Mem.SegReg)
        OS << ",SegReg=" << X86IntelInstPrinter::getRegisterName(Mem.SegReg);
      break;
    }
  }

  StringRef getToken() const {
    assert(Kind == Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }
  void setTokenValue(StringRef Value) {
    assert(Kind == Token && "Invalid access!");
    Tok.Data = Value.data();
    Tok.Length = Value.size();
  }

  MCRegister getReg() const override {
    assert(Kind == Register && "Invalid access!");
    return Reg.RegNo;
  }

  unsigned getPrefix() const {
    assert(Kind == Prefix && "Invalid access!");
    return Pref.Prefixes;
  }

  const MCExpr *getImm() const {
    assert(Kind == Immediate && "Invalid access!");
    return Imm.Val;
  }

  // Accessors for the components of a Memory operand; all assert
  // Kind == Memory.
  const MCExpr *getMemDisp() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.Disp;
  }
  MCRegister getMemSegReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.SegReg;
  }
  MCRegister getMemBaseReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.BaseReg;
  }
  MCRegister getMemDefaultBaseReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.DefaultBaseReg;
  }
  MCRegister getMemIndexReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.IndexReg;
  }
  unsigned getMemScale() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.Scale;
  }
  unsigned getMemModeSize() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.ModeSize;
  }
  unsigned getMemFrontendSize() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.FrontendSize;
  }
  bool isMaybeDirectBranchDest() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.MaybeDirectBranchDest;
  }

  bool isToken() const override {return Kind == Token; }

  bool isImm() const override { return Kind == Immediate; }

  // isImmSExtiAiB: true if the immediate, sign-extended from B bits, fits the
  // A-bit immediate field of an instruction. Non-constant expressions are
  // optimistically accepted and left to relaxation/relocation.
  bool isImmSExti16i8() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti16i8Value(CE->getValue());
  }
  bool isImmSExti32i8() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti32i8Value(CE->getValue());
  }
  bool isImmSExti64i8() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti64i8Value(CE->getValue());
  }
  bool isImmSExti64i32() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti64i32Value(CE->getValue());
  }

  bool isImmUnsignedi4() const {
    if (!isImm()) return false;
    // If this isn't a constant expr, reject it. The immediate byte is shared
    // with a register encoding. We can't have it affected by a relocation.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    return isImmUnsignedi4Value(CE->getValue());
  }

  bool isImmUnsignedi8() const {
    if (!isImm()) return false;
    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return true;
    return isImmUnsignedi8Value(CE->getValue());
  }

  bool isOffsetOfLocal() const override { return isImm() && Imm.LocalRef; }

  bool needAddressOf() const override { return AddressOf; }

  bool isMem() const override { return Kind == Memory; }
  bool isMemUnsized() const {
    return Kind == Memory && Mem.Size == 0;
  }
  // isMemN: memory operand of exactly N bits, or unsized (Size == 0), which
  // matches any width.
  bool isMem8() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 8);
  }
  bool isMem16() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 16);
  }
  bool isMem32() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 32);
  }
  bool isMem64() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 64);
  }
  bool isMem80() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 80);
  }
  bool isMem128() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 128);
  }
  bool isMem256() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 256);
  }
  bool isMem512() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 512);
  }

  // Memory operand that can be encoded with a SIB byte (RIP/EIP-relative
  // addressing has no SIB form).
  bool isSibMem() const {
    return isMem() && Mem.BaseReg != X86::RIP && Mem.BaseReg != X86::EIP;
  }

  // True if the index register's encoding falls in [LowR, HighR]; relies on
  // contiguous register numbering within a class.
  bool isMemIndexReg(unsigned LowR, unsigned HighR) const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.IndexReg >= LowR && Mem.IndexReg <= HighR;
  }

  // isMemA_RCB: A-bit memory operand whose vector index register is from the
  // B-bit register class (gather/scatter operands). The non-X variants cover
  // only XMM0-15/YMM0-15; the X variants also accept the EVEX-extended
  // registers (16-31).
  bool isMem32_RC128() const {
    return isMem32() && isMemIndexReg(X86::XMM0, X86::XMM15);
  }
  bool isMem64_RC128() const {
    return isMem64() && isMemIndexReg(X86::XMM0, X86::XMM15);
  }
  bool isMem32_RC256() const {
    return isMem32() && isMemIndexReg(X86::YMM0, X86::YMM15);
  }
  bool isMem64_RC256() const {
    return isMem64() && isMemIndexReg(X86::YMM0, X86::YMM15);
  }

  bool isMem32_RC128X() const {
    return isMem32() && X86II::isXMMReg(Mem.IndexReg);
  }
  bool isMem64_RC128X() const {
    return isMem64() && X86II::isXMMReg(Mem.IndexReg);
  }
  bool isMem32_RC256X() const {
    return isMem32() && X86II::isYMMReg(Mem.IndexReg);
  }
  bool isMem64_RC256X() const {
    return isMem64() && X86II::isYMMReg(Mem.IndexReg);
  }
  bool isMem32_RC512() const {
    return isMem32() && X86II::isZMMReg(Mem.IndexReg);
  }
  bool isMem64_RC512() const {
    return isMem64() && X86II::isZMMReg(Mem.IndexReg);
  }

  // isMem512_GRN: 512-bit memory operand whose base (and index, for 32/64)
  // registers are N-bit GPRs; the mode's pseudo registers (EIP/EIZ, RIP/RIZ)
  // are also accepted. Used for AMX tile memory operands.
  bool isMem512_GR16() const {
    if (!isMem512())
      return false;
    if (getMemBaseReg() &&
        !X86MCRegisterClasses[X86::GR16RegClassID].contains(getMemBaseReg()))
      return false;
    return true;
  }
  bool isMem512_GR32() const {
    if (!isMem512())
      return false;
    if (getMemBaseReg() &&
        !X86MCRegisterClasses[X86::GR32RegClassID].contains(getMemBaseReg()) &&
        getMemBaseReg() != X86::EIP)
      return false;
    if (getMemIndexReg() &&
        !X86MCRegisterClasses[X86::GR32RegClassID].contains(getMemIndexReg()) &&
        getMemIndexReg() != X86::EIZ)
      return false;
    return true;
  }
  bool isMem512_GR64() const {
    if (!isMem512())
      return false;
    if (getMemBaseReg() &&
        !X86MCRegisterClasses[X86::GR64RegClassID].contains(getMemBaseReg()) &&
        getMemBaseReg() != X86::RIP)
      return false;
    if (getMemIndexReg() &&
        !X86MCRegisterClasses[X86::GR64RegClassID].contains(getMemIndexReg()) &&
        getMemIndexReg() != X86::RIZ)
      return false;
    return true;
  }

  // Displacement-only memory reference (no segment, base, or index).
  bool isAbsMem() const {
    return Kind == Memory && !getMemSegReg() && !getMemBaseReg() &&
           !getMemIndexReg() && getMemScale() == 1 && isMaybeDirectBranchDest();
  }

  // AVX-512 embedded rounding-control operand; parsed as an immediate.
  bool isAVX512RC() const{
      return isImm();
  }

  bool isAbsMemMode16() const { return isAbsMem() && Mem.ModeSize == 16; }

  // Displacement fits in a sign-extended 8-bit field; non-constant
  // displacements are optimistically accepted.
  bool isDispImm8() const {
    if (auto *CE = dyn_cast<MCConstantExpr>(getMemDisp()))
      return isImmSExti64i8Value(CE->getValue());
    return true;
  }

  bool isAbsMem8() const { return isAbsMem() && isMem8() && isDispImm8(); }

  bool isMemUseUpRegs() const override { return UseUpRegs; }

  // String-instruction source operand: plain [SI]/[ESI]/[RSI] with zero
  // displacement and no index register.
  bool isSrcIdx() const {
    return !getMemIndexReg() && getMemScale() == 1 &&
      (getMemBaseReg() == X86::RSI || getMemBaseReg() == X86::ESI ||
       getMemBaseReg() == X86::SI) && isa<MCConstantExpr>(getMemDisp()) &&
      cast<MCConstantExpr>(getMemDisp())->getValue() == 0;
  }
  bool isSrcIdx8() const {
    return isMem8() && isSrcIdx();
  }
  bool isSrcIdx16() const {
    return isMem16() && isSrcIdx();
  }
  bool isSrcIdx32() const {
    return isMem32() && isSrcIdx();
  }
  bool isSrcIdx64() const {
    return isMem64() && isSrcIdx();
  }

  // String-instruction destination operand: [DI]/[EDI]/[RDI] with zero
  // displacement, no index register, and segment either absent or ES (the
  // only segment the hardware uses for the destination).
  bool isDstIdx() const {
    return !getMemIndexReg() && getMemScale() == 1 &&
      (!getMemSegReg() || getMemSegReg() == X86::ES) &&
      (getMemBaseReg() == X86::RDI || getMemBaseReg() == X86::EDI ||
       getMemBaseReg() == X86::DI) &&
      isa<MCConstantExpr>(getMemDisp()) &&
      cast<MCConstantExpr>(getMemDisp())->getValue() == 0;
  }
  bool isDstIdx8() const {
    return isMem8() && isDstIdx();
  }
  bool isDstIdx16() const {
    return isMem16() && isDstIdx();
  }
  bool isDstIdx32() const {
    return isMem32() && isDstIdx();
  }
  bool isDstIdx64() const {
    return isMem64() && isDstIdx();
  }

  // moffs-style operand (MOV AL, moffs8 etc.): segment-relative offset with
  // no base or index register.
  bool isMemOffs() const {
    return Kind == Memory && !getMemBaseReg() && !getMemIndexReg() &&
           getMemScale() == 1;
  }

  // isMemOffsA_B: moffs operand in an A-bit address mode with a B-bit (or
  // unsized) operand size.
  bool isMemOffs16_8() const {
    return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 8);
  }
  bool isMemOffs16_16() const {
    return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 16);
  }
  bool isMemOffs16_32() const {
    return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 32);
  }
  bool isMemOffs32_8() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 8);
  }
  bool isMemOffs32_16() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 16);
  }
  bool isMemOffs32_32() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 32);
  }
  bool isMemOffs32_64() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 64);
  }
  bool isMemOffs64_8() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 8);
  }
  bool isMemOffs64_16() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 16);
  }
  bool isMemOffs64_32() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 32);
  }
  bool isMemOffs64_64() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 64);
  }

  bool isPrefix() const { return Kind == Prefix; }
  bool isReg() const override { return Kind == Register; }
  bool isDXReg() const { return Kind == DXRegister; }

  bool isGR32orGR64() const {
    return Kind == Register &&
      (X86MCRegisterClasses[X86::GR32RegClassID].contains(getReg()) ||
       X86MCRegisterClasses[X86::GR64RegClassID].contains(getReg()));
  }

  bool isGR16orGR32orGR64() const {
    return Kind == Register &&
      (X86MCRegisterClasses[X86::GR16RegClassID].contains(getReg()) ||
       X86MCRegisterClasses[X86::GR32RegClassID].contains(getReg()) ||
       X86MCRegisterClasses[X86::GR64RegClassID].contains(getReg()));
  }

  // Any vector register: MMX, XMM, YMM (EVEX-extended classes), or ZMM.
  bool isVectorReg() const {
    return Kind == Register &&
           (X86MCRegisterClasses[X86::VR64RegClassID].contains(getReg()) ||
            X86MCRegisterClasses[X86::VR128XRegClassID].contains(getReg()) ||
            X86MCRegisterClasses[X86::VR256XRegClassID].contains(getReg()) ||
            X86MCRegisterClasses[X86::VR512RegClassID].contains(getReg()));
  }

  // isVKNPair: a single mask register that will be converted to a mask-pair
  // register by addMaskPairOperands below.
  bool isVK1Pair() const {
    return Kind == Register &&
           X86MCRegisterClasses[X86::VK1RegClassID].contains(getReg());
  }

  bool isVK2Pair() const {
    return Kind == Register &&
           X86MCRegisterClasses[X86::VK2RegClassID].contains(getReg());
  }

  bool isVK4Pair() const {
    return Kind == Register &&
           X86MCRegisterClasses[X86::VK4RegClassID].contains(getReg());
  }

  bool isVK8Pair() const {
    return Kind == Register &&
           X86MCRegisterClasses[X86::VK8RegClassID].contains(getReg());
  }

  bool isVK16Pair() const {
    return Kind == Register &&
           X86MCRegisterClasses[X86::VK16RegClassID].contains(getReg());
  }

  // Append Expr to Inst, folding constants to an immediate operand.
  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Expr));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  // Normalizes a 64-bit GPR to its 32-bit sub-register before adding it.
  void addGR32orGR64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    MCRegister RegNo = getReg();
    if (X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo))
      RegNo = getX86SubSuperRegister(RegNo, 32);
    Inst.addOperand(MCOperand::createReg(RegNo));
  }

  // Normalizes a 32/64-bit GPR to its 16-bit sub-register before adding it.
  void addGR16orGR32orGR64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    MCRegister RegNo = getReg();
    if (X86MCRegisterClasses[X86::GR32RegClassID].contains(RegNo) ||
        X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo))
      RegNo = getX86SubSuperRegister(RegNo, 16);
    Inst.addOperand(MCOperand::createReg(RegNo));
  }

  void addAVX512RCOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  // Maps a single mask register (K0-K7) to the pair register that contains
  // it (K0_K1 .. K6_K7).
  void addMaskPairOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    MCRegister Reg = getReg();
    switch (Reg.id()) {
    case X86::K0:
    case X86::K1:
      Reg = X86::K0_K1;
      break;
    case X86::K2:
    case X86::K3:
      Reg = X86::K2_K3;
      break;
    case X86::K4:
    case X86::K5:
      Reg = X86::K4_K5;
      break;
    case X86::K6:
    case X86::K7:
      Reg = X86::K6_K7;
      break;
    }
    Inst.addOperand(MCOperand::createReg(Reg));
  }

  // AMX tile register that will be widened to a tile pair below.
  bool isTILEPair() const {
    return Kind == Register &&
           X86MCRegisterClasses[X86::TILERegClassID].contains(getReg());
  }

  // Maps a single tile register (TMM0-TMM7) to its containing pair
  // (TMM0_TMM1 .. TMM6_TMM7).
  void addTILEPairOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    MCRegister Reg = getReg();
    switch (Reg.id()) {
    default:
      llvm_unreachable("Invalid tile register!");
    case X86::TMM0:
    case X86::TMM1:
      Reg = X86::TMM0_TMM1;
      break;
    case X86::TMM2:
    case X86::TMM3:
      Reg = X86::TMM2_TMM3;
      break;
    case X86::TMM4:
    case X86::TMM5:
      Reg = X86::TMM4_TMM5;
      break;
    case X86::TMM6:
    case X86::TMM7:
      Reg = X86::TMM6_TMM7;
      break;
    }
    Inst.addOperand(MCOperand::createReg(Reg));
  }

  // Emits the canonical 5-operand memory form: base, scale, index,
  // displacement, segment (in that exact order — the encoder depends on it).
  void addMemOperands(MCInst &Inst, unsigned N) const {
    assert((N == 5) && "Invalid number of operands!");
    if (getMemBaseReg())
      Inst.addOperand(MCOperand::createReg(getMemBaseReg()));
    else
      Inst.addOperand(MCOperand::createReg(getMemDefaultBaseReg()));
    Inst.addOperand(MCOperand::createImm(getMemScale()));
    Inst.addOperand(MCOperand::createReg(getMemIndexReg()));
    addExpr(Inst, getMemDisp());
    Inst.addOperand(MCOperand::createReg(getMemSegReg()));
  }

  // Absolute memory reference collapses to its displacement only.
  void addAbsMemOperands(MCInst &Inst, unsigned N) const {
    assert((N == 1) && "Invalid number of operands!");
    // Add as immediates when possible.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp()))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(getMemDisp()));
  }

  // String-op source: base register plus segment override.
  void addSrcIdxOperands(MCInst &Inst, unsigned N) const {
    assert((N == 2) && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMemBaseReg()));
    Inst.addOperand(MCOperand::createReg(getMemSegReg()));
  }

  // String-op destination: base register only (segment is implicitly ES).
  void addDstIdxOperands(MCInst &Inst, unsigned N) const {
    assert((N == 1) && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMemBaseReg()));
  }

  // moffs operand: offset expression plus segment register.
  void addMemOffsOperands(MCInst &Inst, unsigned N) const {
    assert((N == 2) && "Invalid number of operands!");
    // Add as immediates when possible.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp()))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(getMemDisp()));
    Inst.addOperand(MCOperand::createReg(getMemSegReg()));
  }

  // Factory for a raw token operand; the end location is derived from the
  // token length.
  static std::unique_ptr<X86Operand> CreateToken(StringRef Str, SMLoc Loc) {
    SMLoc EndLoc = SMLoc::getFromPointer(Loc.getPointer() + Str.size());
    auto Res = std::make_unique<X86Operand>(Token, Loc, EndLoc);
    Res->Tok.Data = Str.data();
    Res->Tok.Length = Str.size();
    return Res;
  }

  // Factory for a register operand; the optional trailing arguments carry
  // inline-asm bookkeeping.
  static std::unique_ptr<X86Operand>
  CreateReg(MCRegister Reg, SMLoc StartLoc, SMLoc EndLoc,
            bool AddressOf = false, SMLoc OffsetOfLoc = SMLoc(),
            StringRef SymName = StringRef(), void *OpDecl = nullptr) {
    auto Res = std::make_unique<X86Operand>(Register, StartLoc, EndLoc);
    Res->Reg.RegNo = Reg;
    Res->AddressOf = AddressOf;
    Res->OffsetOfLoc = OffsetOfLoc;
    Res->SymName = SymName;
    Res->OpDecl = OpDecl;
    return Res;
  }

  // Factory for the implicit DX register operand of in/out instructions.
  static std::unique_ptr<X86Operand>
  CreateDXReg(SMLoc StartLoc, SMLoc EndLoc) {
    return std::make_unique<X86Operand>(DXRegister, StartLoc, EndLoc);
  }

  // Factory for an instruction-prefix operand.
  static std::unique_ptr<X86Operand>
  CreatePrefix(unsigned Prefixes, SMLoc StartLoc, SMLoc EndLoc) {
    auto Res = std::make_unique<X86Operand>(Prefix, StartLoc, EndLoc);
    Res->Pref.Prefixes = Prefixes;
    return Res;
  }

  // Factory for an immediate operand; GlobalRef=false marks a reference to
  // an inline-asm-local symbol.
  static std::unique_ptr<X86Operand> CreateImm(const MCExpr *Val,
                                               SMLoc StartLoc, SMLoc EndLoc,
                                               StringRef SymName = StringRef(),
                                               void *OpDecl = nullptr,
                                               bool GlobalRef = true) {
    auto Res = std::make_unique<X86Operand>(Immediate, StartLoc, EndLoc);
    Res->Imm.Val = Val;
    Res->Imm.LocalRef = !GlobalRef;
    Res->SymName = SymName;
    Res->OpDecl = OpDecl;
    Res->AddressOf = true;
    return Res;
  }

  /// Create an absolute memory operand.
  static std::unique_ptr<X86Operand>
  CreateMem(unsigned ModeSize, const MCExpr *Disp, SMLoc StartLoc, SMLoc EndLoc,
            unsigned Size = 0, StringRef SymName = StringRef(),
            void *OpDecl = nullptr, unsigned FrontendSize = 0,
            bool UseUpRegs = false, bool MaybeDirectBranchDest = true) {
    auto Res = std::make_unique<X86Operand>(Memory, StartLoc, EndLoc);
    Res->Mem.SegReg = MCRegister();
    Res->Mem.Disp = Disp;
    Res->Mem.BaseReg = MCRegister();
    Res->Mem.DefaultBaseReg = MCRegister();
    Res->Mem.IndexReg = MCRegister();
    Res->Mem.Scale = 1;
    Res->Mem.Size = Size;
    Res->Mem.ModeSize = ModeSize;
    Res->Mem.FrontendSize = FrontendSize;
    Res->Mem.MaybeDirectBranchDest = MaybeDirectBranchDest;
    Res->UseUpRegs = UseUpRegs;
    Res->SymName = SymName;
    Res->OpDecl = OpDecl;
    Res->AddressOf = false;
    return Res;
  }

  /// Create a generalized memory operand.
  static std::unique_ptr<X86Operand>
  CreateMem(unsigned ModeSize, MCRegister SegReg, const MCExpr *Disp,
            MCRegister BaseReg, MCRegister IndexReg, unsigned Scale,
            SMLoc StartLoc, SMLoc EndLoc, unsigned Size = 0,
            MCRegister DefaultBaseReg = MCRegister(),
            StringRef SymName = StringRef(), void *OpDecl = nullptr,
            unsigned FrontendSize = 0, bool UseUpRegs = false,
            bool MaybeDirectBranchDest = true) {
    // We should never just have a displacement, that should be parsed as an
    // absolute memory operand.
    assert((SegReg || BaseReg || IndexReg || DefaultBaseReg) &&
           "Invalid memory operand!");

    // The scale should always be one of {1,2,4,8}.
    assert(((Scale == 1 || Scale == 2 || Scale == 4 || Scale == 8)) &&
           "Invalid scale!");
    auto Res = std::make_unique<X86Operand>(Memory, StartLoc, EndLoc);
    Res->Mem.SegReg = SegReg;
    Res->Mem.Disp = Disp;
    Res->Mem.BaseReg = BaseReg;
    Res->Mem.DefaultBaseReg = DefaultBaseReg;
    Res->Mem.IndexReg = IndexReg;
    Res->Mem.Scale = Scale;
    Res->Mem.Size = Size;
    Res->Mem.ModeSize = ModeSize;
    Res->Mem.FrontendSize = FrontendSize;
    Res->Mem.MaybeDirectBranchDest = MaybeDirectBranchDest;
    Res->UseUpRegs = UseUpRegs;
    Res->SymName = SymName;
    Res->OpDecl = OpDecl;
    Res->AddressOf = false;
    return Res;
  }
};
802
803} // end namespace llvm
804
805#endif // LLVM_LIB_TARGET_X86_ASMPARSER_X86OPERAND_H
806