//===- X86Operand.h - Parsed X86 machine instruction ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_X86_ASMPARSER_X86OPERAND_H
#define LLVM_LIB_TARGET_X86_ASMPARSER_X86OPERAND_H

#include "MCTargetDesc/X86IntelInstPrinter.h"
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "X86AsmParserCommon.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/SMLoc.h"
#include <cassert>
#include <memory>

namespace llvm {

/// X86Operand - Instances of this class represent a parsed X86 machine
/// instruction.
struct X86Operand final : public MCParsedAsmOperand {
  enum KindTy { Token, Register, Immediate, Memory, Prefix, DXRegister } Kind;

  SMLoc StartLoc, EndLoc;
  SMLoc OffsetOfLoc;
  StringRef SymName;
  void *OpDecl;
  bool AddressOf;

  /// This is used for inline asm, which may specify a base reg and an index
  /// reg for a MemOp (e.g. ARR[eax + ecx*4]), so no extra reg can be used for
  /// the MemOp.
  bool UseUpRegs = false;

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct RegOp {
    MCRegister RegNo;
  };

  struct PrefOp {
    unsigned Prefixes;
  };

  struct ImmOp {
    const MCExpr *Val;
    bool LocalRef;
  };

  struct MemOp {
    MCRegister SegReg;
    const MCExpr *Disp;
    MCRegister BaseReg;
    MCRegister DefaultBaseReg;
    MCRegister IndexReg;
    unsigned Scale;
    unsigned Size;
    unsigned ModeSize;

    /// If the memory operand is unsized and there are multiple instruction
    /// matches, prefer the one with this size.
    unsigned FrontendSize;

    /// If false, then this operand must be a memory operand for an indirect
    /// branch instruction. Otherwise, this operand may belong to either a
    /// direct or indirect branch instruction.
    bool MaybeDirectBranchDest;
  };

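  /// The operand payload. Exactly one member is active, as selected by Kind.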
  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct ImmOp Imm;
    struct MemOp Mem;
    struct PrefOp Pref;
  };

  X86Operand(KindTy K, SMLoc Start, SMLoc End)
      : Kind(K), StartLoc(Start), EndLoc(End), OpDecl(nullptr),
        AddressOf(false) {}

  StringRef getSymName() override { return SymName; }
  void *getOpDecl() override { return OpDecl; }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }

  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  /// getLocRange - Get the range between the first and last token of this
  /// operand.
  SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }

  /// getOffsetOfLoc - Get the location of the offset operator.
  SMLoc getOffsetOfLoc() const override { return OffsetOfLoc; }

  void print(raw_ostream &OS, const MCAsmInfo &) const override {
    auto PrintImmValue = [&](const MCExpr *Val, const char *VName) {
      if (Val->getKind() == MCExpr::Constant) {
        if (auto Imm = cast<MCConstantExpr>(Val)->getValue())
          OS << VName << Imm;
      } else if (Val->getKind() == MCExpr::SymbolRef) {
        if (auto *SRE = dyn_cast<MCSymbolRefExpr>(Val)) {
          const MCSymbol &Sym = SRE->getSymbol();
          if (const char *SymNameStr = Sym.getName().data())
            OS << VName << SymNameStr;
        }
      }
    };

    switch (Kind) {
    case Token:
      OS << Tok.Data;
      break;
    case Register:
      OS << "Reg:" << X86IntelInstPrinter::getRegisterName(Reg.RegNo);
      break;
    case DXRegister:
      OS << "DXReg";
      break;
    case Immediate:
      PrintImmValue(Imm.Val, "Imm:");
      break;
    case Prefix:
      OS << "Prefix:" << Pref.Prefixes;
      break;
    case Memory:
      OS << "Memory: ModeSize=" << Mem.ModeSize;
      if (Mem.Size)
        OS << ",Size=" << Mem.Size;
      if (Mem.BaseReg)
        OS << ",BaseReg=" << X86IntelInstPrinter::getRegisterName(Mem.BaseReg);
      if (Mem.IndexReg)
        OS << ",IndexReg="
           << X86IntelInstPrinter::getRegisterName(Mem.IndexReg);
      if (Mem.Scale)
        OS << ",Scale=" << Mem.Scale;
      if (Mem.Disp)
        PrintImmValue(Mem.Disp, ",Disp=");
      if (Mem.SegReg)
        OS << ",SegReg=" << X86IntelInstPrinter::getRegisterName(Mem.SegReg);
      break;
    }
  }

  StringRef getToken() const {
    assert(Kind == Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }
  void setTokenValue(StringRef Value) {
    assert(Kind == Token && "Invalid access!");
    Tok.Data = Value.data();
    Tok.Length = Value.size();
  }

  MCRegister getReg() const override {
    assert(Kind == Register && "Invalid access!");
    return Reg.RegNo;
  }

  unsigned getPrefix() const {
    assert(Kind == Prefix && "Invalid access!");
    return Pref.Prefixes;
  }

  const MCExpr *getImm() const {
    assert(Kind == Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getMemDisp() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.Disp;
  }
  MCRegister getMemSegReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.SegReg;
  }
  MCRegister getMemBaseReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.BaseReg;
  }
  MCRegister getMemDefaultBaseReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.DefaultBaseReg;
  }
  MCRegister getMemIndexReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.IndexReg;
  }
  unsigned getMemScale() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.Scale;
  }
  unsigned getMemModeSize() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.ModeSize;
  }
  unsigned getMemFrontendSize() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.FrontendSize;
  }
  bool isMaybeDirectBranchDest() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.MaybeDirectBranchDest;
  }

  bool isToken() const override { return Kind == Token; }

  bool isImm() const override { return Kind == Immediate; }

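  // The isImmSExti<N>i<M> predicates check whether an immediate can be encoded
  // in the sign-extended imm<M> form of an instruction with an <N>-bit
  // operand; e.g. isImmSExti16i8 accepts both -1 and 0xFFFF, since each is
  // representable as an 8-bit immediate sign-extended to 16 bits. Non-constant
  // expressions are accepted optimistically and left to relaxation/fixups.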
  bool isImmSExti16i8() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti16i8Value(CE->getValue());
  }
  bool isImmSExti32i8() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti32i8Value(CE->getValue());
  }
  bool isImmSExti64i8() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti64i8Value(CE->getValue());
  }
  bool isImmSExti64i32() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti64i32Value(CE->getValue());
  }

  bool isImmUnsignedi4() const {
    if (!isImm()) return false;
    // If this isn't a constant expr, reject it. The immediate byte is shared
    // with a register encoding. We can't have it affected by a relocation.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    return isImmUnsignedi4Value(CE->getValue());
  }

  bool isImmUnsignedi6() const {
    if (!isImm()) return false;
    // If this isn't a constant expr, reject it. The immediate byte is shared
    // with a register encoding. We can't have it affected by a relocation.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    return isImmUnsignedi6Value(CE->getValue());
  }

  bool isImmUnsignedi8() const {
    if (!isImm()) return false;
    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return true;
    return isImmUnsignedi8Value(CE->getValue());
  }

  bool isOffsetOfLocal() const override { return isImm() && Imm.LocalRef; }

  bool needAddressOf() const override { return AddressOf; }

  bool isMem() const override { return Kind == Memory; }
  bool isMemUnsized() const {
    return Kind == Memory && Mem.Size == 0;
  }
  bool isMem8() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 8);
  }
  bool isMem16() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 16);
  }
  bool isMem32() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 32);
  }
  bool isMem64() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 64);
  }
  bool isMem80() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 80);
  }
  bool isMem128() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 128);
  }
  bool isMem256() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 256);
  }
  bool isMem512() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 512);
  }

  bool isSibMem() const {
    return isMem() && Mem.BaseReg != X86::RIP && Mem.BaseReg != X86::EIP;
  }

  bool isMemIndexReg(unsigned LowR, unsigned HighR) const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.IndexReg >= LowR && Mem.IndexReg <= HighR;
  }

  bool isMem32_RC128() const {
    return isMem32() && isMemIndexReg(X86::XMM0, X86::XMM15);
  }
  bool isMem64_RC128() const {
    return isMem64() && isMemIndexReg(X86::XMM0, X86::XMM15);
  }
  bool isMem32_RC256() const {
    return isMem32() && isMemIndexReg(X86::YMM0, X86::YMM15);
  }
  bool isMem64_RC256() const {
    return isMem64() && isMemIndexReg(X86::YMM0, X86::YMM15);
  }

  bool isMem32_RC128X() const {
    return isMem32() && X86II::isXMMReg(Mem.IndexReg);
  }
  bool isMem64_RC128X() const {
    return isMem64() && X86II::isXMMReg(Mem.IndexReg);
  }
  bool isMem32_RC256X() const {
    return isMem32() && X86II::isYMMReg(Mem.IndexReg);
  }
  bool isMem64_RC256X() const {
    return isMem64() && X86II::isYMMReg(Mem.IndexReg);
  }
  bool isMem32_RC512() const {
    return isMem32() && X86II::isZMMReg(Mem.IndexReg);
  }
  bool isMem64_RC512() const {
    return isMem64() && X86II::isZMMReg(Mem.IndexReg);
  }

  bool isMem512_GR16() const {
    if (!isMem512())
      return false;
    if (getMemBaseReg() &&
        !X86MCRegisterClasses[X86::GR16RegClassID].contains(getMemBaseReg()))
      return false;
    return true;
  }
  bool isMem512_GR32() const {
    if (!isMem512())
      return false;
    if (getMemBaseReg() &&
        !X86MCRegisterClasses[X86::GR32RegClassID].contains(getMemBaseReg()) &&
        getMemBaseReg() != X86::EIP)
      return false;
    if (getMemIndexReg() &&
        !X86MCRegisterClasses[X86::GR32RegClassID].contains(getMemIndexReg()) &&
        getMemIndexReg() != X86::EIZ)
      return false;
    return true;
  }
  bool isMem512_GR64() const {
    if (!isMem512())
      return false;
    if (getMemBaseReg() &&
        !X86MCRegisterClasses[X86::GR64RegClassID].contains(getMemBaseReg()) &&
        getMemBaseReg() != X86::RIP)
      return false;
    if (getMemIndexReg() &&
        !X86MCRegisterClasses[X86::GR64RegClassID].contains(getMemIndexReg()) &&
        getMemIndexReg() != X86::RIZ)
      return false;
    return true;
  }

  bool isAbsMem() const {
    return Kind == Memory && !getMemSegReg() && !getMemBaseReg() &&
           !getMemIndexReg() && getMemScale() == 1 && isMaybeDirectBranchDest();
  }

  bool isAVX512RC() const {
    return isImm();
  }

  bool isAbsMemMode16() const { return isAbsMem() && Mem.ModeSize == 16; }

  bool isDispImm8() const {
    if (auto *CE = dyn_cast<MCConstantExpr>(getMemDisp()))
      return isImmSExti64i8Value(CE->getValue());
    return true;
  }

  bool isAbsMem8() const { return isAbsMem() && isMem8() && isDispImm8(); }

  bool isMemUseUpRegs() const override { return UseUpRegs; }

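  // isSrcIdx/isDstIdx match the implicit (R|E)SI source and (R|E)DI
  // destination operands of the string instructions (MOVS, LODS, STOS, ...):
  // no index register, scale 1, a zero constant displacement, and for the
  // destination either no segment override or the default ES segment.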
  bool isSrcIdx() const {
    return !getMemIndexReg() && getMemScale() == 1 &&
           (getMemBaseReg() == X86::RSI || getMemBaseReg() == X86::ESI ||
            getMemBaseReg() == X86::SI) &&
           isa<MCConstantExpr>(getMemDisp()) &&
           cast<MCConstantExpr>(getMemDisp())->getValue() == 0;
  }
  bool isSrcIdx8() const {
    return isMem8() && isSrcIdx();
  }
  bool isSrcIdx16() const {
    return isMem16() && isSrcIdx();
  }
  bool isSrcIdx32() const {
    return isMem32() && isSrcIdx();
  }
  bool isSrcIdx64() const {
    return isMem64() && isSrcIdx();
  }

  bool isDstIdx() const {
    return !getMemIndexReg() && getMemScale() == 1 &&
           (!getMemSegReg() || getMemSegReg() == X86::ES) &&
           (getMemBaseReg() == X86::RDI || getMemBaseReg() == X86::EDI ||
            getMemBaseReg() == X86::DI) &&
           isa<MCConstantExpr>(getMemDisp()) &&
           cast<MCConstantExpr>(getMemDisp())->getValue() == 0;
  }
  bool isDstIdx8() const {
    return isMem8() && isDstIdx();
  }
  bool isDstIdx16() const {
    return isMem16() && isDstIdx();
  }
  bool isDstIdx32() const {
    return isMem32() && isDstIdx();
  }
  bool isDstIdx64() const {
    return isMem64() && isDstIdx();
  }

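  // Memory-offset (moffs) operands: a bare displacement with an optional
  // segment override and no base or index register, as used by the
  // direct-offset forms of MOV. isMemOffs<A>_<S> additionally pins the
  // address-mode size A and the access size S, both in bits.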
  bool isMemOffs() const {
    return Kind == Memory && !getMemBaseReg() && !getMemIndexReg() &&
           getMemScale() == 1;
  }

  bool isMemOffs16_8() const {
    return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 8);
  }
  bool isMemOffs16_16() const {
    return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 16);
  }
  bool isMemOffs16_32() const {
    return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 32);
  }
  bool isMemOffs32_8() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 8);
  }
  bool isMemOffs32_16() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 16);
  }
  bool isMemOffs32_32() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 32);
  }
  bool isMemOffs32_64() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 64);
  }
  bool isMemOffs64_8() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 8);
  }
  bool isMemOffs64_16() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 16);
  }
  bool isMemOffs64_32() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 32);
  }
  bool isMemOffs64_64() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 64);
  }

  // Returns true only for a moffset that requires *more than* 32 bits.
  bool isMemConstOffs64() const {
    if (!isMemOffs() || Mem.ModeSize != 64)
      return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp());
    if (!CE)
      return false;

    return !isInt<32>(CE->getValue());
  }

  bool isPrefix() const { return Kind == Prefix; }
  bool isReg() const override { return Kind == Register; }
  bool isDXReg() const { return Kind == DXRegister; }

  bool isGR32orGR64() const {
    return Kind == Register &&
           (X86MCRegisterClasses[X86::GR32RegClassID].contains(getReg()) ||
            X86MCRegisterClasses[X86::GR64RegClassID].contains(getReg()));
  }

  bool isGR16orGR32orGR64() const {
    return Kind == Register &&
           (X86MCRegisterClasses[X86::GR16RegClassID].contains(getReg()) ||
            X86MCRegisterClasses[X86::GR32RegClassID].contains(getReg()) ||
            X86MCRegisterClasses[X86::GR64RegClassID].contains(getReg()));
  }

  bool isVectorReg() const {
    return Kind == Register &&
           (X86MCRegisterClasses[X86::VR64RegClassID].contains(getReg()) ||
            X86MCRegisterClasses[X86::VR128XRegClassID].contains(getReg()) ||
            X86MCRegisterClasses[X86::VR256XRegClassID].contains(getReg()) ||
            X86MCRegisterClasses[X86::VR512RegClassID].contains(getReg()));
  }

  bool isVK1Pair() const {
    return Kind == Register &&
           X86MCRegisterClasses[X86::VK1RegClassID].contains(getReg());
  }

  bool isVK2Pair() const {
    return Kind == Register &&
           X86MCRegisterClasses[X86::VK2RegClassID].contains(getReg());
  }

  bool isVK4Pair() const {
    return Kind == Register &&
           X86MCRegisterClasses[X86::VK4RegClassID].contains(getReg());
  }

  bool isVK8Pair() const {
    return Kind == Register &&
           X86MCRegisterClasses[X86::VK8RegClassID].contains(getReg());
  }

  bool isVK16Pair() const {
    return Kind == Register &&
           X86MCRegisterClasses[X86::VK16RegClassID].contains(getReg());
  }

  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Expr));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addGR32orGR64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    MCRegister RegNo = getReg();
    if (X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo))
      RegNo = getX86SubSuperRegister(RegNo, 32);
    Inst.addOperand(MCOperand::createReg(RegNo));
  }

  void addGR16orGR32orGR64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    MCRegister RegNo = getReg();
    if (X86MCRegisterClasses[X86::GR32RegClassID].contains(RegNo) ||
        X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo))
      RegNo = getX86SubSuperRegister(RegNo, 16);
    Inst.addOperand(MCOperand::createReg(RegNo));
  }

  void addAVX512RCOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

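  /// Map a single mask register onto the register pair that contains it
  /// (K0/K1 -> K0_K1, ..., K6/K7 -> K6_K7) for operands that encode a
  /// mask-register pair.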
  void addMaskPairOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    MCRegister Reg = getReg();
    switch (Reg.id()) {
    case X86::K0:
    case X86::K1:
      Reg = X86::K0_K1;
      break;
    case X86::K2:
    case X86::K3:
      Reg = X86::K2_K3;
      break;
    case X86::K4:
    case X86::K5:
      Reg = X86::K4_K5;
      break;
    case X86::K6:
    case X86::K7:
      Reg = X86::K6_K7;
      break;
    }
    Inst.addOperand(MCOperand::createReg(Reg));
  }

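  /// Emit the five MCOperands of a full memory reference in the order the
  /// matcher expects: base register (or the default base if none was written),
  /// scale, index register, displacement, and segment register.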
  void addMemOperands(MCInst &Inst, unsigned N) const {
    assert((N == 5) && "Invalid number of operands!");
    if (getMemBaseReg())
      Inst.addOperand(MCOperand::createReg(getMemBaseReg()));
    else
      Inst.addOperand(MCOperand::createReg(getMemDefaultBaseReg()));
    Inst.addOperand(MCOperand::createImm(getMemScale()));
    Inst.addOperand(MCOperand::createReg(getMemIndexReg()));
    addExpr(Inst, getMemDisp());
    Inst.addOperand(MCOperand::createReg(getMemSegReg()));
  }

  void addAbsMemOperands(MCInst &Inst, unsigned N) const {
    assert((N == 1) && "Invalid number of operands!");
    // Add as immediates when possible.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp()))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(getMemDisp()));
  }

  void addSrcIdxOperands(MCInst &Inst, unsigned N) const {
    assert((N == 2) && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMemBaseReg()));
    Inst.addOperand(MCOperand::createReg(getMemSegReg()));
  }

  void addDstIdxOperands(MCInst &Inst, unsigned N) const {
    assert((N == 1) && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMemBaseReg()));
  }

  void addMemOffsOperands(MCInst &Inst, unsigned N) const {
    assert((N == 2) && "Invalid number of operands!");
    // Add as immediates when possible.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp()))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(getMemDisp()));
    Inst.addOperand(MCOperand::createReg(getMemSegReg()));
  }

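  // The factory methods below are how the asm parser materializes operands.
  // A minimal sketch (illustrative only; the variable names are hypothetical
  // and the real call sites live in the X86 asm parser) for Intel-syntax
  // `mov eax, dword ptr [esp + ebx*2 + 4]` in 32-bit mode might look like:
  //
  //   Operands.push_back(X86Operand::CreateToken("mov", NameLoc));
  //   Operands.push_back(X86Operand::CreateReg(X86::EAX, Start, End));
  //   Operands.push_back(X86Operand::CreateMem(
  //       /*ModeSize=*/32, /*SegReg=*/MCRegister(), /*Disp=*/ConstFour,
  //       /*BaseReg=*/X86::ESP, /*IndexReg=*/X86::EBX, /*Scale=*/2, Start, End,
  //       /*Size=*/32));
  //
  // where ConstFour is an MCConstantExpr for the displacement.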
  static std::unique_ptr<X86Operand> CreateToken(StringRef Str, SMLoc Loc) {
    SMLoc EndLoc = SMLoc::getFromPointer(Loc.getPointer() + Str.size());
    auto Res = std::make_unique<X86Operand>(Token, Loc, EndLoc);
    Res->Tok.Data = Str.data();
    Res->Tok.Length = Str.size();
    return Res;
  }

  static std::unique_ptr<X86Operand>
  CreateReg(MCRegister Reg, SMLoc StartLoc, SMLoc EndLoc,
            bool AddressOf = false, SMLoc OffsetOfLoc = SMLoc(),
            StringRef SymName = StringRef(), void *OpDecl = nullptr) {
    auto Res = std::make_unique<X86Operand>(Register, StartLoc, EndLoc);
    Res->Reg.RegNo = Reg;
    Res->AddressOf = AddressOf;
    Res->OffsetOfLoc = OffsetOfLoc;
    Res->SymName = SymName;
    Res->OpDecl = OpDecl;
    return Res;
  }

  static std::unique_ptr<X86Operand> CreateDXReg(SMLoc StartLoc, SMLoc EndLoc) {
    return std::make_unique<X86Operand>(DXRegister, StartLoc, EndLoc);
  }

  static std::unique_ptr<X86Operand>
  CreatePrefix(unsigned Prefixes, SMLoc StartLoc, SMLoc EndLoc) {
    auto Res = std::make_unique<X86Operand>(Prefix, StartLoc, EndLoc);
    Res->Pref.Prefixes = Prefixes;
    return Res;
  }

  static std::unique_ptr<X86Operand> CreateImm(const MCExpr *Val,
                                               SMLoc StartLoc, SMLoc EndLoc,
                                               StringRef SymName = StringRef(),
                                               void *OpDecl = nullptr,
                                               bool GlobalRef = true) {
    auto Res = std::make_unique<X86Operand>(Immediate, StartLoc, EndLoc);
    Res->Imm.Val = Val;
    Res->Imm.LocalRef = !GlobalRef;
    Res->SymName = SymName;
    Res->OpDecl = OpDecl;
    Res->AddressOf = true;
    return Res;
  }

  /// Create an absolute memory operand.
  static std::unique_ptr<X86Operand>
  CreateMem(unsigned ModeSize, const MCExpr *Disp, SMLoc StartLoc, SMLoc EndLoc,
            unsigned Size = 0, StringRef SymName = StringRef(),
            void *OpDecl = nullptr, unsigned FrontendSize = 0,
            bool UseUpRegs = false, bool MaybeDirectBranchDest = true) {
    auto Res = std::make_unique<X86Operand>(Memory, StartLoc, EndLoc);
    Res->Mem.SegReg = MCRegister();
    Res->Mem.Disp = Disp;
    Res->Mem.BaseReg = MCRegister();
    Res->Mem.DefaultBaseReg = MCRegister();
    Res->Mem.IndexReg = MCRegister();
    Res->Mem.Scale = 1;
    Res->Mem.Size = Size;
    Res->Mem.ModeSize = ModeSize;
    Res->Mem.FrontendSize = FrontendSize;
    Res->Mem.MaybeDirectBranchDest = MaybeDirectBranchDest;
    Res->UseUpRegs = UseUpRegs;
    Res->SymName = SymName;
    Res->OpDecl = OpDecl;
    Res->AddressOf = false;
    return Res;
  }

  /// Create a generalized memory operand.
  static std::unique_ptr<X86Operand>
  CreateMem(unsigned ModeSize, MCRegister SegReg, const MCExpr *Disp,
            MCRegister BaseReg, MCRegister IndexReg, unsigned Scale,
            SMLoc StartLoc, SMLoc EndLoc, unsigned Size = 0,
            MCRegister DefaultBaseReg = MCRegister(),
            StringRef SymName = StringRef(), void *OpDecl = nullptr,
            unsigned FrontendSize = 0, bool UseUpRegs = false,
            bool MaybeDirectBranchDest = true) {
    // We should never just have a displacement, that should be parsed as an
    // absolute memory operand.
    assert((SegReg || BaseReg || IndexReg || DefaultBaseReg) &&
           "Invalid memory operand!");

    // The scale should always be one of {1,2,4,8}.
    assert(((Scale == 1 || Scale == 2 || Scale == 4 || Scale == 8)) &&
           "Invalid scale!");
    auto Res = std::make_unique<X86Operand>(Memory, StartLoc, EndLoc);
    Res->Mem.SegReg = SegReg;
    Res->Mem.Disp = Disp;
    Res->Mem.BaseReg = BaseReg;
    Res->Mem.DefaultBaseReg = DefaultBaseReg;
    Res->Mem.IndexReg = IndexReg;
    Res->Mem.Scale = Scale;
    Res->Mem.Size = Size;
    Res->Mem.ModeSize = ModeSize;
    Res->Mem.FrontendSize = FrontendSize;
    Res->Mem.MaybeDirectBranchDest = MaybeDirectBranchDest;
    Res->UseUpRegs = UseUpRegs;
    Res->SymName = SymName;
    Res->OpDecl = OpDecl;
    Res->AddressOf = false;
    return Res;
  }
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_X86_ASMPARSER_X86OPERAND_H