//===-- X86MCCodeEmitter.cpp - Convert X86 code to machine code -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the X86MCCodeEmitter class.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86FixupKinds.h"
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>

using namespace llvm;

#define DEBUG_TYPE "mccodeemitter"

namespace {

enum PrefixKind { None, REX, REX2, XOP, VEX2, VEX3, EVEX };

static void emitByte(uint8_t C, SmallVectorImpl<char> &CB) { CB.push_back(C); }

class X86OpcodePrefixHelper {
  // REX (1 byte)
  // +-----+ +------+
  // | 40H | | WRXB |
  // +-----+ +------+

  // REX2 (2 bytes)
  // +-----+ +-------------------+
  // | D5H | | M | R'X'B' | WRXB |
  // +-----+ +-------------------+

  // XOP (3-byte)
  // +-----+ +--------------+ +-------------------+
  // | 8Fh | | RXB | m-mmmm | | W | vvvv | L | pp |
  // +-----+ +--------------+ +-------------------+

  // VEX2 (2 bytes)
  // +-----+ +-------------------+
  // | C5h | | R | vvvv | L | pp |
  // +-----+ +-------------------+

  // VEX3 (3 bytes)
  // +-----+ +--------------+ +-------------------+
  // | C4h | | RXB | m-mmmm | | W | vvvv | L | pp |
  // +-----+ +--------------+ +-------------------+
  // VEX_R: opcode extension equivalent to REX.R in
  // 1's complement (inverted) form
  //
  //  1: Same as REX_R=0 (must be 1 in 32-bit mode)
  //  0: Same as REX_R=1 (64-bit mode only)

  // VEX_X: equivalent to REX.X, only used when a
  // register is used for index in SIB Byte.
  //
  //  1: Same as REX.X=0 (must be 1 in 32-bit mode)
  //  0: Same as REX.X=1 (64-bit mode only)

  // VEX_B:
  //  1: Same as REX_B=0 (ignored in 32-bit mode)
  //  0: Same as REX_B=1 (64-bit mode only)

  // VEX_W: opcode specific (use like REX.W, or used for
  // opcode extension, or ignored, depending on the opcode byte)

  // VEX_5M (VEX m-mmmmm field):
  //
  //  0b00000: Reserved for future use
  //  0b00001: implied 0F leading opcode
  //  0b00010: implied 0F 38 leading opcode bytes
  //  0b00011: implied 0F 3A leading opcode bytes
  //  0b00100: Reserved for future use
  //  0b00101: VEX MAP5
  //  0b00110: VEX MAP6
  //  0b00111: VEX MAP7
  //  0b01000-0b11111: Reserved for future use
  //  (the XOP m-mmmm field reuses this range as map selects:)
  //  0b01000: XOP map select - 08h instructions with imm byte
  //  0b01001: XOP map select - 09h instructions with no imm byte
  //  0b01010: XOP map select - 0Ah instructions with imm dword

  // VEX_4V (VEX vvvv field): a register specifier
  // (in 1's complement form) or 1111 if unused.

  // VEX_PP: opcode extension providing equivalent
  // functionality of a SIMD prefix
  //  0b00: None
  //  0b01: 66
  //  0b10: F3
  //  0b11: F2

  // EVEX (4 bytes)
  // +-----+ +---------------+ +--------------------+ +------------------------+
  // | 62h | | RXBR' | B'mmm | | W | vvvv | X' | pp | | z | L'L | b | v' | aaa |
  // +-----+ +---------------+ +--------------------+ +------------------------+

  // EVEX_L2/VEX_L (Vector Length):
  //  L2 L
  //  0 0: scalar or 128-bit vector
  //  0 1: 256-bit vector
  //  1 0: 512-bit vector

  // 32-Register Support in 64-bit Mode Using EVEX with Embedded REX/REX2 Bits:
  //
  // +----------+---------+--------+-----------+---------+--------------+
  // |          |    4    |    3   |   [2:0]   |  Type   | Common Usage |
  // +----------+---------+--------+-----------+---------+--------------+
  // |   REG    | EVEX_R' | EVEX_R | modrm.reg | GPR, VR | Dest or Src  |
  // |   VVVV   | EVEX_v' |     EVEX.vvvv      | GPR, VR | Dest or Src  |
  // | RM (VR)  | EVEX_X  | EVEX_B | modrm.r/m |   VR    | Dest or Src  |
  // | RM (GPR) | EVEX_B' | EVEX_B | modrm.r/m |   GPR   | Dest or Src  |
  // |   BASE   | EVEX_B' | EVEX_B | modrm.r/m |   GPR   |      MA      |
  // |  INDEX   | EVEX_X' | EVEX_X | sib.index |   GPR   |      MA      |
  // |   VIDX   | EVEX_v' | EVEX_X | sib.index |   VR    |   VSIB MA    |
  // +----------+---------+--------+-----------+---------+--------------+
  //
  // * GPR  - General-purpose register
  // * VR   - Vector register
  // * VIDX - Vector index
  // * VSIB - Vector SIB
  // * MA   - Memory addressing

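  // Worked example (added for illustration): `vaddps %xmm2, %xmm1, %xmm0`
  // needs only a VEX2 prefix: R covers ModRM.reg (xmm0, encoding 0, stored
  // inverted as 1), vvvv holds xmm1 inverted (~0b0001 = 0b1110), L=0 selects
  // 128-bit vectors, and pp=0b00 means no SIMD prefix, giving the bytes
  // C5 F0 58 C2.
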
private:
  unsigned W : 1;
  unsigned R : 1;
  unsigned X : 1;
  unsigned B : 1;
  unsigned M : 1;
  unsigned R2 : 1;
  unsigned X2 : 1;
  unsigned B2 : 1;
  unsigned VEX_4V : 4;
  unsigned VEX_L : 1;
  unsigned VEX_PP : 2;
  unsigned VEX_5M : 5;
  unsigned EVEX_z : 1;
  unsigned EVEX_L2 : 1;
  unsigned EVEX_b : 1;
  unsigned EVEX_V2 : 1;
  unsigned EVEX_aaa : 3;
  PrefixKind Kind = None;
  const MCRegisterInfo &MRI;

  unsigned getRegEncoding(const MCInst &MI, unsigned OpNum) const {
    return MRI.getEncodingValue(MI.getOperand(OpNum).getReg());
  }

  void setR(unsigned Encoding) { R = Encoding >> 3 & 1; }
  void setR2(unsigned Encoding) {
    R2 = Encoding >> 4 & 1;
    assert((!R2 || (Kind <= REX2 || Kind == EVEX)) && "invalid setting");
  }
  void setX(unsigned Encoding) { X = Encoding >> 3 & 1; }
  void setX2(unsigned Encoding) {
    assert((Kind <= REX2 || Kind == EVEX) && "invalid setting");
    X2 = Encoding >> 4 & 1;
  }
  void setB(unsigned Encoding) { B = Encoding >> 3 & 1; }
  void setB2(unsigned Encoding) {
    assert((Kind <= REX2 || Kind == EVEX) && "invalid setting");
    B2 = Encoding >> 4 & 1;
  }
  void set4V(unsigned Encoding) { VEX_4V = Encoding & 0xf; }
  void setV2(unsigned Encoding) { EVEX_V2 = Encoding >> 4 & 1; }

public:
  void setW(bool V) { W = V; }
  void setR(const MCInst &MI, unsigned OpNum) {
    setR(getRegEncoding(MI, OpNum));
  }
  void setX(const MCInst &MI, unsigned OpNum, unsigned Shift = 3) {
    unsigned Reg = MI.getOperand(OpNum).getReg();
    // X is used to extend a vector register only when Shift is not 3.
    if (Shift != 3 && X86II::isApxExtendedReg(Reg))
      return;
    unsigned Encoding = MRI.getEncodingValue(Reg);
    X = Encoding >> Shift & 1;
  }
  void setB(const MCInst &MI, unsigned OpNum) {
    B = getRegEncoding(MI, OpNum) >> 3 & 1;
  }
  void set4V(const MCInst &MI, unsigned OpNum, bool IsImm = false) {
    // OF, SF, ZF and CF reuse the VEX_4V bits but are not inverted in the
    // encoding, so pre-invert them here (emit() inverts VEX_4V again).
    if (IsImm)
      set4V(~(MI.getOperand(OpNum).getImm()));
    else
      set4V(getRegEncoding(MI, OpNum));
  }
  void setL(bool V) { VEX_L = V; }
  void setPP(unsigned V) { VEX_PP = V; }
  void set5M(unsigned V) { VEX_5M = V; }
  void setR2(const MCInst &MI, unsigned OpNum) {
    setR2(getRegEncoding(MI, OpNum));
  }
  void setRR2(const MCInst &MI, unsigned OpNum) {
    unsigned Encoding = getRegEncoding(MI, OpNum);
    setR(Encoding);
    setR2(Encoding);
  }
  void setM(bool V) { M = V; }
  void setXX2(const MCInst &MI, unsigned OpNum) {
    unsigned Reg = MI.getOperand(OpNum).getReg();
    unsigned Encoding = MRI.getEncodingValue(Reg);
    setX(Encoding);
    // The index can be a vector register, while X2 is used to extend GPRs
    // only.
    if (Kind <= REX2 || X86II::isApxExtendedReg(Reg))
      setX2(Encoding);
  }
  void setBB2(const MCInst &MI, unsigned OpNum) {
    unsigned Reg = MI.getOperand(OpNum).getReg();
    unsigned Encoding = MRI.getEncodingValue(Reg);
    setB(Encoding);
    // The base can be a vector register, while B2 is used to extend GPRs
    // only.
    if (Kind <= REX2 || X86II::isApxExtendedReg(Reg))
      setB2(Encoding);
  }
  void setZ(bool V) { EVEX_z = V; }
  void setL2(bool V) { EVEX_L2 = V; }
  void setEVEX_b(bool V) { EVEX_b = V; }
  void setV2(const MCInst &MI, unsigned OpNum, bool HasVEX_4V) {
    // Only needed with VSIB, which doesn't use VVVV.
    if (HasVEX_4V)
      return;
    unsigned Reg = MI.getOperand(OpNum).getReg();
    if (X86II::isApxExtendedReg(Reg))
      return;
    setV2(MRI.getEncodingValue(Reg));
  }
  void set4VV2(const MCInst &MI, unsigned OpNum) {
    unsigned Encoding = getRegEncoding(MI, OpNum);
    set4V(Encoding);
    setV2(Encoding);
  }
  void setAAA(const MCInst &MI, unsigned OpNum) {
    EVEX_aaa = getRegEncoding(MI, OpNum);
  }
  void setNF(bool V) { EVEX_aaa |= V << 2; }
  void setSC(const MCInst &MI, unsigned OpNum) {
    unsigned Encoding = MI.getOperand(OpNum).getImm();
    EVEX_V2 = ~(Encoding >> 3) & 0x1;
    EVEX_aaa = Encoding & 0x7;
  }

  X86OpcodePrefixHelper(const MCRegisterInfo &MRI)
      : W(0), R(0), X(0), B(0), M(0), R2(0), X2(0), B2(0), VEX_4V(0), VEX_L(0),
        VEX_PP(0), VEX_5M(0), EVEX_z(0), EVEX_L2(0), EVEX_b(0), EVEX_V2(0),
        EVEX_aaa(0), MRI(MRI) {}

  void setLowerBound(PrefixKind K) { Kind = K; }

  PrefixKind determineOptimalKind() {
    switch (Kind) {
    case None:
      // The M bit is intentionally not considered here, because:
      //   1. There is no guarantee that REX2 is supported by an arch without
      //      explicit EGPR support.
      //   2. REX2 is longer than the 0F escape byte.
      Kind = (R2 | X2 | B2) ? REX2 : (W | R | X | B) ? REX : None;
      break;
    case REX:
      Kind = (R2 | X2 | B2) ? REX2 : REX;
      break;
    case REX2:
    case XOP:
    case VEX3:
    case EVEX:
      break;
    case VEX2:
      Kind = (W | X | B | (VEX_5M != 1)) ? VEX3 : VEX2;
      break;
    }
    return Kind;
  }
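
  // Example of the promotion logic above (added for illustration): for
  // `addq %r16, %rax` the encoding of the APX register r16 sets B2, so a
  // starting Kind of None is promoted to REX2, whereas `addq %r8, %rax`
  // only sets B and needs just a REX prefix.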

  void emit(SmallVectorImpl<char> &CB) const {
    uint8_t FirstPayload =
        ((~R) & 0x1) << 7 | ((~X) & 0x1) << 6 | ((~B) & 0x1) << 5;
    uint8_t LastPayload = ((~VEX_4V) & 0xf) << 3 | VEX_L << 2 | VEX_PP;
    switch (Kind) {
    case None:
      return;
    case REX:
      emitByte(0x40 | W << 3 | R << 2 | X << 1 | B, CB);
      return;
    case REX2:
      emitByte(0xD5, CB);
      emitByte(M << 7 | R2 << 6 | X2 << 5 | B2 << 4 | W << 3 | R << 2 |
                   X << 1 | B,
               CB);
      return;
    case VEX2:
      emitByte(0xC5, CB);
      emitByte(((~R) & 1) << 7 | LastPayload, CB);
      return;
    case VEX3:
    case XOP:
      emitByte(Kind == VEX3 ? 0xC4 : 0x8F, CB);
      emitByte(FirstPayload | VEX_5M, CB);
      emitByte(W << 7 | LastPayload, CB);
      return;
    case EVEX:
      assert(VEX_5M && !(VEX_5M & 0x8) && "invalid mmm fields for EVEX!");
      emitByte(0x62, CB);
      emitByte(FirstPayload | ((~R2) & 0x1) << 4 | B2 << 3 | VEX_5M, CB);
      emitByte(W << 7 | ((~VEX_4V) & 0xf) << 3 | ((~X2) & 0x1) << 2 | VEX_PP,
               CB);
      emitByte(EVEX_z << 7 | EVEX_L2 << 6 | VEX_L << 5 | EVEX_b << 4 |
                   ((~EVEX_V2) & 0x1) << 3 | EVEX_aaa,
               CB);
      return;
    }
  }
};
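
// Typical driving sequence for X86OpcodePrefixHelper (editor-added sketch;
// see emitVEXOpcodePrefix and emitREXPrefix below for the real call sites):
//
//   X86OpcodePrefixHelper Prefix(MRI);
//   Prefix.setLowerBound(VEX2);          // encoding family from TSFlags
//   Prefix.setW(TSFlags & X86II::REX_W);
//   Prefix.setRR2(MI, CurOp);            // fill R/X/B/vvvv... from operands
//   PrefixKind Kind = Prefix.determineOptimalKind();
//   Prefix.emit(CB);                     // append the chosen prefix bytes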

class X86MCCodeEmitter : public MCCodeEmitter {
  const MCInstrInfo &MCII;
  MCContext &Ctx;

public:
  X86MCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx)
      : MCII(mcii), Ctx(ctx) {}
  X86MCCodeEmitter(const X86MCCodeEmitter &) = delete;
  X86MCCodeEmitter &operator=(const X86MCCodeEmitter &) = delete;
  ~X86MCCodeEmitter() override = default;

  void emitPrefix(const MCInst &MI, SmallVectorImpl<char> &CB,
                  const MCSubtargetInfo &STI) const;

  void encodeInstruction(const MCInst &MI, SmallVectorImpl<char> &CB,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

private:
  unsigned getX86RegNum(const MCOperand &MO) const;

  unsigned getX86RegEncoding(const MCInst &MI, unsigned OpNum) const;

  void emitImmediate(const MCOperand &Disp, SMLoc Loc, unsigned ImmSize,
                     MCFixupKind FixupKind, uint64_t StartByte,
                     SmallVectorImpl<char> &CB,
                     SmallVectorImpl<MCFixup> &Fixups, int ImmOffset = 0) const;

  void emitRegModRMByte(const MCOperand &ModRMReg, unsigned RegOpcodeFld,
                        SmallVectorImpl<char> &CB) const;

  void emitSIBByte(unsigned SS, unsigned Index, unsigned Base,
                   SmallVectorImpl<char> &CB) const;

  void emitMemModRMByte(const MCInst &MI, unsigned Op, unsigned RegOpcodeField,
                        uint64_t TSFlags, PrefixKind Kind, uint64_t StartByte,
                        SmallVectorImpl<char> &CB,
                        SmallVectorImpl<MCFixup> &Fixups,
                        const MCSubtargetInfo &STI,
                        bool ForceSIB = false) const;

  PrefixKind emitPrefixImpl(unsigned &CurOp, const MCInst &MI,
                            const MCSubtargetInfo &STI,
                            SmallVectorImpl<char> &CB) const;

  PrefixKind emitVEXOpcodePrefix(int MemOperand, const MCInst &MI,
                                 const MCSubtargetInfo &STI,
                                 SmallVectorImpl<char> &CB) const;

  void emitSegmentOverridePrefix(unsigned SegOperand, const MCInst &MI,
                                 SmallVectorImpl<char> &CB) const;

  PrefixKind emitOpcodePrefix(int MemOperand, const MCInst &MI,
                              const MCSubtargetInfo &STI,
                              SmallVectorImpl<char> &CB) const;

  PrefixKind emitREXPrefix(int MemOperand, const MCInst &MI,
                           const MCSubtargetInfo &STI,
                           SmallVectorImpl<char> &CB) const;
};

} // end anonymous namespace

static uint8_t modRMByte(unsigned Mod, unsigned RegOpcode, unsigned RM) {
  assert(Mod < 4 && RegOpcode < 8 && RM < 8 && "ModRM Fields out of range!");
  return RM | (RegOpcode << 3) | (Mod << 6);
}
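
// For example (illustrative): modRMByte(3, 2, 1) == 0b11'010'001 == 0xD1,
// i.e. a register-direct ModRM byte with /2 as the opcode extension and
// RM = 1.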

static void emitConstant(uint64_t Val, unsigned Size,
                         SmallVectorImpl<char> &CB) {
  // Output the constant in little endian byte order.
  for (unsigned i = 0; i != Size; ++i) {
    emitByte(Val & 255, CB);
    Val >>= 8;
  }
}
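
// For example (illustrative): emitConstant(0x11223344, 4, CB) appends the
// bytes 44 33 22 11, i.e. little-endian order.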

/// Determine if this immediate can fit in a disp8 or a compressed disp8 for
/// EVEX instructions. \p ImmOffset will be set to the value to pass to the
/// ImmOffset parameter of emitImmediate.
static bool isDispOrCDisp8(uint64_t TSFlags, int Value, int &ImmOffset) {
  bool HasEVEX = (TSFlags & X86II::EncodingMask) == X86II::EVEX;

  unsigned CD8_Scale =
      (TSFlags & X86II::CD8_Scale_Mask) >> X86II::CD8_Scale_Shift;
  CD8_Scale = CD8_Scale ? 1U << (CD8_Scale - 1) : 0U;
  if (!HasEVEX || !CD8_Scale)
    return isInt<8>(Value);

  assert(isPowerOf2_32(CD8_Scale) && "Unexpected CD8 scale!");
  if (Value & (CD8_Scale - 1)) // Unaligned offset
    return false;

  int CDisp8 = Value / static_cast<int>(CD8_Scale);
  if (!isInt<8>(CDisp8))
    return false;

  // ImmOffset will be added to Value in emitImmediate, leaving just CDisp8.
  ImmOffset = CDisp8 - Value;
  return true;
}
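
// Worked example (added for illustration): for an EVEX instruction with a
// 64-byte compressed-disp8 scale, a displacement of 192 is stored as
// CDisp8 = 192/64 = 3, so ImmOffset is set to 3 - 192 = -189.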

/// \returns the appropriate fixup kind to use for an immediate in an
/// instruction with the specified TSFlags.
static MCFixupKind getImmFixupKind(uint64_t TSFlags) {
  unsigned Size = X86II::getSizeOfImm(TSFlags);
  bool isPCRel = X86II::isImmPCRel(TSFlags);

  if (X86II::isImmSigned(TSFlags)) {
    switch (Size) {
    default:
      llvm_unreachable("Unsupported signed fixup size!");
    case 4:
      return MCFixupKind(X86::reloc_signed_4byte);
    }
  }
  return MCFixup::getKindForSize(Size, isPCRel);
}

enum GlobalOffsetTableExprKind { GOT_None, GOT_Normal, GOT_SymDiff };

/// Check if this expression starts with _GLOBAL_OFFSET_TABLE_ and if it is
/// of the form _GLOBAL_OFFSET_TABLE_-symbol. This is needed to support PIC on
/// ELF i386 as _GLOBAL_OFFSET_TABLE_ is magical. We check only the simple
/// cases that are known to be used: _GLOBAL_OFFSET_TABLE_ by itself or at the
/// start of a binary expression.
static GlobalOffsetTableExprKind
startsWithGlobalOffsetTable(const MCExpr *Expr) {
  const MCExpr *RHS = nullptr;
  if (Expr->getKind() == MCExpr::Binary) {
    const MCBinaryExpr *BE = static_cast<const MCBinaryExpr *>(Expr);
    Expr = BE->getLHS();
    RHS = BE->getRHS();
  }

  if (Expr->getKind() != MCExpr::SymbolRef)
    return GOT_None;

  const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr *>(Expr);
  const MCSymbol &S = Ref->getSymbol();
  if (S.getName() != "_GLOBAL_OFFSET_TABLE_")
    return GOT_None;
  if (RHS && RHS->getKind() == MCExpr::SymbolRef)
    return GOT_SymDiff;
  return GOT_Normal;
}
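
// For example (illustrative): `_GLOBAL_OFFSET_TABLE_` alone yields
// GOT_Normal, `_GLOBAL_OFFSET_TABLE_-.Lsym` yields GOT_SymDiff, and any
// other expression yields GOT_None.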

static bool hasSecRelSymbolRef(const MCExpr *Expr) {
  if (Expr->getKind() == MCExpr::SymbolRef) {
    const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr *>(Expr);
    return Ref->getKind() == MCSymbolRefExpr::VK_SECREL;
  }
  return false;
}

static bool isPCRel32Branch(const MCInst &MI, const MCInstrInfo &MCII) {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MCII.get(Opcode);
  if ((Opcode != X86::CALL64pcrel32 && Opcode != X86::JMP_4 &&
       Opcode != X86::JCC_4) ||
      getImmFixupKind(Desc.TSFlags) != FK_PCRel_4)
    return false;

  unsigned CurOp = X86II::getOperandBias(Desc);
  const MCOperand &Op = MI.getOperand(CurOp);
  if (!Op.isExpr())
    return false;

  const MCSymbolRefExpr *Ref = dyn_cast<MCSymbolRefExpr>(Op.getExpr());
  return Ref && Ref->getKind() == MCSymbolRefExpr::VK_None;
}
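
// For example (illustrative): `call foo` (X86::CALL64pcrel32 with a plain
// symbol reference) is a PC-relative 32-bit branch, while `call foo@PLT`
// is not, because its symbol reference carries a variant kind.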

unsigned X86MCCodeEmitter::getX86RegNum(const MCOperand &MO) const {
  return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg()) & 0x7;
}

unsigned X86MCCodeEmitter::getX86RegEncoding(const MCInst &MI,
                                             unsigned OpNum) const {
  return Ctx.getRegisterInfo()->getEncodingValue(MI.getOperand(OpNum).getReg());
}

void X86MCCodeEmitter::emitImmediate(const MCOperand &DispOp, SMLoc Loc,
                                     unsigned Size, MCFixupKind FixupKind,
                                     uint64_t StartByte,
                                     SmallVectorImpl<char> &CB,
                                     SmallVectorImpl<MCFixup> &Fixups,
                                     int ImmOffset) const {
  const MCExpr *Expr = nullptr;
  if (DispOp.isImm()) {
    // If this is a simple integer displacement that doesn't require a
    // relocation, emit it now.
    if (FixupKind != FK_PCRel_1 && FixupKind != FK_PCRel_2 &&
        FixupKind != FK_PCRel_4) {
      emitConstant(DispOp.getImm() + ImmOffset, Size, CB);
      return;
    }
    Expr = MCConstantExpr::create(DispOp.getImm(), Ctx);
  } else {
    Expr = DispOp.getExpr();
  }

  // Select a special fixup kind for _GLOBAL_OFFSET_TABLE_ and @SECREL
  // expressions.
  if ((FixupKind == FK_Data_4 || FixupKind == FK_Data_8 ||
       FixupKind == MCFixupKind(X86::reloc_signed_4byte))) {
    GlobalOffsetTableExprKind Kind = startsWithGlobalOffsetTable(Expr);
    if (Kind != GOT_None) {
      assert(ImmOffset == 0);

      if (Size == 8) {
        FixupKind = MCFixupKind(X86::reloc_global_offset_table8);
      } else {
        assert(Size == 4);
        FixupKind = MCFixupKind(X86::reloc_global_offset_table);
      }

      if (Kind == GOT_Normal)
        ImmOffset = static_cast<int>(CB.size() - StartByte);
    } else if (Expr->getKind() == MCExpr::SymbolRef) {
      if (hasSecRelSymbolRef(Expr)) {
        FixupKind = MCFixupKind(FK_SecRel_4);
      }
    } else if (Expr->getKind() == MCExpr::Binary) {
      const MCBinaryExpr *Bin = static_cast<const MCBinaryExpr *>(Expr);
      if (hasSecRelSymbolRef(Bin->getLHS()) ||
          hasSecRelSymbolRef(Bin->getRHS())) {
        FixupKind = MCFixupKind(FK_SecRel_4);
      }
    }
  }

  // If the fixup is pc-relative, we need to bias the value to be relative to
  // the start of the field, not the end of the field.
  if (FixupKind == FK_PCRel_4 ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte) ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte_movq_load) ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax) ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax_rex) ||
      FixupKind == MCFixupKind(X86::reloc_branch_4byte_pcrel)) {
    ImmOffset -= 4;
    // If this is a pc-relative load off _GLOBAL_OFFSET_TABLE_:
    //   leaq _GLOBAL_OFFSET_TABLE_(%rip), %r15
    // this needs to be a GOTPC32 relocation.
    if (startsWithGlobalOffsetTable(Expr) != GOT_None)
      FixupKind = MCFixupKind(X86::reloc_global_offset_table);
  }
  if (FixupKind == FK_PCRel_2)
    ImmOffset -= 2;
  if (FixupKind == FK_PCRel_1)
    ImmOffset -= 1;

  if (ImmOffset)
    Expr = MCBinaryExpr::createAdd(Expr, MCConstantExpr::create(ImmOffset, Ctx),
                                   Ctx);

  // Emit a symbolic constant as a fixup and Size zeros.
  Fixups.push_back(MCFixup::create(static_cast<uint32_t>(CB.size() - StartByte),
                                   Expr, FixupKind, Loc));
  emitConstant(0, Size, CB);
}

void X86MCCodeEmitter::emitRegModRMByte(const MCOperand &ModRMReg,
                                        unsigned RegOpcodeFld,
                                        SmallVectorImpl<char> &CB) const {
  emitByte(modRMByte(3, RegOpcodeFld, getX86RegNum(ModRMReg)), CB);
}

void X86MCCodeEmitter::emitSIBByte(unsigned SS, unsigned Index, unsigned Base,
                                   SmallVectorImpl<char> &CB) const {
  // The SIB byte has the same format as the ModRM byte.
  emitByte(modRMByte(SS, Index, Base), CB);
}

void X86MCCodeEmitter::emitMemModRMByte(
    const MCInst &MI, unsigned Op, unsigned RegOpcodeField, uint64_t TSFlags,
    PrefixKind Kind, uint64_t StartByte, SmallVectorImpl<char> &CB,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI,
    bool ForceSIB) const {
  const MCOperand &Disp = MI.getOperand(Op + X86::AddrDisp);
  const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &Scale = MI.getOperand(Op + X86::AddrScaleAmt);
  const MCOperand &IndexReg = MI.getOperand(Op + X86::AddrIndexReg);
  unsigned BaseReg = Base.getReg();

  // Handle %rip relative addressing.
  if (BaseReg == X86::RIP ||
      BaseReg == X86::EIP) { // [disp32+rIP] in X86-64 mode
    assert(STI.hasFeature(X86::Is64Bit) &&
           "Rip-relative addressing requires 64-bit mode");
    assert(IndexReg.getReg() == 0 && !ForceSIB &&
           "Invalid rip-relative address");
    emitByte(modRMByte(0, RegOpcodeField, 5), CB);

    unsigned Opcode = MI.getOpcode();
    unsigned FixupKind = [&]() {
      // Enable a relaxed relocation only for a MCSymbolRefExpr. We cannot use
      // a relaxed relocation if an offset is present (e.g. x@GOTPCREL+4).
      if (!(Disp.isExpr() && isa<MCSymbolRefExpr>(Disp.getExpr())))
        return X86::reloc_riprel_4byte;

      // Certain loads for GOT references can be relocated against the symbol
      // directly if the symbol ends up in the same linkage unit.
      switch (Opcode) {
      default:
        return X86::reloc_riprel_4byte;
      case X86::MOV64rm:
        // movq loads are a subset of reloc_riprel_4byte_relax_rex. They are a
        // special case because COFF and Mach-O don't support ELF's more
        // flexible R_X86_64_REX_GOTPCRELX relaxation.
        // TODO: Support new relocation for REX2.
        assert(Kind == REX || Kind == REX2);
        return X86::reloc_riprel_4byte_movq_load;
      case X86::ADC32rm:
      case X86::ADD32rm:
      case X86::AND32rm:
      case X86::CMP32rm:
      case X86::MOV32rm:
      case X86::OR32rm:
      case X86::SBB32rm:
      case X86::SUB32rm:
      case X86::TEST32mr:
      case X86::XOR32rm:
      case X86::CALL64m:
      case X86::JMP64m:
      case X86::TAILJMPm64:
      case X86::TEST64mr:
      case X86::ADC64rm:
      case X86::ADD64rm:
      case X86::AND64rm:
      case X86::CMP64rm:
      case X86::OR64rm:
      case X86::SBB64rm:
      case X86::SUB64rm:
      case X86::XOR64rm:
        // We don't support a relocation for the REX2 prefix yet, so
        // temporarily use the REX relocation.
        // TODO: Support new relocation for REX2.
        return (Kind == REX || Kind == REX2) ? X86::reloc_riprel_4byte_relax_rex
                                             : X86::reloc_riprel_4byte_relax;
      }
    }();

    // rip-relative addressing is actually relative to the *next* instruction.
    // Since an immediate can follow the mod/rm byte for an instruction, this
    // means that we need to bias the displacement field of the instruction
    // with the size of the immediate field. If we have this case, add it into
    // the expression to emit.
    // Note: rip-relative addressing using immediate displacement values should
    // not be adjusted, assuming it was the user's intent.
    int ImmSize = !Disp.isImm() && X86II::hasImm(TSFlags)
                      ? X86II::getSizeOfImm(TSFlags)
                      : 0;

    emitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind), StartByte, CB,
                  Fixups, -ImmSize);
    return;
  }

  unsigned BaseRegNo = BaseReg ? getX86RegNum(Base) : -1U;

  bool IsAdSize16 = STI.hasFeature(X86::Is32Bit) &&
                    (TSFlags & X86II::AdSizeMask) == X86II::AdSize16;

  // 16-bit addressing forms of the ModR/M byte have a different encoding for
  // the R/M field and are far more limited in which registers can be used.
  if (IsAdSize16 || X86_MC::is16BitMemOperand(MI, Op, STI)) {
    if (BaseReg) {
      // For 32-bit addressing, the row and column values in Table 2-2 are
      // basically the same. It's AX/CX/DX/BX/SP/BP/SI/DI in that order, with
      // some special cases, and getX86RegNum reflects that numbering.
      // For 16-bit addressing it's more fun, as shown in the SDM Vol 2A,
      // Table 2-1 "16-Bit Addressing Forms with the ModR/M byte". We can only
      // use SI/DI/BP/BX, which have "row" values 4-7 in no particular order,
      // while values 0-3 indicate the allowed combinations (base+index) of
      // those: 0 for BX+SI, 1 for BX+DI, 2 for BP+SI, 3 for BP+DI.
      //
      // R16Table[] is a lookup from the normal RegNo to the row values of
      // Table 2-1 for 16-bit addressing modes, where zero means disallowed.
      static const unsigned R16Table[] = {0, 0, 0, 7, 0, 6, 4, 5};
      unsigned RMfield = R16Table[BaseRegNo];

      assert(RMfield && "invalid 16-bit base register");

      if (IndexReg.getReg()) {
        unsigned IndexReg16 = R16Table[getX86RegNum(IndexReg)];

        assert(IndexReg16 && "invalid 16-bit index register");
        // We must have one of SI/DI (4,5), and one of BP/BX (6,7).
        assert(((IndexReg16 ^ RMfield) & 2) &&
               "invalid 16-bit base/index register combination");
        assert(Scale.getImm() == 1 &&
               "invalid scale for 16-bit memory reference");

        // Allow base/index to appear in either order (although GAS doesn't).
        if (IndexReg16 & 2)
          RMfield = (RMfield & 1) | ((7 - IndexReg16) << 1);
        else
          RMfield = (IndexReg16 & 1) | ((7 - RMfield) << 1);
      }
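      // Worked example (added for illustration): for [BX+SI], BX maps to
      // RMfield 7 and SI to IndexReg16 4; (4 & 2) == 0, so the else branch
      // gives RMfield = (4 & 1) | ((7 - 7) << 1) = 0, matching Table 2-1.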

      if (Disp.isImm() && isInt<8>(Disp.getImm())) {
        if (Disp.getImm() == 0 && RMfield != 6) {
          // There is no displacement; just the register.
          emitByte(modRMByte(0, RegOpcodeField, RMfield), CB);
          return;
        }
        // Use the [REG]+disp8 form, including for [BP], which cannot be
        // encoded without a displacement.
        emitByte(modRMByte(1, RegOpcodeField, RMfield), CB);
        emitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, StartByte, CB, Fixups);
        return;
      }
      // This is the [REG]+disp16 case.
      emitByte(modRMByte(2, RegOpcodeField, RMfield), CB);
    } else {
      assert(IndexReg.getReg() == 0 && "Unexpected index register!");
      // There is no BaseReg; this is the plain [disp16] case.
      emitByte(modRMByte(0, RegOpcodeField, 6), CB);
    }

    // Emit the 16-bit displacement for the plain disp16 or [REG]+disp16 cases.
    emitImmediate(Disp, MI.getLoc(), 2, FK_Data_2, StartByte, CB, Fixups);
    return;
  }

  // Check for the presence of the {disp8} or {disp32} pseudo prefixes.
  bool UseDisp8 = MI.getFlags() & X86::IP_USE_DISP8;
  bool UseDisp32 = MI.getFlags() & X86::IP_USE_DISP32;

  // We only allow no displacement if no pseudo prefix is present.
  bool AllowNoDisp = !UseDisp8 && !UseDisp32;
  // Disp8 is allowed unless the {disp32} prefix is present.
  bool AllowDisp8 = !UseDisp32;

  // Determine whether a SIB byte is needed.
  if (!ForceSIB && !X86II::needSIB(BaseReg, IndexReg.getReg(),
                                   STI.hasFeature(X86::Is64Bit))) {
    if (BaseReg == 0) { // [disp32] in X86-32 mode
      emitByte(modRMByte(0, RegOpcodeField, 5), CB);
      emitImmediate(Disp, MI.getLoc(), 4, FK_Data_4, StartByte, CB, Fixups);
      return;
    }

    // If the base is not EBP/ESP/R12/R13/R20/R21/R28/R29 and there is no
    // displacement, use simple indirect register encoding; this handles
    // addresses like [EAX]. The encoding for [EBP], [R13], [R20], [R21],
    // [R28] or [R29] with no displacement means [disp32], so we handle it
    // by emitting a displacement of 0 later.
    if (BaseRegNo != N86::EBP) {
      if (Disp.isImm() && Disp.getImm() == 0 && AllowNoDisp) {
        emitByte(modRMByte(0, RegOpcodeField, BaseRegNo), CB);
        return;
      }

      // If the displacement is @tlscall, treat it as a zero.
      if (Disp.isExpr()) {
        auto *Sym = dyn_cast<MCSymbolRefExpr>(Disp.getExpr());
        if (Sym && Sym->getKind() == MCSymbolRefExpr::VK_TLSCALL) {
          // This is exclusively used by call *a@tlscall(base). The relocation
          // (R_386_TLSCALL or R_X86_64_TLSCALL) applies to the beginning.
          Fixups.push_back(MCFixup::create(0, Sym, FK_NONE, MI.getLoc()));
          emitByte(modRMByte(0, RegOpcodeField, BaseRegNo), CB);
          return;
        }
      }
    }

    // Otherwise, if the displacement fits in a byte, encode as [REG+disp8],
    // including a compressed disp8 for EVEX instructions that support it.
    // This also handles the 0 displacement for [EBP], [R13], [R21] or [R29].
    // We can't use disp8 if the {disp32} pseudo prefix is present.
    if (Disp.isImm() && AllowDisp8) {
      int ImmOffset = 0;
      if (isDispOrCDisp8(TSFlags, Disp.getImm(), ImmOffset)) {
        emitByte(modRMByte(1, RegOpcodeField, BaseRegNo), CB);
        emitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, StartByte, CB, Fixups,
                      ImmOffset);
        return;
      }
    }

    // Otherwise, emit the most general non-SIB encoding: [REG+disp32].
    // The displacement may be 0 in the [EBP], [R13], [R21] or [R29] case if
    // the {disp32} pseudo prefix prevented using disp8 above.
    emitByte(modRMByte(2, RegOpcodeField, BaseRegNo), CB);
    unsigned Opcode = MI.getOpcode();
    unsigned FixupKind = Opcode == X86::MOV32rm ? X86::reloc_signed_4byte_relax
                                                : X86::reloc_signed_4byte;
    emitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind), StartByte, CB,
                  Fixups);
    return;
  }

  // We need a SIB byte, so start by outputting the ModR/M byte first.
  assert(IndexReg.getReg() != X86::ESP && IndexReg.getReg() != X86::RSP &&
         "Cannot use ESP as index reg!");

  bool ForceDisp32 = false;
  bool ForceDisp8 = false;
  int ImmOffset = 0;
  if (BaseReg == 0) {
    // If there is no base register, we emit the special case SIB byte with
    // MOD=0, BASE=5, to JUST get the index, scale, and displacement.
    BaseRegNo = 5;
    emitByte(modRMByte(0, RegOpcodeField, 4), CB);
    ForceDisp32 = true;
  } else if (Disp.isImm() && Disp.getImm() == 0 && AllowNoDisp &&
             // The base reg can't be EBP/RBP/R13/R21/R29, as that would end
             // up with '5' in the base field, which is the magic [*]
             // nomenclature that indicates no base when mod=0. For these
             // cases we'll emit a 0 displacement instead.
             BaseRegNo != N86::EBP) {
    // Emit a no-displacement ModR/M byte.
    emitByte(modRMByte(0, RegOpcodeField, 4), CB);
  } else if (Disp.isImm() && AllowDisp8 &&
             isDispOrCDisp8(TSFlags, Disp.getImm(), ImmOffset)) {
    // The displacement fits in a byte or matches an EVEX compressed disp8;
    // use the disp8 encoding. This also handles an EBP/R13/R21/R29 base
    // with 0 displacement, unless the {disp32} pseudo prefix was used.
    emitByte(modRMByte(1, RegOpcodeField, 4), CB);
    ForceDisp8 = true;
  } else {
    // Otherwise, emit the normal disp32 encoding.
    emitByte(modRMByte(2, RegOpcodeField, 4), CB);
    ForceDisp32 = true;
  }

  // Calculate what the SS field value should be.
  static const unsigned SSTable[] = {~0U, 0, 1, ~0U, 2, ~0U, ~0U, ~0U, 3};
  unsigned SS = SSTable[Scale.getImm()];
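  // For example (illustrative): a scale of 4 selects SS = 2, while the ~0U
  // entries make an invalid scale (e.g. 3) trip the range assert in
  // modRMByte.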

  unsigned IndexRegNo = IndexReg.getReg() ? getX86RegNum(IndexReg) : 4;

  emitSIBByte(SS, IndexRegNo, BaseRegNo, CB);

  // Do we need to output a displacement?
  if (ForceDisp8)
    emitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, StartByte, CB, Fixups,
                  ImmOffset);
  else if (ForceDisp32)
    emitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(X86::reloc_signed_4byte),
                  StartByte, CB, Fixups);
}

/// Emit all instruction prefixes.
///
/// \returns one of REX, REX2, XOP, VEX2, VEX3 or EVEX if any of them is used,
/// otherwise returns None.
PrefixKind X86MCCodeEmitter::emitPrefixImpl(unsigned &CurOp, const MCInst &MI,
                                            const MCSubtargetInfo &STI,
                                            SmallVectorImpl<char> &CB) const {
  uint64_t TSFlags = MCII.get(MI.getOpcode()).TSFlags;
  // Determine where the memory operand starts, if present.
  int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
  // Emit the segment override opcode prefix as needed.
  if (MemoryOperand != -1) {
    MemoryOperand += CurOp;
    emitSegmentOverridePrefix(MemoryOperand + X86::AddrSegmentReg, MI, CB);
  }

  // Emit the repeat opcode prefix as needed.
  unsigned Flags = MI.getFlags();
  if (TSFlags & X86II::REP || Flags & X86::IP_HAS_REPEAT)
    emitByte(0xF3, CB);
  if (Flags & X86::IP_HAS_REPEAT_NE)
    emitByte(0xF2, CB);

  // Emit the address size opcode prefix as needed.
  if (X86_MC::needsAddressSizeOverride(MI, STI, MemoryOperand, TSFlags) ||
      Flags & X86::IP_HAS_AD_SIZE)
    emitByte(0x67, CB);

  uint64_t Form = TSFlags & X86II::FormMask;
  switch (Form) {
  default:
    break;
  case X86II::RawFrmDstSrc: {
    // Emit the segment override opcode prefix as needed (not for %ds).
    if (MI.getOperand(2).getReg() != X86::DS)
      emitSegmentOverridePrefix(2, MI, CB);
    CurOp += 3; // Consume operands.
    break;
  }
  case X86II::RawFrmSrc: {
    // Emit the segment override opcode prefix as needed (not for %ds).
    if (MI.getOperand(1).getReg() != X86::DS)
      emitSegmentOverridePrefix(1, MI, CB);
    CurOp += 2; // Consume operands.
    break;
  }
  case X86II::RawFrmDst: {
    ++CurOp; // Consume operand.
    break;
  }
  case X86II::RawFrmMemOffs: {
    // Emit the segment override opcode prefix as needed.
    emitSegmentOverridePrefix(1, MI, CB);
    break;
  }
  }

  // The REX prefix is optional, but if used it must come immediately before
  // the opcode. Dispatch on the encoding type for this instruction.
  return (TSFlags & X86II::EncodingMask)
             ? emitVEXOpcodePrefix(MemoryOperand, MI, STI, CB)
             : emitOpcodePrefix(MemoryOperand, MI, STI, CB);
}

// AVX instructions are encoded using an encoding scheme that combines prefix
// bytes, an opcode extension field, operand encoding fields, and vector
// length encoding capability into a new prefix, referred to as VEX.

// The majority of the AVX-512 family of instructions (operating on
// 512/256/128-bit vector register operands) are encoded using a new prefix
// (called EVEX).

// XOP is a revised subset of what was originally intended as SSE5. It was
// changed to be similar to, but not overlapping with, AVX.

/// Emit an XOP, VEX2, VEX3 or EVEX prefix.
/// \returns the used prefix.
PrefixKind
X86MCCodeEmitter::emitVEXOpcodePrefix(int MemOperand, const MCInst &MI,
                                      const MCSubtargetInfo &STI,
                                      SmallVectorImpl<char> &CB) const {
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  uint64_t TSFlags = Desc.TSFlags;

  assert(!(TSFlags & X86II::LOCK) && "Can't have LOCK VEX.");

#ifndef NDEBUG
  unsigned NumOps = MI.getNumOperands();
  for (unsigned I = NumOps ? X86II::getOperandBias(Desc) : 0; I != NumOps;
       ++I) {
    const MCOperand &MO = MI.getOperand(I);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == X86::AH || Reg == X86::BH || Reg == X86::CH || Reg == X86::DH)
      report_fatal_error(
          "Cannot encode high byte register in VEX/EVEX-prefixed instruction");
  }
#endif

  X86OpcodePrefixHelper Prefix(*Ctx.getRegisterInfo());
  switch (TSFlags & X86II::EncodingMask) {
  default:
    break;
  case X86II::XOP:
    Prefix.setLowerBound(XOP);
    break;
  case X86II::VEX:
    // VEX can be 2-byte or 3-byte; which one is not determined yet unless
    // VEX3 is explicitly requested.
    Prefix.setLowerBound((MI.getFlags() & X86::IP_USE_VEX3) ? VEX3 : VEX2);
    break;
  case X86II::EVEX:
    Prefix.setLowerBound(EVEX);
    break;
  }

  Prefix.setW(TSFlags & X86II::REX_W);
  Prefix.setNF(TSFlags & X86II::EVEX_NF);

  bool HasEVEX_K = TSFlags & X86II::EVEX_K;
  bool HasVEX_4V = TSFlags & X86II::VEX_4V;
  bool IsND = X86II::hasNewDataDest(TSFlags); // IsND implies HasVEX_4V
  bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;

  switch (TSFlags & X86II::OpMapMask) {
  default:
    llvm_unreachable("Invalid prefix!");
  case X86II::TB:
    Prefix.set5M(0x1); // 0F
    break;
  case X86II::T8:
    Prefix.set5M(0x2); // 0F 38
    break;
  case X86II::TA:
    Prefix.set5M(0x3); // 0F 3A
    break;
  case X86II::XOP8:
    Prefix.set5M(0x8);
    break;
  case X86II::XOP9:
    Prefix.set5M(0x9);
    break;
  case X86II::XOPA:
    Prefix.set5M(0xA);
    break;
  case X86II::T_MAP4:
    Prefix.set5M(0x4);
    break;
  case X86II::T_MAP5:
    Prefix.set5M(0x5);
    break;
  case X86II::T_MAP6:
    Prefix.set5M(0x6);
    break;
  case X86II::T_MAP7:
    Prefix.set5M(0x7);
    break;
  }

  Prefix.setL(TSFlags & X86II::VEX_L);
  Prefix.setL2(TSFlags & X86II::EVEX_L2);
  if ((TSFlags & X86II::EVEX_L2) && STI.hasFeature(X86::FeatureAVX512) &&
      !STI.hasFeature(X86::FeatureEVEX512))
    report_fatal_error("ZMM registers are not supported without EVEX512");
  switch (TSFlags & X86II::OpPrefixMask) {
  case X86II::PD:
    Prefix.setPP(0x1); // 66
    break;
  case X86II::XS:
    Prefix.setPP(0x2); // F3
    break;
  case X86II::XD:
    Prefix.setPP(0x3); // F2
    break;
  }

  Prefix.setZ(HasEVEX_K && (TSFlags & X86II::EVEX_Z));
  Prefix.setEVEX_b(TSFlags & X86II::EVEX_B);

  bool EncodeRC = false;
  uint8_t EVEX_rc = 0;

  unsigned CurOp = X86II::getOperandBias(Desc);
  bool HasTwoConditionalOps = TSFlags & X86II::TwoConditionalOps;

  switch (TSFlags & X86II::FormMask) {
  default:
    llvm_unreachable("Unexpected form in emitVEXOpcodePrefix!");
  case X86II::MRMDestMem4VOp3CC: {
    // src1(ModR/M), MemAddr, src2(VEX_4V)
    Prefix.setRR2(MI, CurOp++);
    Prefix.setBB2(MI, MemOperand + X86::AddrBaseReg);
    Prefix.setXX2(MI, MemOperand + X86::AddrIndexReg);
    CurOp += X86::AddrNumOperands;
    Prefix.set4VV2(MI, CurOp++);
    break;
  }
  case X86II::MRM_C0:
  case X86II::RawFrm:
    break;
  case X86II::MRMDestMemCC:
  case X86II::MRMDestMemFSIB:
  case X86II::MRMDestMem: {
    // MRMDestMem instruction forms:
    //   MemAddr, src1(ModR/M)
    //   MemAddr, src1(VEX_4V), src2(ModR/M)
    //   MemAddr, src1(ModR/M), imm8
    //
    // NDD:
    //   dst(VEX_4V), MemAddr, src1(ModR/M)
    Prefix.setBB2(MI, MemOperand + X86::AddrBaseReg);
    Prefix.setXX2(MI, MemOperand + X86::AddrIndexReg);
    Prefix.setV2(MI, MemOperand + X86::AddrIndexReg, HasVEX_4V);

    if (IsND)
      Prefix.set4VV2(MI, CurOp++);

    CurOp += X86::AddrNumOperands;

    if (HasEVEX_K)
      Prefix.setAAA(MI, CurOp++);

    if (!IsND && HasVEX_4V)
      Prefix.set4VV2(MI, CurOp++);

    Prefix.setRR2(MI, CurOp++);
    if (HasTwoConditionalOps) {
      Prefix.set4V(MI, CurOp++, /*IsImm=*/true);
      Prefix.setSC(MI, CurOp++);
    }
    break;
  }
  case X86II::MRMSrcMemCC:
  case X86II::MRMSrcMemFSIB:
  case X86II::MRMSrcMem: {
    // MRMSrcMem instruction forms:
    //   src1(ModR/M), MemAddr
    //   src1(ModR/M), src2(VEX_4V), MemAddr
    //   src1(ModR/M), MemAddr, imm8
    //   src1(ModR/M), MemAddr, src2(Imm[7:4])
    //
    // FMA4:
    //   dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(Imm[7:4])
    //
    // NDD:
    //   dst(VEX_4V), src1(ModR/M), MemAddr
    if (IsND)
      Prefix.set4VV2(MI, CurOp++);

    Prefix.setRR2(MI, CurOp++);

    if (HasEVEX_K)
      Prefix.setAAA(MI, CurOp++);

    if (!IsND && HasVEX_4V)
      Prefix.set4VV2(MI, CurOp++);

    Prefix.setBB2(MI, MemOperand + X86::AddrBaseReg);
    Prefix.setXX2(MI, MemOperand + X86::AddrIndexReg);
    Prefix.setV2(MI, MemOperand + X86::AddrIndexReg, HasVEX_4V);
    CurOp += X86::AddrNumOperands;
    if (HasTwoConditionalOps) {
      Prefix.set4V(MI, CurOp++, /*IsImm=*/true);
      Prefix.setSC(MI, CurOp++);
    }
    break;
  }
  case X86II::MRMSrcMem4VOp3: {
    // Instruction format for 4VOp3:
    //   src1(ModR/M), MemAddr, src3(VEX_4V)
    Prefix.setRR2(MI, CurOp++);
    Prefix.setBB2(MI, MemOperand + X86::AddrBaseReg);
    Prefix.setXX2(MI, MemOperand + X86::AddrIndexReg);
    Prefix.set4VV2(MI, CurOp + X86::AddrNumOperands);
    break;
  }
  case X86II::MRMSrcMemOp4: {
    // dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M)
    Prefix.setR(MI, CurOp++);
    Prefix.set4V(MI, CurOp++);
    Prefix.setBB2(MI, MemOperand + X86::AddrBaseReg);
    Prefix.setXX2(MI, MemOperand + X86::AddrIndexReg);
    break;
  }
  case X86II::MRMXmCC:
  case X86II::MRM0m:
  case X86II::MRM1m:
  case X86II::MRM2m:
  case X86II::MRM3m:
  case X86II::MRM4m:
  case X86II::MRM5m:
  case X86II::MRM6m:
  case X86II::MRM7m: {
    // MRM[0-9]m instruction forms:
    //   MemAddr
    //   src1(VEX_4V), MemAddr
    if (HasVEX_4V)
      Prefix.set4VV2(MI, CurOp++);

    if (HasEVEX_K)
      Prefix.setAAA(MI, CurOp++);

    Prefix.setBB2(MI, MemOperand + X86::AddrBaseReg);
    Prefix.setXX2(MI, MemOperand + X86::AddrIndexReg);
    Prefix.setV2(MI, MemOperand + X86::AddrIndexReg, HasVEX_4V);
    CurOp += X86::AddrNumOperands + 1; // Skip first imm.
    if (HasTwoConditionalOps) {
      Prefix.set4V(MI, CurOp++, /*IsImm=*/true);
      Prefix.setSC(MI, CurOp++);
    }
    break;
  }
  case X86II::MRMSrcRegCC:
  case X86II::MRMSrcReg: {
    // MRMSrcReg instruction forms:
    //   dst(ModR/M), src1(VEX_4V), src2(ModR/M), src3(Imm[7:4])
    //   dst(ModR/M), src1(ModR/M)
    //   dst(ModR/M), src1(ModR/M), imm8
    //
    // FMA4:
    //   dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M)
    //
    // NDD:
    //   dst(VEX_4V), src1(ModR/M.reg), src2(ModR/M)
    if (IsND)
      Prefix.set4VV2(MI, CurOp++);
    Prefix.setRR2(MI, CurOp++);

    if (HasEVEX_K)
      Prefix.setAAA(MI, CurOp++);

    if (!IsND && HasVEX_4V)
      Prefix.set4VV2(MI, CurOp++);

    Prefix.setBB2(MI, CurOp);
    Prefix.setX(MI, CurOp, 4);
    ++CurOp;

    if (HasTwoConditionalOps) {
      Prefix.set4V(MI, CurOp++, /*IsImm=*/true);
      Prefix.setSC(MI, CurOp++);
    }

    if (TSFlags & X86II::EVEX_B) {
      if (HasEVEX_RC) {
        unsigned NumOps = Desc.getNumOperands();
        unsigned RcOperand = NumOps - 1;
        assert(RcOperand >= CurOp);
        EVEX_rc = MI.getOperand(RcOperand).getImm();
        assert(EVEX_rc <= 3 && "Invalid rounding control!");
      }
      EncodeRC = true;
    }
    break;
  }
  case X86II::MRMSrcReg4VOp3: {
    // Instruction format for 4VOp3:
    //   src1(ModR/M), src2(ModR/M), src3(VEX_4V)
    Prefix.setRR2(MI, CurOp++);
    Prefix.setBB2(MI, CurOp++);
    Prefix.set4VV2(MI, CurOp++);
    break;
  }
  case X86II::MRMSrcRegOp4: {
    // dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M)
    Prefix.setR(MI, CurOp++);
    Prefix.set4V(MI, CurOp++);
    // Skip the second register source (encoded in Imm[7:4]).
    ++CurOp;

    Prefix.setB(MI, CurOp);
    Prefix.setX(MI, CurOp, 4);
    ++CurOp;
    break;
  }
  case X86II::MRMDestRegCC:
  case X86II::MRMDestReg: {
    // MRMDestReg instruction forms:
    //   dst(ModR/M), src(ModR/M)
    //   dst(ModR/M), src(ModR/M), imm8
    //   dst(ModR/M), src1(VEX_4V), src2(ModR/M)
    //
    // NDD:
    //   dst(VEX_4V), src1(ModR/M), src2(ModR/M)
    if (IsND)
      Prefix.set4VV2(MI, CurOp++);
    Prefix.setBB2(MI, CurOp);
    Prefix.setX(MI, CurOp, 4);
    ++CurOp;

    if (HasEVEX_K)
      Prefix.setAAA(MI, CurOp++);

    if (!IsND && HasVEX_4V)
      Prefix.set4VV2(MI, CurOp++);

    Prefix.setRR2(MI, CurOp++);
    if (HasTwoConditionalOps) {
      Prefix.set4V(MI, CurOp++, /*IsImm=*/true);
      Prefix.setSC(MI, CurOp++);
    }
    if (TSFlags & X86II::EVEX_B)
      EncodeRC = true;
    break;
  }
  case X86II::MRMr0: {
    // MRMr0 instruction forms:
    //   11:rrr:000
    //   dst(ModR/M)
    Prefix.setRR2(MI, CurOp++);
    break;
  }
  case X86II::MRMXrCC:
  case X86II::MRM0r:
  case X86II::MRM1r:
  case X86II::MRM2r:
  case X86II::MRM3r:
  case X86II::MRM4r:
  case X86II::MRM5r:
  case X86II::MRM6r:
  case X86II::MRM7r: {
    // MRM0r-MRM7r instruction forms:
    //   dst(VEX_4V), src(ModR/M), imm8
    if (HasVEX_4V)
      Prefix.set4VV2(MI, CurOp++);

    if (HasEVEX_K)
      Prefix.setAAA(MI, CurOp++);

    Prefix.setBB2(MI, CurOp);
    Prefix.setX(MI, CurOp, 4);
    ++CurOp;
    if (HasTwoConditionalOps) {
      Prefix.set4V(MI, ++CurOp, /*IsImm=*/true);
      Prefix.setSC(MI, ++CurOp);
    }
    break;
  }
  }
  if (EncodeRC) {
    Prefix.setL(EVEX_rc & 0x1);
    Prefix.setL2(EVEX_rc & 0x2);
  }
  PrefixKind Kind = Prefix.determineOptimalKind();
  Prefix.emit(CB);
  return Kind;
}

/// Emit a REX prefix which specifies
///   1) 64-bit instructions,
///   2) non-default operand size, and
///   3) use of X86-64 extended registers.
///
/// \returns the used prefix (REX or None).
PrefixKind X86MCCodeEmitter::emitREXPrefix(int MemOperand, const MCInst &MI,
                                           const MCSubtargetInfo &STI,
                                           SmallVectorImpl<char> &CB) const {
  if (!STI.hasFeature(X86::Is64Bit))
    return None;
  X86OpcodePrefixHelper Prefix(*Ctx.getRegisterInfo());
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  uint64_t TSFlags = Desc.TSFlags;
  Prefix.setW(TSFlags & X86II::REX_W);
  unsigned NumOps = MI.getNumOperands();
  bool UsesHighByteReg = false;
#ifndef NDEBUG
  bool HasRegOp = false;
#endif
  unsigned CurOp = NumOps ? X86II::getOperandBias(Desc) : 0;
  for (unsigned i = CurOp; i != NumOps; ++i) {
    const MCOperand &MO = MI.getOperand(i);
    if (MO.isReg()) {
#ifndef NDEBUG
      HasRegOp = true;
#endif
      unsigned Reg = MO.getReg();
      if (Reg == X86::AH || Reg == X86::BH || Reg == X86::CH || Reg == X86::DH)
        UsesHighByteReg = true;
      // If it accesses SPL, BPL, SIL, or DIL, then it requires a REX prefix.
      if (X86II::isX86_64NonExtLowByteReg(Reg))
        Prefix.setLowerBound(REX);
    } else if (MO.isExpr() && STI.getTargetTriple().isX32()) {
      // GOTTPOFF and TLSDESC relocations require a REX prefix to allow
      // linker optimizations: even if the instructions we see may not require
      // any prefix, they may be replaced by instructions that do. This is
      // handled as a special case here so that it also works for hand-written
      // assembly without the user needing to write REX, as with GNU as.
      const auto *Ref = dyn_cast<MCSymbolRefExpr>(MO.getExpr());
      if (Ref && (Ref->getKind() == MCSymbolRefExpr::VK_GOTTPOFF ||
                  Ref->getKind() == MCSymbolRefExpr::VK_TLSDESC)) {
        Prefix.setLowerBound(REX);
      }
    }
  }
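  // For example (added for illustration): `movb %sil, %al` uses no extended
  // registers, but SIL is only addressable with a REX prefix, so the loop
  // above raises the lower bound to REX and a bare 0x40 byte is emitted.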
  if (MI.getFlags() & X86::IP_USE_REX)
    Prefix.setLowerBound(REX);
  if ((TSFlags & X86II::ExplicitOpPrefixMask) == X86II::ExplicitREX2Prefix ||
      MI.getFlags() & X86::IP_USE_REX2)
    Prefix.setLowerBound(REX2);
  switch (TSFlags & X86II::FormMask) {
  default:
    assert(!HasRegOp && "Unexpected form in emitREXPrefix!");
    break;
  case X86II::RawFrm:
  case X86II::RawFrmMemOffs:
  case X86II::RawFrmSrc:
  case X86II::RawFrmDst:
  case X86II::RawFrmDstSrc:
    break;
  case X86II::AddRegFrm:
    Prefix.setBB2(MI, CurOp++);
    break;
  case X86II::MRMSrcReg:
  case X86II::MRMSrcRegCC:
    Prefix.setRR2(MI, CurOp++);
    Prefix.setBB2(MI, CurOp++);
    break;
  case X86II::MRMSrcMem:
  case X86II::MRMSrcMemCC:
    Prefix.setRR2(MI, CurOp++);
    Prefix.setBB2(MI, MemOperand + X86::AddrBaseReg);
    Prefix.setXX2(MI, MemOperand + X86::AddrIndexReg);
    CurOp += X86::AddrNumOperands;
    break;
  case X86II::MRMDestReg:
    Prefix.setBB2(MI, CurOp++);
    Prefix.setRR2(MI, CurOp++);
    break;
  case X86II::MRMDestMem:
    Prefix.setBB2(MI, MemOperand + X86::AddrBaseReg);
    Prefix.setXX2(MI, MemOperand + X86::AddrIndexReg);
    CurOp += X86::AddrNumOperands;
    Prefix.setRR2(MI, CurOp++);
    break;
  case X86II::MRMXmCC:
  case X86II::MRMXm:
  case X86II::MRM0m:
  case X86II::MRM1m:
  case X86II::MRM2m:
  case X86II::MRM3m:
  case X86II::MRM4m:
  case X86II::MRM5m:
  case X86II::MRM6m:
  case X86II::MRM7m:
    Prefix.setBB2(MI, MemOperand + X86::AddrBaseReg);
    Prefix.setXX2(MI, MemOperand + X86::AddrIndexReg);
    break;
  case X86II::MRMXrCC:
  case X86II::MRMXr:
  case X86II::MRM0r:
  case X86II::MRM1r:
  case X86II::MRM2r:
  case X86II::MRM3r:
  case X86II::MRM4r:
  case X86II::MRM5r:
  case X86II::MRM6r:
  case X86II::MRM7r:
    Prefix.setBB2(MI, CurOp++);
    break;
  }
  Prefix.setM((TSFlags & X86II::OpMapMask) == X86II::TB);
  PrefixKind Kind = Prefix.determineOptimalKind();
  if (Kind && UsesHighByteReg)
    report_fatal_error(
        "Cannot encode high byte register in REX-prefixed instruction");
  Prefix.emit(CB);
  return Kind;
}

/// Emit a segment override opcode prefix as needed.
void X86MCCodeEmitter::emitSegmentOverridePrefix(
    unsigned SegOperand, const MCInst &MI, SmallVectorImpl<char> &CB) const {
  // Check for an explicit segment override on the memory operand.
  if (unsigned Reg = MI.getOperand(SegOperand).getReg())
    emitByte(X86::getSegmentOverridePrefixForReg(Reg), CB);
}

/// Emit all instruction prefixes prior to the opcode.
///
/// \param MemOperand the operand # of the start of a memory operand if
/// present. If not present, it is -1.
///
/// \returns the used prefix (REX or None).
PrefixKind X86MCCodeEmitter::emitOpcodePrefix(int MemOperand, const MCInst &MI,
                                              const MCSubtargetInfo &STI,
                                              SmallVectorImpl<char> &CB) const {
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  uint64_t TSFlags = Desc.TSFlags;

  // Emit the operand size opcode prefix as needed.
  if ((TSFlags & X86II::OpSizeMask) ==
      (STI.hasFeature(X86::Is16Bit) ? X86II::OpSize32 : X86II::OpSize16))
    emitByte(0x66, CB);

  // Emit the LOCK opcode prefix.
  if (TSFlags & X86II::LOCK || MI.getFlags() & X86::IP_HAS_LOCK)
    emitByte(0xF0, CB);

  // Emit the NOTRACK opcode prefix.
  if (TSFlags & X86II::NOTRACK || MI.getFlags() & X86::IP_HAS_NOTRACK)
    emitByte(0x3E, CB);

  switch (TSFlags & X86II::OpPrefixMask) {
  case X86II::PD: // 66
    emitByte(0x66, CB);
    break;
  case X86II::XS: // F3
    emitByte(0xF3, CB);
    break;
  case X86II::XD: // F2
    emitByte(0xF2, CB);
    break;
  }

  // Handle the REX prefix.
  assert((STI.hasFeature(X86::Is64Bit) || !(TSFlags & X86II::REX_W)) &&
         "REX.W requires 64bit mode.");
  PrefixKind Kind = emitREXPrefix(MemOperand, MI, STI, CB);

  // The 0x0F escape code must be emitted just before the opcode.
  switch (TSFlags & X86II::OpMapMask) {
  case X86II::TB: // Two-byte opcode map
    // Encoded by the M bit in REX2.
    if (Kind == REX2)
      break;
    [[fallthrough]];
  case X86II::T8:        // 0F 38
  case X86II::TA:        // 0F 3A
  case X86II::ThreeDNow: // 0F 0F, second 0F emitted by caller.
    emitByte(0x0F, CB);
    break;
  }

  switch (TSFlags & X86II::OpMapMask) {
  case X86II::T8: // 0F 38
    emitByte(0x38, CB);
    break;
  case X86II::TA: // 0F 3A
    emitByte(0x3A, CB);
    break;
  }

  return Kind;
}

void X86MCCodeEmitter::emitPrefix(const MCInst &MI, SmallVectorImpl<char> &CB,
                                  const MCSubtargetInfo &STI) const {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MCII.get(Opcode);
  uint64_t TSFlags = Desc.TSFlags;

  // Pseudo instructions don't get encoded.
  if (X86II::isPseudo(TSFlags))
    return;

  unsigned CurOp = X86II::getOperandBias(Desc);

  emitPrefixImpl(CurOp, MI, STI, CB);
}

void X86_MC::emitPrefix(MCCodeEmitter &MCE, const MCInst &MI,
                        SmallVectorImpl<char> &CB, const MCSubtargetInfo &STI) {
  static_cast<X86MCCodeEmitter &>(MCE).emitPrefix(MI, CB, STI);
}

void X86MCCodeEmitter::encodeInstruction(const MCInst &MI,
                                         SmallVectorImpl<char> &CB,
                                         SmallVectorImpl<MCFixup> &Fixups,
                                         const MCSubtargetInfo &STI) const {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MCII.get(Opcode);
  uint64_t TSFlags = Desc.TSFlags;

  // Pseudo instructions don't get encoded.
  if (X86II::isPseudo(TSFlags))
    return;

  unsigned NumOps = Desc.getNumOperands();
  unsigned CurOp = X86II::getOperandBias(Desc);

  uint64_t StartByte = CB.size();

  PrefixKind Kind = emitPrefixImpl(CurOp, MI, STI, CB);

  // Does it use the VEX.VVVV field?
  bool HasVEX_4V = TSFlags & X86II::VEX_4V;
  bool HasVEX_I8Reg = (TSFlags & X86II::ImmMask) == X86II::Imm8Reg;

  // Does it use the EVEX.aaa field?
  bool HasEVEX_K = TSFlags & X86II::EVEX_K;
  bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;

  // Used if a register is encoded in bits 7:4 of the immediate.
  unsigned I8RegNum = 0;

  uint8_t BaseOpcode = X86II::getBaseOpcodeFor(TSFlags);

  if ((TSFlags & X86II::OpMapMask) == X86II::ThreeDNow)
    BaseOpcode = 0x0F; // Weird 3DNow! encoding.

  unsigned OpcodeOffset = 0;

  bool IsND = X86II::hasNewDataDest(TSFlags);
  bool HasTwoConditionalOps = TSFlags & X86II::TwoConditionalOps;

  uint64_t Form = TSFlags & X86II::FormMask;
  switch (Form) {
  default:
    errs() << "FORM: " << Form << "\n";
    llvm_unreachable("Unknown FormMask value in X86MCCodeEmitter!");
  case X86II::Pseudo:
    llvm_unreachable("Pseudo instruction shouldn't be emitted");
  case X86II::RawFrmDstSrc:
  case X86II::RawFrmSrc:
  case X86II::RawFrmDst:
  case X86II::PrefixByte:
    emitByte(BaseOpcode, CB);
    break;
  case X86II::AddCCFrm: {
    // This will be added to the opcode in the fallthrough.
    OpcodeOffset = MI.getOperand(NumOps - 1).getImm();
    assert(OpcodeOffset < 16 && "Unexpected opcode offset!");
    --NumOps; // Drop the operand from the end.
    [[fallthrough]];
  case X86II::RawFrm:
    emitByte(BaseOpcode + OpcodeOffset, CB);

    if (!STI.hasFeature(X86::Is64Bit) || !isPCRel32Branch(MI, MCII))
      break;

    const MCOperand &Op = MI.getOperand(CurOp++);
    emitImmediate(Op, MI.getLoc(), X86II::getSizeOfImm(TSFlags),
                  MCFixupKind(X86::reloc_branch_4byte_pcrel), StartByte, CB,
                  Fixups);
    break;
  }
  case X86II::RawFrmMemOffs:
    emitByte(BaseOpcode, CB);
    emitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                  X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
                  StartByte, CB, Fixups);
    ++CurOp; // skip segment operand
    break;
  case X86II::RawFrmImm8:
    emitByte(BaseOpcode, CB);
    emitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                  X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
                  StartByte, CB, Fixups);
    emitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 1, FK_Data_1, StartByte,
                  CB, Fixups);
    break;
  case X86II::RawFrmImm16:
    emitByte(BaseOpcode, CB);
    emitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                  X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
                  StartByte, CB, Fixups);
    emitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 2, FK_Data_2, StartByte,
                  CB, Fixups);
    break;

  case X86II::AddRegFrm:
    emitByte(BaseOpcode + getX86RegNum(MI.getOperand(CurOp++)), CB);
    break;

  case X86II::MRMDestReg: {
    emitByte(BaseOpcode, CB);
    unsigned SrcRegNum = CurOp + 1;

    if (HasEVEX_K) // Skip writemask
      ++SrcRegNum;

    if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
      ++SrcRegNum;
    if (IsND) // Skip the NDD operand encoded in EVEX_VVVV
      ++CurOp;

    emitRegModRMByte(MI.getOperand(CurOp),
                     getX86RegNum(MI.getOperand(SrcRegNum)), CB);
    CurOp = SrcRegNum + 1;
    break;
  }
  case X86II::MRMDestRegCC: {
    unsigned FirstOp = CurOp++;
    unsigned SecondOp = CurOp++;
    unsigned CC = MI.getOperand(CurOp++).getImm();
    emitByte(BaseOpcode + CC, CB);
    emitRegModRMByte(MI.getOperand(FirstOp),
                     getX86RegNum(MI.getOperand(SecondOp)), CB);
    break;
  }
  case X86II::MRMDestMem4VOp3CC: {
    unsigned CC = MI.getOperand(8).getImm();
    emitByte(BaseOpcode + CC, CB);
    unsigned SrcRegNum = CurOp + X86::AddrNumOperands;
    emitMemModRMByte(MI, CurOp + 1, getX86RegNum(MI.getOperand(0)), TSFlags,
                     Kind, StartByte, CB, Fixups, STI, false);
    CurOp = SrcRegNum + 3; // Skip reg, VEX_4V and CC.
    break;
  }
  case X86II::MRMDestMemFSIB:
  case X86II::MRMDestMem: {
    emitByte(BaseOpcode, CB);
    unsigned SrcRegNum = CurOp + X86::AddrNumOperands;

    if (HasEVEX_K) // Skip writemask
      ++SrcRegNum;

    if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
      ++SrcRegNum;

    if (IsND) // Skip new data destination
      ++CurOp;

    bool ForceSIB = (Form == X86II::MRMDestMemFSIB);
    emitMemModRMByte(MI, CurOp, getX86RegNum(MI.getOperand(SrcRegNum)), TSFlags,
                     Kind, StartByte, CB, Fixups, STI, ForceSIB);
    CurOp = SrcRegNum + 1;
    break;
  }
  case X86II::MRMDestMemCC: {
    unsigned MemOp = CurOp;
    CurOp = MemOp + X86::AddrNumOperands;
    unsigned RegOp = CurOp++;
    unsigned CC = MI.getOperand(CurOp++).getImm();
    emitByte(BaseOpcode + CC, CB);
    emitMemModRMByte(MI, MemOp, getX86RegNum(MI.getOperand(RegOp)), TSFlags,
                     Kind, StartByte, CB, Fixups, STI);
    break;
  }
  case X86II::MRMSrcReg: {
    emitByte(BaseOpcode, CB);
    unsigned SrcRegNum = CurOp + 1;

    if (HasEVEX_K) // Skip writemask
      ++SrcRegNum;

    if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
      ++SrcRegNum;

    if (IsND) // Skip new data destination
      ++CurOp;

    emitRegModRMByte(MI.getOperand(SrcRegNum),
                     getX86RegNum(MI.getOperand(CurOp)), CB);
    CurOp = SrcRegNum + 1;
    if (HasVEX_I8Reg)
      I8RegNum = getX86RegEncoding(MI, CurOp++);
    // Do not count the rounding control operand.
    if (HasEVEX_RC)
      --NumOps;
    break;
  }
  case X86II::MRMSrcReg4VOp3: {
    emitByte(BaseOpcode, CB);
    unsigned SrcRegNum = CurOp + 1;

    emitRegModRMByte(MI.getOperand(SrcRegNum),
                     getX86RegNum(MI.getOperand(CurOp)), CB);
    CurOp = SrcRegNum + 1;
    ++CurOp; // Encoded in VEX.VVVV
    break;
  }
  case X86II::MRMSrcRegOp4: {
    emitByte(BaseOpcode, CB);
    unsigned SrcRegNum = CurOp + 1;

    // Skip 1st src (which is encoded in VEX_VVVV)
    ++SrcRegNum;

    // Capture 2nd src (which is encoded in Imm[7:4])
    assert(HasVEX_I8Reg && "MRMSrcRegOp4 should imply VEX_I8Reg");
    I8RegNum = getX86RegEncoding(MI, SrcRegNum++);

    emitRegModRMByte(MI.getOperand(SrcRegNum),
                     getX86RegNum(MI.getOperand(CurOp)), CB);
    CurOp = SrcRegNum + 1;
    break;
  }
  case X86II::MRMSrcRegCC: {
    if (IsND) // Skip new data destination
      ++CurOp;
    unsigned FirstOp = CurOp++;
    unsigned SecondOp = CurOp++;

    unsigned CC = MI.getOperand(CurOp++).getImm();
    emitByte(BaseOpcode + CC, CB);

    emitRegModRMByte(MI.getOperand(SecondOp),
                     getX86RegNum(MI.getOperand(FirstOp)), CB);
    break;
  }
  case X86II::MRMSrcMemFSIB:
  case X86II::MRMSrcMem: {
    unsigned FirstMemOp = CurOp + 1;

    if (IsND) // Skip new data destination
      CurOp++;

    if (HasEVEX_K) // Skip writemask
      ++FirstMemOp;

    if (HasVEX_4V)
      ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).

    emitByte(BaseOpcode, CB);

    bool ForceSIB = (Form == X86II::MRMSrcMemFSIB);
    emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(CurOp)),
                     TSFlags, Kind, StartByte, CB, Fixups, STI, ForceSIB);
    CurOp = FirstMemOp + X86::AddrNumOperands;
    if (HasVEX_I8Reg)
      I8RegNum = getX86RegEncoding(MI, CurOp++);
    break;
  }
  case X86II::MRMSrcMem4VOp3: {
    unsigned FirstMemOp = CurOp + 1;

    emitByte(BaseOpcode, CB);

    emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(CurOp)),
                     TSFlags, Kind, StartByte, CB, Fixups, STI);
    CurOp = FirstMemOp + X86::AddrNumOperands;
    ++CurOp; // Encoded in VEX.VVVV.
    break;
  }
  case X86II::MRMSrcMemOp4: {
    unsigned FirstMemOp = CurOp + 1;

    ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).

    // Capture second register source (encoded in Imm[7:4])
    assert(HasVEX_I8Reg && "MRMSrcMemOp4 should imply VEX_I8Reg");
    I8RegNum = getX86RegEncoding(MI, FirstMemOp++);

    emitByte(BaseOpcode, CB);

    emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(CurOp)),
                     TSFlags, Kind, StartByte, CB, Fixups, STI);
    CurOp = FirstMemOp + X86::AddrNumOperands;
    break;
  }
  case X86II::MRMSrcMemCC: {
    if (IsND) // Skip new data destination
      ++CurOp;
    unsigned RegOp = CurOp++;
    unsigned FirstMemOp = CurOp;
    CurOp = FirstMemOp + X86::AddrNumOperands;

    unsigned CC = MI.getOperand(CurOp++).getImm();
    emitByte(BaseOpcode + CC, CB);

    emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(RegOp)),
                     TSFlags, Kind, StartByte, CB, Fixups, STI);
    break;
  }

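  // MRMXrCC: the condition code is folded into the opcode byte; the ModRM
  // reg field is a don't-care and is emitted as zero.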
  case X86II::MRMXrCC: {
    unsigned RegOp = CurOp++;

    unsigned CC = MI.getOperand(CurOp++).getImm();
    emitByte(BaseOpcode + CC, CB);
    emitRegModRMByte(MI.getOperand(RegOp), 0, CB);
    break;
  }

  case X86II::MRMXr:
  case X86II::MRM0r:
  case X86II::MRM1r:
  case X86II::MRM2r:
  case X86II::MRM3r:
  case X86II::MRM4r:
  case X86II::MRM5r:
  case X86II::MRM6r:
  case X86II::MRM7r:
    if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
      ++CurOp;
    if (HasEVEX_K) // Skip writemask
      ++CurOp;
    emitByte(BaseOpcode, CB);
    emitRegModRMByte(MI.getOperand(CurOp++),
                     (Form == X86II::MRMXr) ? 0 : Form - X86II::MRM0r, CB);
    break;
  case X86II::MRMr0:
    emitByte(BaseOpcode, CB);
    emitByte(modRMByte(3, getX86RegNum(MI.getOperand(CurOp++)), 0), CB);
    break;

  case X86II::MRMXmCC: {
    unsigned FirstMemOp = CurOp;
    CurOp = FirstMemOp + X86::AddrNumOperands;

    unsigned CC = MI.getOperand(CurOp++).getImm();
    emitByte(BaseOpcode + CC, CB);

    emitMemModRMByte(MI, FirstMemOp, 0, TSFlags, Kind, StartByte, CB, Fixups,
                     STI);
    break;
  }

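  // MRMXm leaves the ModRM reg field as a don't-care (emitted as zero);
  // MRM0m..MRM7m use it as a /0../7 opcode extension.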
  case X86II::MRMXm:
  case X86II::MRM0m:
  case X86II::MRM1m:
  case X86II::MRM2m:
  case X86II::MRM3m:
  case X86II::MRM4m:
  case X86II::MRM5m:
  case X86II::MRM6m:
  case X86II::MRM7m:
    if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
      ++CurOp;
    if (HasEVEX_K) // Skip writemask
      ++CurOp;
    emitByte(BaseOpcode, CB);
    emitMemModRMByte(MI, CurOp,
                     (Form == X86II::MRMXm) ? 0 : Form - X86II::MRM0m, TSFlags,
                     Kind, StartByte, CB, Fixups, STI);
    CurOp += X86::AddrNumOperands;
    break;

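  // MRM0X..MRM7X hard-code the ModRM reg field as an opcode extension with
  // no register or memory operand: the ModRM byte is 0xC0 | (extension << 3).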
  case X86II::MRM0X:
  case X86II::MRM1X:
  case X86II::MRM2X:
  case X86II::MRM3X:
  case X86II::MRM4X:
  case X86II::MRM5X:
  case X86II::MRM6X:
  case X86II::MRM7X:
    emitByte(BaseOpcode, CB);
    emitByte(0xC0 + ((Form - X86II::MRM0X) << 3), CB);
    break;

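  // MRM_C0..MRM_FF: forms whose entire ModRM byte is a fixed value in the
  // C0-FF range, emitted verbatim after the opcode.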
  case X86II::MRM_C0:
  case X86II::MRM_C1:
  case X86II::MRM_C2:
  case X86II::MRM_C3:
  case X86II::MRM_C4:
  case X86II::MRM_C5:
  case X86II::MRM_C6:
  case X86II::MRM_C7:
  case X86II::MRM_C8:
  case X86II::MRM_C9:
  case X86II::MRM_CA:
  case X86II::MRM_CB:
  case X86II::MRM_CC:
  case X86II::MRM_CD:
  case X86II::MRM_CE:
  case X86II::MRM_CF:
  case X86II::MRM_D0:
  case X86II::MRM_D1:
  case X86II::MRM_D2:
  case X86II::MRM_D3:
  case X86II::MRM_D4:
  case X86II::MRM_D5:
  case X86II::MRM_D6:
  case X86II::MRM_D7:
  case X86II::MRM_D8:
  case X86II::MRM_D9:
  case X86II::MRM_DA:
  case X86II::MRM_DB:
  case X86II::MRM_DC:
  case X86II::MRM_DD:
  case X86II::MRM_DE:
  case X86II::MRM_DF:
  case X86II::MRM_E0:
  case X86II::MRM_E1:
  case X86II::MRM_E2:
  case X86II::MRM_E3:
  case X86II::MRM_E4:
  case X86II::MRM_E5:
  case X86II::MRM_E6:
  case X86II::MRM_E7:
  case X86II::MRM_E8:
  case X86II::MRM_E9:
  case X86II::MRM_EA:
  case X86II::MRM_EB:
  case X86II::MRM_EC:
  case X86II::MRM_ED:
  case X86II::MRM_EE:
  case X86II::MRM_EF:
  case X86II::MRM_F0:
  case X86II::MRM_F1:
  case X86II::MRM_F2:
  case X86II::MRM_F3:
  case X86II::MRM_F4:
  case X86II::MRM_F5:
  case X86II::MRM_F6:
  case X86II::MRM_F7:
  case X86II::MRM_F8:
  case X86II::MRM_F9:
  case X86II::MRM_FA:
  case X86II::MRM_FB:
  case X86II::MRM_FC:
  case X86II::MRM_FD:
  case X86II::MRM_FE:
  case X86II::MRM_FF:
    emitByte(BaseOpcode, CB);
    emitByte(0xC0 + Form - X86II::MRM_C0, CB);
    break;
  }

  if (HasVEX_I8Reg) {
    // The last source register of a 4-operand AVX instruction is encoded in
    // bits [7:4] of an immediate byte.
    assert(I8RegNum < 16 && "Register encoding out of range");
    I8RegNum <<= 4;
    if (CurOp != NumOps) {
      unsigned Val = MI.getOperand(CurOp++).getImm();
      assert(Val < 16 && "Immediate operand value out of range");
      I8RegNum |= Val;
    }
    emitImmediate(MCOperand::createImm(I8RegNum), MI.getLoc(), 1, FK_Data_1,
                  StartByte, CB, Fixups);
  } else {
    // If there is a remaining operand, it must be a trailing immediate. Emit it
    // according to the right size for the instruction. Some instructions
    // (SSE4a extrq and insertq) have two trailing immediates.

    // Skip the two trailing conditional operands, which are encoded in the
    // EVEX prefix.
    unsigned RemainingOps = NumOps - CurOp - 2 * HasTwoConditionalOps;
    while (RemainingOps) {
      emitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                    X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
                    StartByte, CB, Fixups);
      --RemainingOps;
    }
    CurOp += 2 * HasTwoConditionalOps;
  }

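  // 3DNow!: the actual opcode byte trails the operands; the 0F 0F escape was
  // emitted up front.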
  if ((TSFlags & X86II::OpMapMask) == X86II::ThreeDNow)
    emitByte(X86II::getBaseOpcodeFor(TSFlags), CB);

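  // x86 caps any single instruction at 15 bytes; longer encodings are
  // architecturally invalid (and typically fault on real hardware), so
  // diagnose them here.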
  if (CB.size() - StartByte > 15)
    Ctx.reportError(MI.getLoc(), "instruction length exceeds the limit of 15");
#ifndef NDEBUG
  // FIXME: Verify.
  if (/*!Desc.isVariadic() &&*/ CurOp != NumOps) {
    errs() << "Cannot encode all operands of: ";
    MI.dump();
    errs() << '\n';
    abort();
  }
#endif
}


MCCodeEmitter *llvm::createX86MCCodeEmitter(const MCInstrInfo &MCII,
                                            MCContext &Ctx) {
  return new X86MCCodeEmitter(MCII, Ctx);
}