//===-- X86MCTargetDesc.cpp - X86 Target Descriptions ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides X86 specific target descriptions.
//
//===----------------------------------------------------------------------===//

#include "X86MCTargetDesc.h"
#include "TargetInfo/X86TargetInfo.h"
#include "X86ATTInstPrinter.h"
#include "X86BaseInfo.h"
#include "X86IntelInstPrinter.h"
#include "X86MCAsmInfo.h"
#include "X86TargetStreamer.h"
#include "llvm/ADT/APInt.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCInstrAnalysis.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/TargetParser/Host.h"
#include "llvm/TargetParser/Triple.h"

using namespace llvm;

#define GET_REGINFO_MC_DESC
#include "X86GenRegisterInfo.inc"

#define GET_INSTRINFO_MC_DESC
#define GET_INSTRINFO_MC_HELPERS
#define ENABLE_INSTR_PREDICATE_VERIFIER
#include "X86GenInstrInfo.inc"

#define GET_SUBTARGETINFO_MC_DESC
#include "X86GenSubtargetInfo.inc"
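
// Compute the mode-related subtarget features implied by the triple: exactly
// one of 16/32/64-bit mode, plus SSE2 in 64-bit mode. The result seeds the
// feature string used by createX86MCSubtargetInfo() below.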
std::string X86_MC::ParseX86Triple(const Triple &TT) {
  std::string FS;
  // SSE2 should default to enabled in 64-bit mode, but can be turned off
  // explicitly.
  if (TT.isArch64Bit())
    FS = "+64bit-mode,-32bit-mode,-16bit-mode,+sse2";
  else if (TT.getEnvironment() != Triple::CODE16)
    FS = "-64bit-mode,+32bit-mode,-16bit-mode";
  else
    FS = "-64bit-mode,-32bit-mode,+16bit-mode";

  return FS;
}

unsigned X86_MC::getDwarfRegFlavour(const Triple &TT, bool isEH) {
  if (TT.getArch() == Triple::x86_64)
    return DWARFFlavour::X86_64;

  if (TT.isOSDarwin())
    return isEH ? DWARFFlavour::X86_32_DarwinEH : DWARFFlavour::X86_32_Generic;
  if (TT.isOSCygMing())
    // Unsupported for now; just a quick fallback.
    return DWARFFlavour::X86_32_Generic;
  return DWARFFlavour::X86_32_Generic;
}
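
// Returns true if the instruction carries an explicit LOCK prefix, as
// recorded in the MCInst flags.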
bool X86_MC::hasLockPrefix(const MCInst &MI) {
  return MI.getFlags() & X86::IP_HAS_LOCK;
}
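
// Returns true if the memory operand starting at index Op uses a base or
// index register drawn from the given register class.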
static bool isMemOperand(const MCInst &MI, unsigned Op, unsigned RegClassID) {
  const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &Index = MI.getOperand(Op + X86::AddrIndexReg);
  const MCRegisterClass &RC = X86MCRegisterClasses[RegClassID];

  return (Base.isReg() && Base.getReg() && RC.contains(Base.getReg())) ||
         (Index.isReg() && Index.getReg() && RC.contains(Index.getReg()));
}
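
// In 16-bit mode, a memory reference with neither a base nor an index
// register is a bare 16-bit displacement; otherwise look for 16-bit base or
// index registers.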
bool X86_MC::is16BitMemOperand(const MCInst &MI, unsigned Op,
                               const MCSubtargetInfo &STI) {
  const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &Index = MI.getOperand(Op + X86::AddrIndexReg);

  if (STI.hasFeature(X86::Is16Bit) && Base.isReg() && !Base.getReg() &&
      Index.isReg() && !Index.getReg())
    return true;
  return isMemOperand(MI, Op, X86::GR16RegClassID);
}
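
// EIP-based and EIZ-indexed forms imply 32-bit addressing even though
// neither register belongs to the GR32 register class.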
bool X86_MC::is32BitMemOperand(const MCInst &MI, unsigned Op) {
  const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &Index = MI.getOperand(Op + X86::AddrIndexReg);
  if (Base.isReg() && Base.getReg() == X86::EIP) {
    assert(Index.isReg() && !Index.getReg() && "Invalid eip-based address");
    return true;
  }
  if (Index.isReg() && Index.getReg() == X86::EIZ)
    return true;
  return isMemOperand(MI, Op, X86::GR32RegClassID);
}

#ifndef NDEBUG
bool X86_MC::is64BitMemOperand(const MCInst &MI, unsigned Op) {
  return isMemOperand(MI, Op, X86::GR64RegClassID);
}
#endif
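
// Returns true if the instruction needs a 0x67 address-size override prefix:
// either the encoding explicitly requests a non-native address size (AdSize),
// a string instruction uses SI/DI registers of the non-native width, or the
// memory operand's pointer width differs from the current mode.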
bool X86_MC::needsAddressSizeOverride(const MCInst &MI,
                                      const MCSubtargetInfo &STI,
                                      int MemoryOperand, uint64_t TSFlags) {
  uint64_t AdSize = TSFlags & X86II::AdSizeMask;
  bool Is16BitMode = STI.hasFeature(X86::Is16Bit);
  bool Is32BitMode = STI.hasFeature(X86::Is32Bit);
  bool Is64BitMode = STI.hasFeature(X86::Is64Bit);
  if ((Is16BitMode && AdSize == X86II::AdSize32) ||
      (Is32BitMode && AdSize == X86II::AdSize16) ||
      (Is64BitMode && AdSize == X86II::AdSize32))
    return true;
  uint64_t Form = TSFlags & X86II::FormMask;
  switch (Form) {
  default:
    break;
  case X86II::RawFrmDstSrc: {
    MCRegister siReg = MI.getOperand(1).getReg();
    assert(((siReg == X86::SI && MI.getOperand(0).getReg() == X86::DI) ||
            (siReg == X86::ESI && MI.getOperand(0).getReg() == X86::EDI) ||
            (siReg == X86::RSI && MI.getOperand(0).getReg() == X86::RDI)) &&
           "SI and DI register sizes do not match");
    return (!Is32BitMode && siReg == X86::ESI) ||
           (Is32BitMode && siReg == X86::SI);
  }
  case X86II::RawFrmSrc: {
    MCRegister siReg = MI.getOperand(0).getReg();
    return (!Is32BitMode && siReg == X86::ESI) ||
           (Is32BitMode && siReg == X86::SI);
  }
  case X86II::RawFrmDst: {
    MCRegister siReg = MI.getOperand(0).getReg();
    return (!Is32BitMode && siReg == X86::EDI) ||
           (Is32BitMode && siReg == X86::DI);
  }
  }

  // Determine where the memory operand starts, if present.
  if (MemoryOperand < 0)
    return false;

  if (STI.hasFeature(X86::Is64Bit)) {
    assert(!is16BitMemOperand(MI, MemoryOperand, STI));
    return is32BitMemOperand(MI, MemoryOperand);
  }
  if (STI.hasFeature(X86::Is32Bit)) {
    assert(!is64BitMemOperand(MI, MemoryOperand));
    return is16BitMemOperand(MI, MemoryOperand, STI);
  }
  assert(STI.hasFeature(X86::Is16Bit));
  assert(!is64BitMemOperand(MI, MemoryOperand));
  return !is16BitMemOperand(MI, MemoryOperand, STI);
}
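
// SEH register numbers coincide with the register encoding values, so they
// can be derived directly; the CodeView mapping needs the explicit table
// below.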
void X86_MC::initLLVMToSEHAndCVRegMapping(MCRegisterInfo *MRI) {
  // FIXME: TableGen these.
  for (unsigned Reg = X86::NoRegister + 1; Reg < X86::NUM_TARGET_REGS; ++Reg) {
    unsigned SEH = MRI->getEncodingValue(Reg);
    MRI->mapLLVMRegToSEHReg(Reg, SEH);
  }

  // Mapping from CodeView to MC register id.
  static const struct {
    codeview::RegisterId CVReg;
    MCPhysReg Reg;
  } RegMap[] = {
      {codeview::RegisterId::AL, X86::AL},
      {codeview::RegisterId::CL, X86::CL},
      {codeview::RegisterId::DL, X86::DL},
      {codeview::RegisterId::BL, X86::BL},
      {codeview::RegisterId::AH, X86::AH},
      {codeview::RegisterId::CH, X86::CH},
      {codeview::RegisterId::DH, X86::DH},
      {codeview::RegisterId::BH, X86::BH},
      {codeview::RegisterId::AX, X86::AX},
      {codeview::RegisterId::CX, X86::CX},
      {codeview::RegisterId::DX, X86::DX},
      {codeview::RegisterId::BX, X86::BX},
      {codeview::RegisterId::SP, X86::SP},
      {codeview::RegisterId::BP, X86::BP},
      {codeview::RegisterId::SI, X86::SI},
      {codeview::RegisterId::DI, X86::DI},
      {codeview::RegisterId::EAX, X86::EAX},
      {codeview::RegisterId::ECX, X86::ECX},
      {codeview::RegisterId::EDX, X86::EDX},
      {codeview::RegisterId::EBX, X86::EBX},
      {codeview::RegisterId::ESP, X86::ESP},
      {codeview::RegisterId::EBP, X86::EBP},
      {codeview::RegisterId::ESI, X86::ESI},
      {codeview::RegisterId::EDI, X86::EDI},

      {codeview::RegisterId::EFLAGS, X86::EFLAGS},

      {codeview::RegisterId::ST0, X86::ST0},
      {codeview::RegisterId::ST1, X86::ST1},
      {codeview::RegisterId::ST2, X86::ST2},
      {codeview::RegisterId::ST3, X86::ST3},
      {codeview::RegisterId::ST4, X86::ST4},
      {codeview::RegisterId::ST5, X86::ST5},
      {codeview::RegisterId::ST6, X86::ST6},
      {codeview::RegisterId::ST7, X86::ST7},

      {codeview::RegisterId::ST0, X86::FP0},
      {codeview::RegisterId::ST1, X86::FP1},
      {codeview::RegisterId::ST2, X86::FP2},
      {codeview::RegisterId::ST3, X86::FP3},
      {codeview::RegisterId::ST4, X86::FP4},
      {codeview::RegisterId::ST5, X86::FP5},
      {codeview::RegisterId::ST6, X86::FP6},
      {codeview::RegisterId::ST7, X86::FP7},

      {codeview::RegisterId::MM0, X86::MM0},
      {codeview::RegisterId::MM1, X86::MM1},
      {codeview::RegisterId::MM2, X86::MM2},
      {codeview::RegisterId::MM3, X86::MM3},
      {codeview::RegisterId::MM4, X86::MM4},
      {codeview::RegisterId::MM5, X86::MM5},
      {codeview::RegisterId::MM6, X86::MM6},
      {codeview::RegisterId::MM7, X86::MM7},

      {codeview::RegisterId::XMM0, X86::XMM0},
      {codeview::RegisterId::XMM1, X86::XMM1},
      {codeview::RegisterId::XMM2, X86::XMM2},
      {codeview::RegisterId::XMM3, X86::XMM3},
      {codeview::RegisterId::XMM4, X86::XMM4},
      {codeview::RegisterId::XMM5, X86::XMM5},
      {codeview::RegisterId::XMM6, X86::XMM6},
      {codeview::RegisterId::XMM7, X86::XMM7},

      {codeview::RegisterId::XMM8, X86::XMM8},
      {codeview::RegisterId::XMM9, X86::XMM9},
      {codeview::RegisterId::XMM10, X86::XMM10},
      {codeview::RegisterId::XMM11, X86::XMM11},
      {codeview::RegisterId::XMM12, X86::XMM12},
      {codeview::RegisterId::XMM13, X86::XMM13},
      {codeview::RegisterId::XMM14, X86::XMM14},
      {codeview::RegisterId::XMM15, X86::XMM15},

      {codeview::RegisterId::SIL, X86::SIL},
      {codeview::RegisterId::DIL, X86::DIL},
      {codeview::RegisterId::BPL, X86::BPL},
      {codeview::RegisterId::SPL, X86::SPL},
      {codeview::RegisterId::RAX, X86::RAX},
      {codeview::RegisterId::RBX, X86::RBX},
      {codeview::RegisterId::RCX, X86::RCX},
      {codeview::RegisterId::RDX, X86::RDX},
      {codeview::RegisterId::RSI, X86::RSI},
      {codeview::RegisterId::RDI, X86::RDI},
      {codeview::RegisterId::RBP, X86::RBP},
      {codeview::RegisterId::RSP, X86::RSP},
      {codeview::RegisterId::R8, X86::R8},
      {codeview::RegisterId::R9, X86::R9},
      {codeview::RegisterId::R10, X86::R10},
      {codeview::RegisterId::R11, X86::R11},
      {codeview::RegisterId::R12, X86::R12},
      {codeview::RegisterId::R13, X86::R13},
      {codeview::RegisterId::R14, X86::R14},
      {codeview::RegisterId::R15, X86::R15},
      {codeview::RegisterId::R8B, X86::R8B},
      {codeview::RegisterId::R9B, X86::R9B},
      {codeview::RegisterId::R10B, X86::R10B},
      {codeview::RegisterId::R11B, X86::R11B},
      {codeview::RegisterId::R12B, X86::R12B},
      {codeview::RegisterId::R13B, X86::R13B},
      {codeview::RegisterId::R14B, X86::R14B},
      {codeview::RegisterId::R15B, X86::R15B},
      {codeview::RegisterId::R8W, X86::R8W},
      {codeview::RegisterId::R9W, X86::R9W},
      {codeview::RegisterId::R10W, X86::R10W},
      {codeview::RegisterId::R11W, X86::R11W},
      {codeview::RegisterId::R12W, X86::R12W},
      {codeview::RegisterId::R13W, X86::R13W},
      {codeview::RegisterId::R14W, X86::R14W},
      {codeview::RegisterId::R15W, X86::R15W},
      {codeview::RegisterId::R8D, X86::R8D},
      {codeview::RegisterId::R9D, X86::R9D},
      {codeview::RegisterId::R10D, X86::R10D},
      {codeview::RegisterId::R11D, X86::R11D},
      {codeview::RegisterId::R12D, X86::R12D},
      {codeview::RegisterId::R13D, X86::R13D},
      {codeview::RegisterId::R14D, X86::R14D},
      {codeview::RegisterId::R15D, X86::R15D},
      {codeview::RegisterId::AMD64_YMM0, X86::YMM0},
      {codeview::RegisterId::AMD64_YMM1, X86::YMM1},
      {codeview::RegisterId::AMD64_YMM2, X86::YMM2},
      {codeview::RegisterId::AMD64_YMM3, X86::YMM3},
      {codeview::RegisterId::AMD64_YMM4, X86::YMM4},
      {codeview::RegisterId::AMD64_YMM5, X86::YMM5},
      {codeview::RegisterId::AMD64_YMM6, X86::YMM6},
      {codeview::RegisterId::AMD64_YMM7, X86::YMM7},
      {codeview::RegisterId::AMD64_YMM8, X86::YMM8},
      {codeview::RegisterId::AMD64_YMM9, X86::YMM9},
      {codeview::RegisterId::AMD64_YMM10, X86::YMM10},
      {codeview::RegisterId::AMD64_YMM11, X86::YMM11},
      {codeview::RegisterId::AMD64_YMM12, X86::YMM12},
      {codeview::RegisterId::AMD64_YMM13, X86::YMM13},
      {codeview::RegisterId::AMD64_YMM14, X86::YMM14},
      {codeview::RegisterId::AMD64_YMM15, X86::YMM15},
      {codeview::RegisterId::AMD64_YMM16, X86::YMM16},
      {codeview::RegisterId::AMD64_YMM17, X86::YMM17},
      {codeview::RegisterId::AMD64_YMM18, X86::YMM18},
      {codeview::RegisterId::AMD64_YMM19, X86::YMM19},
      {codeview::RegisterId::AMD64_YMM20, X86::YMM20},
      {codeview::RegisterId::AMD64_YMM21, X86::YMM21},
      {codeview::RegisterId::AMD64_YMM22, X86::YMM22},
      {codeview::RegisterId::AMD64_YMM23, X86::YMM23},
      {codeview::RegisterId::AMD64_YMM24, X86::YMM24},
      {codeview::RegisterId::AMD64_YMM25, X86::YMM25},
      {codeview::RegisterId::AMD64_YMM26, X86::YMM26},
      {codeview::RegisterId::AMD64_YMM27, X86::YMM27},
      {codeview::RegisterId::AMD64_YMM28, X86::YMM28},
      {codeview::RegisterId::AMD64_YMM29, X86::YMM29},
      {codeview::RegisterId::AMD64_YMM30, X86::YMM30},
      {codeview::RegisterId::AMD64_YMM31, X86::YMM31},
      {codeview::RegisterId::AMD64_ZMM0, X86::ZMM0},
      {codeview::RegisterId::AMD64_ZMM1, X86::ZMM1},
      {codeview::RegisterId::AMD64_ZMM2, X86::ZMM2},
      {codeview::RegisterId::AMD64_ZMM3, X86::ZMM3},
      {codeview::RegisterId::AMD64_ZMM4, X86::ZMM4},
      {codeview::RegisterId::AMD64_ZMM5, X86::ZMM5},
      {codeview::RegisterId::AMD64_ZMM6, X86::ZMM6},
      {codeview::RegisterId::AMD64_ZMM7, X86::ZMM7},
      {codeview::RegisterId::AMD64_ZMM8, X86::ZMM8},
      {codeview::RegisterId::AMD64_ZMM9, X86::ZMM9},
      {codeview::RegisterId::AMD64_ZMM10, X86::ZMM10},
      {codeview::RegisterId::AMD64_ZMM11, X86::ZMM11},
      {codeview::RegisterId::AMD64_ZMM12, X86::ZMM12},
      {codeview::RegisterId::AMD64_ZMM13, X86::ZMM13},
      {codeview::RegisterId::AMD64_ZMM14, X86::ZMM14},
      {codeview::RegisterId::AMD64_ZMM15, X86::ZMM15},
      {codeview::RegisterId::AMD64_ZMM16, X86::ZMM16},
      {codeview::RegisterId::AMD64_ZMM17, X86::ZMM17},
      {codeview::RegisterId::AMD64_ZMM18, X86::ZMM18},
      {codeview::RegisterId::AMD64_ZMM19, X86::ZMM19},
      {codeview::RegisterId::AMD64_ZMM20, X86::ZMM20},
      {codeview::RegisterId::AMD64_ZMM21, X86::ZMM21},
      {codeview::RegisterId::AMD64_ZMM22, X86::ZMM22},
      {codeview::RegisterId::AMD64_ZMM23, X86::ZMM23},
      {codeview::RegisterId::AMD64_ZMM24, X86::ZMM24},
      {codeview::RegisterId::AMD64_ZMM25, X86::ZMM25},
      {codeview::RegisterId::AMD64_ZMM26, X86::ZMM26},
      {codeview::RegisterId::AMD64_ZMM27, X86::ZMM27},
      {codeview::RegisterId::AMD64_ZMM28, X86::ZMM28},
      {codeview::RegisterId::AMD64_ZMM29, X86::ZMM29},
      {codeview::RegisterId::AMD64_ZMM30, X86::ZMM30},
      {codeview::RegisterId::AMD64_ZMM31, X86::ZMM31},
      {codeview::RegisterId::AMD64_K0, X86::K0},
      {codeview::RegisterId::AMD64_K1, X86::K1},
      {codeview::RegisterId::AMD64_K2, X86::K2},
      {codeview::RegisterId::AMD64_K3, X86::K3},
      {codeview::RegisterId::AMD64_K4, X86::K4},
      {codeview::RegisterId::AMD64_K5, X86::K5},
      {codeview::RegisterId::AMD64_K6, X86::K6},
      {codeview::RegisterId::AMD64_K7, X86::K7},
      {codeview::RegisterId::AMD64_XMM16, X86::XMM16},
      {codeview::RegisterId::AMD64_XMM17, X86::XMM17},
      {codeview::RegisterId::AMD64_XMM18, X86::XMM18},
      {codeview::RegisterId::AMD64_XMM19, X86::XMM19},
      {codeview::RegisterId::AMD64_XMM20, X86::XMM20},
      {codeview::RegisterId::AMD64_XMM21, X86::XMM21},
      {codeview::RegisterId::AMD64_XMM22, X86::XMM22},
      {codeview::RegisterId::AMD64_XMM23, X86::XMM23},
      {codeview::RegisterId::AMD64_XMM24, X86::XMM24},
      {codeview::RegisterId::AMD64_XMM25, X86::XMM25},
      {codeview::RegisterId::AMD64_XMM26, X86::XMM26},
      {codeview::RegisterId::AMD64_XMM27, X86::XMM27},
      {codeview::RegisterId::AMD64_XMM28, X86::XMM28},
      {codeview::RegisterId::AMD64_XMM29, X86::XMM29},
      {codeview::RegisterId::AMD64_XMM30, X86::XMM30},
      {codeview::RegisterId::AMD64_XMM31, X86::XMM31},
  };
  for (const auto &I : RegMap)
    MRI->mapLLVMRegToCVReg(I.Reg, static_cast<int>(I.CVReg));
}
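
// Build the subtarget feature string from the triple-implied mode features
// plus the user-supplied FS. Historically, any AVX512 feature implied 512-bit
// vectors, so if the last mention of AVX512F in FS enables it and evex512 is
// not stated either way, +evex512 is appended to preserve that behavior.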
MCSubtargetInfo *X86_MC::createX86MCSubtargetInfo(const Triple &TT,
                                                  StringRef CPU, StringRef FS) {
  std::string ArchFS = X86_MC::ParseX86Triple(TT);
  assert(!ArchFS.empty() && "Failed to parse X86 triple");
  if (!FS.empty())
    ArchFS = (Twine(ArchFS) + "," + FS).str();

  if (CPU.empty())
    CPU = "generic";

  size_t posNoEVEX512 = FS.rfind("-evex512");
  // Make sure we won't be cheated by "-avx512fp16".
  size_t posNoAVX512F =
      FS.ends_with("-avx512f") ? FS.size() - 8 : FS.rfind("-avx512f,");
  size_t posEVEX512 = FS.rfind("+evex512");
  size_t posAVX512F = FS.rfind("+avx512"); // Any AVX512XXX will enable AVX512F.

  if (posAVX512F != StringRef::npos &&
      (posNoAVX512F == StringRef::npos || posNoAVX512F < posAVX512F))
    if (posEVEX512 == StringRef::npos && posNoEVEX512 == StringRef::npos)
      ArchFS += ",+evex512";

  return createX86MCSubtargetInfoImpl(TT, CPU, /*TuneCPU*/ CPU, ArchFS);
}

static MCInstrInfo *createX86MCInstrInfo() {
  MCInstrInfo *X = new MCInstrInfo();
  InitX86MCInstrInfo(X);
  return X;
}

static MCRegisterInfo *createX86MCRegisterInfo(const Triple &TT) {
  unsigned RA = (TT.getArch() == Triple::x86_64)
                    ? X86::RIP  // Should have dwarf #16.
                    : X86::EIP; // Should have dwarf #8.

  MCRegisterInfo *X = new MCRegisterInfo();
  InitX86MCRegisterInfo(X, RA, X86_MC::getDwarfRegFlavour(TT, false),
                        X86_MC::getDwarfRegFlavour(TT, true), RA);
  X86_MC::initLLVMToSEHAndCVRegMapping(X);
  return X;
}

static MCAsmInfo *createX86MCAsmInfo(const MCRegisterInfo &MRI,
                                     const Triple &TheTriple,
                                     const MCTargetOptions &Options) {
  bool is64Bit = TheTriple.getArch() == Triple::x86_64;

  MCAsmInfo *MAI;
  if (TheTriple.isOSBinFormatMachO()) {
    if (is64Bit)
      MAI = new X86_64MCAsmInfoDarwin(TheTriple);
    else
      MAI = new X86MCAsmInfoDarwin(TheTriple);
  } else if (TheTriple.isOSBinFormatELF()) {
    // Force the use of an ELF container.
    MAI = new X86ELFMCAsmInfo(TheTriple);
  } else if (TheTriple.isWindowsMSVCEnvironment() ||
             TheTriple.isWindowsCoreCLREnvironment() || TheTriple.isUEFI()) {
    if (Options.getAssemblyLanguage().equals_insensitive("masm"))
      MAI = new X86MCAsmInfoMicrosoftMASM(TheTriple);
    else
      MAI = new X86MCAsmInfoMicrosoft(TheTriple);
  } else if (TheTriple.isOSCygMing() ||
             TheTriple.isWindowsItaniumEnvironment()) {
    MAI = new X86MCAsmInfoGNUCOFF(TheTriple);
  } else {
    // The default is ELF.
    MAI = new X86ELFMCAsmInfo(TheTriple);
  }

  // Initialize initial frame state.
  // Calculate the number of bytes used to store the return address.
  int stackGrowth = is64Bit ? -8 : -4;

  // Initial state of the frame pointer is esp+stackGrowth.
  unsigned StackPtr = is64Bit ? X86::RSP : X86::ESP;
  MCCFIInstruction Inst = MCCFIInstruction::cfiDefCfa(
      nullptr, MRI.getDwarfRegNum(StackPtr, true), -stackGrowth);
  MAI->addInitialFrameState(Inst);

  // Add the return address to the move list.
  unsigned InstPtr = is64Bit ? X86::RIP : X86::EIP;
  MCCFIInstruction Inst2 = MCCFIInstruction::createOffset(
      nullptr, MRI.getDwarfRegNum(InstPtr, true), stackGrowth);
  MAI->addInitialFrameState(Inst2);

  return MAI;
}
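
// Syntax variant 0 selects AT&T syntax, variant 1 selects Intel syntax; any
// other variant is unsupported.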
static MCInstPrinter *createX86MCInstPrinter(const Triple &T,
                                             unsigned SyntaxVariant,
                                             const MCAsmInfo &MAI,
                                             const MCInstrInfo &MII,
                                             const MCRegisterInfo &MRI) {
  if (SyntaxVariant == 0)
    return new X86ATTInstPrinter(MAI, MII, MRI);
  if (SyntaxVariant == 1)
    return new X86IntelInstPrinter(MAI, MII, MRI);
  return nullptr;
}

static MCRelocationInfo *createX86MCRelocationInfo(const Triple &TheTriple,
                                                   MCContext &Ctx) {
  // Default to the stock relocation info.
  return llvm::createMCRelocationInfo(TheTriple, Ctx);
}

namespace llvm {
namespace X86_MC {

class X86MCInstrAnalysis : public MCInstrAnalysis {
  X86MCInstrAnalysis(const X86MCInstrAnalysis &) = delete;
  X86MCInstrAnalysis &operator=(const X86MCInstrAnalysis &) = delete;
  virtual ~X86MCInstrAnalysis() = default;

public:
  X86MCInstrAnalysis(const MCInstrInfo *MCII) : MCInstrAnalysis(MCII) {}

#define GET_STIPREDICATE_DECLS_FOR_MC_ANALYSIS
#include "X86GenSubtargetInfo.inc"

  bool clearsSuperRegisters(const MCRegisterInfo &MRI, const MCInst &Inst,
                            APInt &Mask) const override;
  std::vector<std::pair<uint64_t, uint64_t>>
  findPltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents,
                 const MCSubtargetInfo &STI) const override;

  bool evaluateBranch(const MCInst &Inst, uint64_t Addr, uint64_t Size,
                      uint64_t &Target) const override;
  std::optional<uint64_t>
  evaluateMemoryOperandAddress(const MCInst &Inst, const MCSubtargetInfo *STI,
                               uint64_t Addr, uint64_t Size) const override;
  std::optional<uint64_t>
  getMemoryOperandRelocationOffset(const MCInst &Inst,
                                   uint64_t Size) const override;
};

#define GET_STIPREDICATE_DEFS_FOR_MC_ANALYSIS
#include "X86GenSubtargetInfo.inc"

bool X86MCInstrAnalysis::clearsSuperRegisters(const MCRegisterInfo &MRI,
                                              const MCInst &Inst,
                                              APInt &Mask) const {
  const MCInstrDesc &Desc = Info->get(Inst.getOpcode());
  unsigned NumDefs = Desc.getNumDefs();
  unsigned NumImplicitDefs = Desc.implicit_defs().size();
  assert(Mask.getBitWidth() == NumDefs + NumImplicitDefs &&
         "Unexpected number of bits in the mask!");

  bool HasVEX = (Desc.TSFlags & X86II::EncodingMask) == X86II::VEX;
  bool HasEVEX = (Desc.TSFlags & X86II::EncodingMask) == X86II::EVEX;
  bool HasXOP = (Desc.TSFlags & X86II::EncodingMask) == X86II::XOP;

  const MCRegisterClass &GR32RC = MRI.getRegClass(X86::GR32RegClassID);
  const MCRegisterClass &VR128XRC = MRI.getRegClass(X86::VR128XRegClassID);
  const MCRegisterClass &VR256XRC = MRI.getRegClass(X86::VR256XRegClassID);

  auto ClearsSuperReg = [=](unsigned RegID) {
    // On X86-64, a general purpose integer register is viewed as a 64-bit
    // register internal to the processor.
    // An update to the lower 32 bits of a 64 bit integer register is
    // architecturally defined to zero extend the upper 32 bits.
    if (GR32RC.contains(RegID))
      return true;

    // Early exit if this instruction has no vex/evex/xop prefix.
    if (!HasEVEX && !HasVEX && !HasXOP)
      return false;

    // All VEX and EVEX encoded instructions are defined to zero the high bits
    // of the destination register up to VLMAX (i.e. the maximum vector
    // register width pertaining to the instruction).
    // We assume the same behavior for XOP instructions too.
    return VR128XRC.contains(RegID) || VR256XRC.contains(RegID);
  };

  Mask.clearAllBits();
  for (unsigned I = 0, E = NumDefs; I < E; ++I) {
    const MCOperand &Op = Inst.getOperand(I);
    if (ClearsSuperReg(Op.getReg()))
      Mask.setBit(I);
  }

  for (unsigned I = 0, E = NumImplicitDefs; I < E; ++I) {
    const MCPhysReg Reg = Desc.implicit_defs()[I];
    if (ClearsSuperReg(Reg))
      Mask.setBit(NumDefs + I);
  }

  return Mask.getBoolValue();
}
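
// 32-bit PLT entries start with either "jmp *imm32(%ebx)" (ff a3: PIC, GOT
// base in %ebx) or "jmp *imm32" (ff 25: non-PIC, absolute GOT slot address).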
static std::vector<std::pair<uint64_t, uint64_t>>
findX86PltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents) {
  // Do a lightweight parsing of PLT entries.
  std::vector<std::pair<uint64_t, uint64_t>> Result;
  for (uint64_t Byte = 0, End = PltContents.size(); Byte + 6 < End;) {
    // Recognize a jmp.
    if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0xa3) {
      // The jmp instruction at the beginning of each PLT entry jumps to the
      // address of the base of the .got.plt section plus the immediate.
      // Set the 1 << 32 bit to let ELFObjectFileBase::getPltEntries convert
      // the offset to an address. Imm may be a negative int32_t if the GOT
      // entry is in .got.
      uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
      Result.emplace_back(PltSectionVA + Byte, Imm | (uint64_t(1) << 32));
      Byte += 6;
    } else if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0x25) {
      // The jmp instruction at the beginning of each PLT entry jumps to the
      // immediate.
      uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
      Result.push_back(std::make_pair(PltSectionVA + Byte, Imm));
      Byte += 6;
    } else
      Byte++;
  }
  return Result;
}
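
// 64-bit PLT entries start with "jmp *imm32(%rip)" (ff 25), so the GOT slot
// address is the end of the 6-byte jmp plus the immediate.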
static std::vector<std::pair<uint64_t, uint64_t>>
findX86_64PltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents) {
  // Do a lightweight parsing of PLT entries.
  std::vector<std::pair<uint64_t, uint64_t>> Result;
  for (uint64_t Byte = 0, End = PltContents.size(); Byte + 6 < End;) {
    // Recognize a jmp.
    if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0x25) {
      // The jmp instruction at the beginning of each PLT entry jumps to the
      // address of the next instruction plus the immediate.
      uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
      Result.push_back(
          std::make_pair(PltSectionVA + Byte, PltSectionVA + Byte + 6 + Imm));
      Byte += 6;
    } else
      Byte++;
  }
  return Result;
}

std::vector<std::pair<uint64_t, uint64_t>>
X86MCInstrAnalysis::findPltEntries(uint64_t PltSectionVA,
                                   ArrayRef<uint8_t> PltContents,
                                   const MCSubtargetInfo &STI) const {
  const Triple &TargetTriple = STI.getTargetTriple();
  switch (TargetTriple.getArch()) {
  case Triple::x86:
    return findX86PltEntries(PltSectionVA, PltContents);
  case Triple::x86_64:
    return findX86_64PltEntries(PltSectionVA, PltContents);
  default:
    return {};
  }
}
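
// Only branches whose first operand is PC-relative can be evaluated: the
// target is the address of the next instruction plus the immediate.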
bool X86MCInstrAnalysis::evaluateBranch(const MCInst &Inst, uint64_t Addr,
                                        uint64_t Size, uint64_t &Target) const {
  if (Inst.getNumOperands() == 0 ||
      Info->get(Inst.getOpcode()).operands()[0].OperandType !=
          MCOI::OPERAND_PCREL)
    return false;
  Target = Addr + Size + Inst.getOperand(0).getImm();
  return true;
}
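
// Only a plain RIP-relative form (no segment or index register, scale 1, and
// an immediate displacement) can be evaluated without register state.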
std::optional<uint64_t> X86MCInstrAnalysis::evaluateMemoryOperandAddress(
    const MCInst &Inst, const MCSubtargetInfo *STI, uint64_t Addr,
    uint64_t Size) const {
  const MCInstrDesc &MCID = Info->get(Inst.getOpcode());
  int MemOpStart = X86II::getMemoryOperandNo(MCID.TSFlags);
  if (MemOpStart == -1)
    return std::nullopt;
  MemOpStart += X86II::getOperandBias(MCID);

  const MCOperand &SegReg = Inst.getOperand(MemOpStart + X86::AddrSegmentReg);
  const MCOperand &BaseReg = Inst.getOperand(MemOpStart + X86::AddrBaseReg);
  const MCOperand &IndexReg = Inst.getOperand(MemOpStart + X86::AddrIndexReg);
  const MCOperand &ScaleAmt = Inst.getOperand(MemOpStart + X86::AddrScaleAmt);
  const MCOperand &Disp = Inst.getOperand(MemOpStart + X86::AddrDisp);
  if (SegReg.getReg() || IndexReg.getReg() || ScaleAmt.getImm() != 1 ||
      !Disp.isImm())
    return std::nullopt;

  // RIP-relative addressing.
  if (BaseReg.getReg() == X86::RIP)
    return Addr + Size + Disp.getImm();

  return std::nullopt;
}
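
// For a RIP-relative LEA64r the relocation applies to the trailing 32-bit
// displacement, so its offset within the instruction is Size - 4.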
std::optional<uint64_t>
X86MCInstrAnalysis::getMemoryOperandRelocationOffset(const MCInst &Inst,
                                                     uint64_t Size) const {
  if (Inst.getOpcode() != X86::LEA64r)
    return std::nullopt;
  const MCInstrDesc &MCID = Info->get(Inst.getOpcode());
  int MemOpStart = X86II::getMemoryOperandNo(MCID.TSFlags);
  if (MemOpStart == -1)
    return std::nullopt;
  MemOpStart += X86II::getOperandBias(MCID);
  const MCOperand &SegReg = Inst.getOperand(MemOpStart + X86::AddrSegmentReg);
  const MCOperand &BaseReg = Inst.getOperand(MemOpStart + X86::AddrBaseReg);
  const MCOperand &IndexReg = Inst.getOperand(MemOpStart + X86::AddrIndexReg);
  const MCOperand &ScaleAmt = Inst.getOperand(MemOpStart + X86::AddrScaleAmt);
  const MCOperand &Disp = Inst.getOperand(MemOpStart + X86::AddrDisp);
  // Must be a simple rip-relative address.
  if (BaseReg.getReg() != X86::RIP || SegReg.getReg() || IndexReg.getReg() ||
      ScaleAmt.getImm() != 1 || !Disp.isImm())
    return std::nullopt;
  // rip-relative ModR/M immediate is 32 bits.
  assert(Size > 4 && "invalid instruction size for rip-relative lea");
  return Size - 4;
}

} // end of namespace X86_MC

} // end of namespace llvm

static MCInstrAnalysis *createX86MCInstrAnalysis(const MCInstrInfo *Info) {
  return new X86_MC::X86MCInstrAnalysis(Info);
}

// Force static initialization.
extern "C" LLVM_C_ABI void LLVMInitializeX86TargetMC() {
  for (Target *T : {&getTheX86_32Target(), &getTheX86_64Target()}) {
    // Register the MC asm info.
    RegisterMCAsmInfoFn X(*T, createX86MCAsmInfo);

    // Register the MC instruction info.
    TargetRegistry::RegisterMCInstrInfo(*T, createX86MCInstrInfo);

    // Register the MC register info.
    TargetRegistry::RegisterMCRegInfo(*T, createX86MCRegisterInfo);

    // Register the MC subtarget info.
    TargetRegistry::RegisterMCSubtargetInfo(*T,
                                            X86_MC::createX86MCSubtargetInfo);

    // Register the MC instruction analyzer.
    TargetRegistry::RegisterMCInstrAnalysis(*T, createX86MCInstrAnalysis);

    // Register the code emitter.
    TargetRegistry::RegisterMCCodeEmitter(*T, createX86MCCodeEmitter);

    // Register the obj target streamer.
    TargetRegistry::RegisterObjectTargetStreamer(*T,
                                                 createX86ObjectTargetStreamer);

    // Register the asm target streamer.
    TargetRegistry::RegisterAsmTargetStreamer(*T, createX86AsmTargetStreamer);

    // Register the null streamer.
    TargetRegistry::RegisterNullTargetStreamer(*T, createX86NullTargetStreamer);

    TargetRegistry::RegisterCOFFStreamer(*T, createX86WinCOFFStreamer);
    TargetRegistry::RegisterELFStreamer(*T, createX86ELFStreamer);

    // Register the MCInstPrinter.
    TargetRegistry::RegisterMCInstPrinter(*T, createX86MCInstPrinter);

    // Register the MC relocation info.
    TargetRegistry::RegisterMCRelocationInfo(*T, createX86MCRelocationInfo);
  }

  // Register the asm backend.
  TargetRegistry::RegisterMCAsmBackend(getTheX86_32Target(),
                                       createX86_32AsmBackend);
  TargetRegistry::RegisterMCAsmBackend(getTheX86_64Target(),
                                       createX86_64AsmBackend);
}
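
// Map Reg to its sub- or super-register of the given bit Size within the same
// GPR family, or X86::NoRegister if there is none. With Size == 8, High
// selects the high-byte register where one exists. For example:
//   getX86SubSuperRegister(X86::AX, 64)                == X86::RAX
//   getX86SubSuperRegister(X86::RAX, 8, /*High=*/true) == X86::AH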
MCRegister llvm::getX86SubSuperRegister(MCRegister Reg, unsigned Size,
                                        bool High) {
#define DEFAULT_NOREG \
  default: \
    return X86::NoRegister;
#define SUB_SUPER(R1, R2, R3, R4, R) \
  case X86::R1: \
  case X86::R2: \
  case X86::R3: \
  case X86::R4: \
    return X86::R;
#define A_SUB_SUPER(R) \
  case X86::AH: \
    SUB_SUPER(AL, AX, EAX, RAX, R)
#define D_SUB_SUPER(R) \
  case X86::DH: \
    SUB_SUPER(DL, DX, EDX, RDX, R)
#define C_SUB_SUPER(R) \
  case X86::CH: \
    SUB_SUPER(CL, CX, ECX, RCX, R)
#define B_SUB_SUPER(R) \
  case X86::BH: \
    SUB_SUPER(BL, BX, EBX, RBX, R)
#define SI_SUB_SUPER(R) SUB_SUPER(SIL, SI, ESI, RSI, R)
#define DI_SUB_SUPER(R) SUB_SUPER(DIL, DI, EDI, RDI, R)
#define BP_SUB_SUPER(R) SUB_SUPER(BPL, BP, EBP, RBP, R)
#define SP_SUB_SUPER(R) SUB_SUPER(SPL, SP, ESP, RSP, R)
#define NO_SUB_SUPER(NO, REG) \
  SUB_SUPER(R##NO##B, R##NO##W, R##NO##D, R##NO, REG)
#define NO_SUB_SUPER_B(NO) NO_SUB_SUPER(NO, R##NO##B)
#define NO_SUB_SUPER_W(NO) NO_SUB_SUPER(NO, R##NO##W)
#define NO_SUB_SUPER_D(NO) NO_SUB_SUPER(NO, R##NO##D)
#define NO_SUB_SUPER_Q(NO) NO_SUB_SUPER(NO, R##NO)
  switch (Size) {
  default:
    llvm_unreachable("illegal register size");
  case 8:
    if (High) {
      switch (Reg.id()) {
        DEFAULT_NOREG
        A_SUB_SUPER(AH)
        D_SUB_SUPER(DH)
        C_SUB_SUPER(CH)
        B_SUB_SUPER(BH)
      }
    } else {
      switch (Reg.id()) {
        DEFAULT_NOREG
        A_SUB_SUPER(AL)
        D_SUB_SUPER(DL)
        C_SUB_SUPER(CL)
        B_SUB_SUPER(BL)
        SI_SUB_SUPER(SIL)
        DI_SUB_SUPER(DIL)
        BP_SUB_SUPER(BPL)
        SP_SUB_SUPER(SPL)
        NO_SUB_SUPER_B(8)
        NO_SUB_SUPER_B(9)
        NO_SUB_SUPER_B(10)
        NO_SUB_SUPER_B(11)
        NO_SUB_SUPER_B(12)
        NO_SUB_SUPER_B(13)
        NO_SUB_SUPER_B(14)
        NO_SUB_SUPER_B(15)
        NO_SUB_SUPER_B(16)
        NO_SUB_SUPER_B(17)
        NO_SUB_SUPER_B(18)
        NO_SUB_SUPER_B(19)
        NO_SUB_SUPER_B(20)
        NO_SUB_SUPER_B(21)
        NO_SUB_SUPER_B(22)
        NO_SUB_SUPER_B(23)
        NO_SUB_SUPER_B(24)
        NO_SUB_SUPER_B(25)
        NO_SUB_SUPER_B(26)
        NO_SUB_SUPER_B(27)
        NO_SUB_SUPER_B(28)
        NO_SUB_SUPER_B(29)
        NO_SUB_SUPER_B(30)
        NO_SUB_SUPER_B(31)
      }
    }
  case 16:
    switch (Reg.id()) {
      DEFAULT_NOREG
      A_SUB_SUPER(AX)
      D_SUB_SUPER(DX)
      C_SUB_SUPER(CX)
      B_SUB_SUPER(BX)
      SI_SUB_SUPER(SI)
      DI_SUB_SUPER(DI)
      BP_SUB_SUPER(BP)
      SP_SUB_SUPER(SP)
      NO_SUB_SUPER_W(8)
      NO_SUB_SUPER_W(9)
      NO_SUB_SUPER_W(10)
      NO_SUB_SUPER_W(11)
      NO_SUB_SUPER_W(12)
      NO_SUB_SUPER_W(13)
      NO_SUB_SUPER_W(14)
      NO_SUB_SUPER_W(15)
      NO_SUB_SUPER_W(16)
      NO_SUB_SUPER_W(17)
      NO_SUB_SUPER_W(18)
      NO_SUB_SUPER_W(19)
      NO_SUB_SUPER_W(20)
      NO_SUB_SUPER_W(21)
      NO_SUB_SUPER_W(22)
      NO_SUB_SUPER_W(23)
      NO_SUB_SUPER_W(24)
      NO_SUB_SUPER_W(25)
      NO_SUB_SUPER_W(26)
      NO_SUB_SUPER_W(27)
      NO_SUB_SUPER_W(28)
      NO_SUB_SUPER_W(29)
      NO_SUB_SUPER_W(30)
      NO_SUB_SUPER_W(31)
    }
  case 32:
    switch (Reg.id()) {
      DEFAULT_NOREG
      A_SUB_SUPER(EAX)
      D_SUB_SUPER(EDX)
      C_SUB_SUPER(ECX)
      B_SUB_SUPER(EBX)
      SI_SUB_SUPER(ESI)
      DI_SUB_SUPER(EDI)
      BP_SUB_SUPER(EBP)
      SP_SUB_SUPER(ESP)
      NO_SUB_SUPER_D(8)
      NO_SUB_SUPER_D(9)
      NO_SUB_SUPER_D(10)
      NO_SUB_SUPER_D(11)
      NO_SUB_SUPER_D(12)
      NO_SUB_SUPER_D(13)
      NO_SUB_SUPER_D(14)
      NO_SUB_SUPER_D(15)
      NO_SUB_SUPER_D(16)
      NO_SUB_SUPER_D(17)
      NO_SUB_SUPER_D(18)
      NO_SUB_SUPER_D(19)
      NO_SUB_SUPER_D(20)
      NO_SUB_SUPER_D(21)
      NO_SUB_SUPER_D(22)
      NO_SUB_SUPER_D(23)
      NO_SUB_SUPER_D(24)
      NO_SUB_SUPER_D(25)
      NO_SUB_SUPER_D(26)
      NO_SUB_SUPER_D(27)
      NO_SUB_SUPER_D(28)
      NO_SUB_SUPER_D(29)
      NO_SUB_SUPER_D(30)
      NO_SUB_SUPER_D(31)
    }
  case 64:
    switch (Reg.id()) {
      DEFAULT_NOREG
      A_SUB_SUPER(RAX)
      D_SUB_SUPER(RDX)
      C_SUB_SUPER(RCX)
      B_SUB_SUPER(RBX)
      SI_SUB_SUPER(RSI)
      DI_SUB_SUPER(RDI)
      BP_SUB_SUPER(RBP)
      SP_SUB_SUPER(RSP)
      NO_SUB_SUPER_Q(8)
      NO_SUB_SUPER_Q(9)
      NO_SUB_SUPER_Q(10)
      NO_SUB_SUPER_Q(11)
      NO_SUB_SUPER_Q(12)
      NO_SUB_SUPER_Q(13)
      NO_SUB_SUPER_Q(14)
      NO_SUB_SUPER_Q(15)
      NO_SUB_SUPER_Q(16)
      NO_SUB_SUPER_Q(17)
      NO_SUB_SUPER_Q(18)
      NO_SUB_SUPER_Q(19)
      NO_SUB_SUPER_Q(20)
      NO_SUB_SUPER_Q(21)
      NO_SUB_SUPER_Q(22)
      NO_SUB_SUPER_Q(23)
      NO_SUB_SUPER_Q(24)
      NO_SUB_SUPER_Q(25)
      NO_SUB_SUPER_Q(26)
      NO_SUB_SUPER_Q(27)
      NO_SUB_SUPER_Q(28)
      NO_SUB_SUPER_Q(29)
      NO_SUB_SUPER_Q(30)
      NO_SUB_SUPER_Q(31)
    }
  }
}