//===-- X86MCTargetDesc.cpp - X86 Target Descriptions ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides X86 specific target descriptions.
//
//===----------------------------------------------------------------------===//

#include "X86MCTargetDesc.h"
#include "TargetInfo/X86TargetInfo.h"
#include "X86ATTInstPrinter.h"
#include "X86BaseInfo.h"
#include "X86IntelInstPrinter.h"
#include "X86MCAsmInfo.h"
#include "X86TargetStreamer.h"
#include "llvm/ADT/APInt.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCInstrAnalysis.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MachineLocation.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/TargetParser/Host.h"
#include "llvm/TargetParser/Triple.h"

using namespace llvm;

#define GET_REGINFO_MC_DESC
#include "X86GenRegisterInfo.inc"

#define GET_INSTRINFO_MC_DESC
#define GET_INSTRINFO_MC_HELPERS
#define ENABLE_INSTR_PREDICATE_VERIFIER
#include "X86GenInstrInfo.inc"

#define GET_SUBTARGETINFO_MC_DESC
#include "X86GenSubtargetInfo.inc"

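// Compute the mode-related subtarget features implied by the target triple.
// For example, an x86_64 triple yields
// "+64bit-mode,-32bit-mode,-16bit-mode,+sse2".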
std::string X86_MC::ParseX86Triple(const Triple &TT) {
  std::string FS;
  // SSE2 should default to enabled in 64-bit mode, but can be turned off
  // explicitly.
  if (TT.isArch64Bit())
    FS = "+64bit-mode,-32bit-mode,-16bit-mode,+sse2";
  else if (TT.getEnvironment() != Triple::CODE16)
    FS = "-64bit-mode,+32bit-mode,-16bit-mode";
  else
    FS = "-64bit-mode,-32bit-mode,+16bit-mode";

  return FS;
}

unsigned X86_MC::getDwarfRegFlavour(const Triple &TT, bool isEH) {
  if (TT.getArch() == Triple::x86_64)
    return DWARFFlavour::X86_64;

  if (TT.isOSDarwin())
    return isEH ? DWARFFlavour::X86_32_DarwinEH : DWARFFlavour::X86_32_Generic;
  if (TT.isOSCygMing())
    // Not supported for now; just fall back to the generic flavour.
    return DWARFFlavour::X86_32_Generic;
  return DWARFFlavour::X86_32_Generic;
}

bool X86_MC::hasLockPrefix(const MCInst &MI) {
  return MI.getFlags() & X86::IP_HAS_LOCK;
}

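// Return true if the memory operand starting at index Op addresses memory
// through a base or index register belonging to the given register class
// (e.g. GR16 for 16-bit addressing).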
static bool isMemOperand(const MCInst &MI, unsigned Op, unsigned RegClassID) {
  const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &Index = MI.getOperand(Op + X86::AddrIndexReg);
  const MCRegisterClass &RC = X86MCRegisterClasses[RegClassID];

  return (Base.isReg() && Base.getReg() != 0 && RC.contains(Base.getReg())) ||
         (Index.isReg() && Index.getReg() != 0 && RC.contains(Index.getReg()));
}

bool X86_MC::is16BitMemOperand(const MCInst &MI, unsigned Op,
                               const MCSubtargetInfo &STI) {
  const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &Index = MI.getOperand(Op + X86::AddrIndexReg);

  if (STI.hasFeature(X86::Is16Bit) && Base.isReg() && Base.getReg() == 0 &&
      Index.isReg() && Index.getReg() == 0)
    return true;
  return isMemOperand(MI, Op, X86::GR16RegClassID);
}

bool X86_MC::is32BitMemOperand(const MCInst &MI, unsigned Op) {
  const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &Index = MI.getOperand(Op + X86::AddrIndexReg);
  if (Base.isReg() && Base.getReg() == X86::EIP) {
    assert(Index.isReg() && Index.getReg() == 0 && "Invalid eip-based address");
    return true;
  }
  if (Index.isReg() && Index.getReg() == X86::EIZ)
    return true;
  return isMemOperand(MI, Op, X86::GR32RegClassID);
}

#ifndef NDEBUG
bool X86_MC::is64BitMemOperand(const MCInst &MI, unsigned Op) {
  return isMemOperand(MI, Op, X86::GR64RegClassID);
}
#endif

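// Return true if the instruction needs an address-size override prefix (0x67):
// either the encoding explicitly requests a non-default address size via the
// AdSize TSFlags, or its (possibly implicit) memory operands use registers
// whose width differs from the current mode's default address size.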
bool X86_MC::needsAddressSizeOverride(const MCInst &MI,
                                      const MCSubtargetInfo &STI,
                                      int MemoryOperand, uint64_t TSFlags) {
  uint64_t AdSize = TSFlags & X86II::AdSizeMask;
  bool Is16BitMode = STI.hasFeature(X86::Is16Bit);
  bool Is32BitMode = STI.hasFeature(X86::Is32Bit);
  bool Is64BitMode = STI.hasFeature(X86::Is64Bit);
  if ((Is16BitMode && AdSize == X86II::AdSize32) ||
      (Is32BitMode && AdSize == X86II::AdSize16) ||
      (Is64BitMode && AdSize == X86II::AdSize32))
    return true;
  uint64_t Form = TSFlags & X86II::FormMask;
  switch (Form) {
  default:
    break;
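  // String instructions (e.g. MOVS, CMPS, STOS, LODS) address memory through
  // implicit (E/R)SI and/or (E/R)DI operands; the width of those registers
  // determines the effective address size.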
  case X86II::RawFrmDstSrc: {
    unsigned siReg = MI.getOperand(1).getReg();
    assert(((siReg == X86::SI && MI.getOperand(0).getReg() == X86::DI) ||
            (siReg == X86::ESI && MI.getOperand(0).getReg() == X86::EDI) ||
            (siReg == X86::RSI && MI.getOperand(0).getReg() == X86::RDI)) &&
           "SI and DI register sizes do not match");
    return (!Is32BitMode && siReg == X86::ESI) ||
           (Is32BitMode && siReg == X86::SI);
  }
  case X86II::RawFrmSrc: {
    unsigned siReg = MI.getOperand(0).getReg();
    return (!Is32BitMode && siReg == X86::ESI) ||
           (Is32BitMode && siReg == X86::SI);
  }
  case X86II::RawFrmDst: {
    unsigned siReg = MI.getOperand(0).getReg();
    return (!Is32BitMode && siReg == X86::EDI) ||
           (Is32BitMode && siReg == X86::DI);
  }
  }

  // Determine where the memory operand starts, if present.
  if (MemoryOperand < 0)
    return false;

  if (STI.hasFeature(X86::Is64Bit)) {
    assert(!is16BitMemOperand(MI, MemoryOperand, STI));
    return is32BitMemOperand(MI, MemoryOperand);
  }
  if (STI.hasFeature(X86::Is32Bit)) {
    assert(!is64BitMemOperand(MI, MemoryOperand));
    return is16BitMemOperand(MI, MemoryOperand, STI);
  }
  assert(STI.hasFeature(X86::Is16Bit));
  assert(!is64BitMemOperand(MI, MemoryOperand));
  return !is16BitMemOperand(MI, MemoryOperand, STI);
}

void X86_MC::initLLVMToSEHAndCVRegMapping(MCRegisterInfo *MRI) {
  // FIXME: TableGen these.
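  // On x86 the Win64 SEH register numbers coincide with the hardware encoding
  // values, so this mapping is simply the identity on encodings.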
  for (unsigned Reg = X86::NoRegister + 1; Reg < X86::NUM_TARGET_REGS; ++Reg) {
    unsigned SEH = MRI->getEncodingValue(Reg);
    MRI->mapLLVMRegToSEHReg(Reg, SEH);
  }

  // Mapping from CodeView to MC register id.
  static const struct {
    codeview::RegisterId CVReg;
    MCPhysReg Reg;
  } RegMap[] = {
      {codeview::RegisterId::AL, X86::AL},
      {codeview::RegisterId::CL, X86::CL},
      {codeview::RegisterId::DL, X86::DL},
      {codeview::RegisterId::BL, X86::BL},
      {codeview::RegisterId::AH, X86::AH},
      {codeview::RegisterId::CH, X86::CH},
      {codeview::RegisterId::DH, X86::DH},
      {codeview::RegisterId::BH, X86::BH},
      {codeview::RegisterId::AX, X86::AX},
      {codeview::RegisterId::CX, X86::CX},
      {codeview::RegisterId::DX, X86::DX},
      {codeview::RegisterId::BX, X86::BX},
      {codeview::RegisterId::SP, X86::SP},
      {codeview::RegisterId::BP, X86::BP},
      {codeview::RegisterId::SI, X86::SI},
      {codeview::RegisterId::DI, X86::DI},
      {codeview::RegisterId::EAX, X86::EAX},
      {codeview::RegisterId::ECX, X86::ECX},
      {codeview::RegisterId::EDX, X86::EDX},
      {codeview::RegisterId::EBX, X86::EBX},
      {codeview::RegisterId::ESP, X86::ESP},
      {codeview::RegisterId::EBP, X86::EBP},
      {codeview::RegisterId::ESI, X86::ESI},
      {codeview::RegisterId::EDI, X86::EDI},

      {codeview::RegisterId::EFLAGS, X86::EFLAGS},

      {codeview::RegisterId::ST0, X86::ST0},
      {codeview::RegisterId::ST1, X86::ST1},
      {codeview::RegisterId::ST2, X86::ST2},
      {codeview::RegisterId::ST3, X86::ST3},
      {codeview::RegisterId::ST4, X86::ST4},
      {codeview::RegisterId::ST5, X86::ST5},
      {codeview::RegisterId::ST6, X86::ST6},
      {codeview::RegisterId::ST7, X86::ST7},

      {codeview::RegisterId::ST0, X86::FP0},
      {codeview::RegisterId::ST1, X86::FP1},
      {codeview::RegisterId::ST2, X86::FP2},
      {codeview::RegisterId::ST3, X86::FP3},
      {codeview::RegisterId::ST4, X86::FP4},
      {codeview::RegisterId::ST5, X86::FP5},
      {codeview::RegisterId::ST6, X86::FP6},
      {codeview::RegisterId::ST7, X86::FP7},

      {codeview::RegisterId::MM0, X86::MM0},
      {codeview::RegisterId::MM1, X86::MM1},
      {codeview::RegisterId::MM2, X86::MM2},
      {codeview::RegisterId::MM3, X86::MM3},
      {codeview::RegisterId::MM4, X86::MM4},
      {codeview::RegisterId::MM5, X86::MM5},
      {codeview::RegisterId::MM6, X86::MM6},
      {codeview::RegisterId::MM7, X86::MM7},

      {codeview::RegisterId::XMM0, X86::XMM0},
      {codeview::RegisterId::XMM1, X86::XMM1},
      {codeview::RegisterId::XMM2, X86::XMM2},
      {codeview::RegisterId::XMM3, X86::XMM3},
      {codeview::RegisterId::XMM4, X86::XMM4},
      {codeview::RegisterId::XMM5, X86::XMM5},
      {codeview::RegisterId::XMM6, X86::XMM6},
      {codeview::RegisterId::XMM7, X86::XMM7},

      {codeview::RegisterId::XMM8, X86::XMM8},
      {codeview::RegisterId::XMM9, X86::XMM9},
      {codeview::RegisterId::XMM10, X86::XMM10},
      {codeview::RegisterId::XMM11, X86::XMM11},
      {codeview::RegisterId::XMM12, X86::XMM12},
      {codeview::RegisterId::XMM13, X86::XMM13},
      {codeview::RegisterId::XMM14, X86::XMM14},
      {codeview::RegisterId::XMM15, X86::XMM15},

      {codeview::RegisterId::SIL, X86::SIL},
      {codeview::RegisterId::DIL, X86::DIL},
      {codeview::RegisterId::BPL, X86::BPL},
      {codeview::RegisterId::SPL, X86::SPL},
      {codeview::RegisterId::RAX, X86::RAX},
      {codeview::RegisterId::RBX, X86::RBX},
      {codeview::RegisterId::RCX, X86::RCX},
      {codeview::RegisterId::RDX, X86::RDX},
      {codeview::RegisterId::RSI, X86::RSI},
      {codeview::RegisterId::RDI, X86::RDI},
      {codeview::RegisterId::RBP, X86::RBP},
      {codeview::RegisterId::RSP, X86::RSP},
      {codeview::RegisterId::R8, X86::R8},
      {codeview::RegisterId::R9, X86::R9},
      {codeview::RegisterId::R10, X86::R10},
      {codeview::RegisterId::R11, X86::R11},
      {codeview::RegisterId::R12, X86::R12},
      {codeview::RegisterId::R13, X86::R13},
      {codeview::RegisterId::R14, X86::R14},
      {codeview::RegisterId::R15, X86::R15},
      {codeview::RegisterId::R8B, X86::R8B},
      {codeview::RegisterId::R9B, X86::R9B},
      {codeview::RegisterId::R10B, X86::R10B},
      {codeview::RegisterId::R11B, X86::R11B},
      {codeview::RegisterId::R12B, X86::R12B},
      {codeview::RegisterId::R13B, X86::R13B},
      {codeview::RegisterId::R14B, X86::R14B},
      {codeview::RegisterId::R15B, X86::R15B},
      {codeview::RegisterId::R8W, X86::R8W},
      {codeview::RegisterId::R9W, X86::R9W},
      {codeview::RegisterId::R10W, X86::R10W},
      {codeview::RegisterId::R11W, X86::R11W},
      {codeview::RegisterId::R12W, X86::R12W},
      {codeview::RegisterId::R13W, X86::R13W},
      {codeview::RegisterId::R14W, X86::R14W},
      {codeview::RegisterId::R15W, X86::R15W},
      {codeview::RegisterId::R8D, X86::R8D},
      {codeview::RegisterId::R9D, X86::R9D},
      {codeview::RegisterId::R10D, X86::R10D},
      {codeview::RegisterId::R11D, X86::R11D},
      {codeview::RegisterId::R12D, X86::R12D},
      {codeview::RegisterId::R13D, X86::R13D},
      {codeview::RegisterId::R14D, X86::R14D},
      {codeview::RegisterId::R15D, X86::R15D},
      {codeview::RegisterId::AMD64_YMM0, X86::YMM0},
      {codeview::RegisterId::AMD64_YMM1, X86::YMM1},
      {codeview::RegisterId::AMD64_YMM2, X86::YMM2},
      {codeview::RegisterId::AMD64_YMM3, X86::YMM3},
      {codeview::RegisterId::AMD64_YMM4, X86::YMM4},
      {codeview::RegisterId::AMD64_YMM5, X86::YMM5},
      {codeview::RegisterId::AMD64_YMM6, X86::YMM6},
      {codeview::RegisterId::AMD64_YMM7, X86::YMM7},
      {codeview::RegisterId::AMD64_YMM8, X86::YMM8},
      {codeview::RegisterId::AMD64_YMM9, X86::YMM9},
      {codeview::RegisterId::AMD64_YMM10, X86::YMM10},
      {codeview::RegisterId::AMD64_YMM11, X86::YMM11},
      {codeview::RegisterId::AMD64_YMM12, X86::YMM12},
      {codeview::RegisterId::AMD64_YMM13, X86::YMM13},
      {codeview::RegisterId::AMD64_YMM14, X86::YMM14},
      {codeview::RegisterId::AMD64_YMM15, X86::YMM15},
      {codeview::RegisterId::AMD64_YMM16, X86::YMM16},
      {codeview::RegisterId::AMD64_YMM17, X86::YMM17},
      {codeview::RegisterId::AMD64_YMM18, X86::YMM18},
      {codeview::RegisterId::AMD64_YMM19, X86::YMM19},
      {codeview::RegisterId::AMD64_YMM20, X86::YMM20},
      {codeview::RegisterId::AMD64_YMM21, X86::YMM21},
      {codeview::RegisterId::AMD64_YMM22, X86::YMM22},
      {codeview::RegisterId::AMD64_YMM23, X86::YMM23},
      {codeview::RegisterId::AMD64_YMM24, X86::YMM24},
      {codeview::RegisterId::AMD64_YMM25, X86::YMM25},
      {codeview::RegisterId::AMD64_YMM26, X86::YMM26},
      {codeview::RegisterId::AMD64_YMM27, X86::YMM27},
      {codeview::RegisterId::AMD64_YMM28, X86::YMM28},
      {codeview::RegisterId::AMD64_YMM29, X86::YMM29},
      {codeview::RegisterId::AMD64_YMM30, X86::YMM30},
      {codeview::RegisterId::AMD64_YMM31, X86::YMM31},
      {codeview::RegisterId::AMD64_ZMM0, X86::ZMM0},
      {codeview::RegisterId::AMD64_ZMM1, X86::ZMM1},
      {codeview::RegisterId::AMD64_ZMM2, X86::ZMM2},
      {codeview::RegisterId::AMD64_ZMM3, X86::ZMM3},
      {codeview::RegisterId::AMD64_ZMM4, X86::ZMM4},
      {codeview::RegisterId::AMD64_ZMM5, X86::ZMM5},
      {codeview::RegisterId::AMD64_ZMM6, X86::ZMM6},
      {codeview::RegisterId::AMD64_ZMM7, X86::ZMM7},
      {codeview::RegisterId::AMD64_ZMM8, X86::ZMM8},
      {codeview::RegisterId::AMD64_ZMM9, X86::ZMM9},
      {codeview::RegisterId::AMD64_ZMM10, X86::ZMM10},
      {codeview::RegisterId::AMD64_ZMM11, X86::ZMM11},
      {codeview::RegisterId::AMD64_ZMM12, X86::ZMM12},
      {codeview::RegisterId::AMD64_ZMM13, X86::ZMM13},
      {codeview::RegisterId::AMD64_ZMM14, X86::ZMM14},
      {codeview::RegisterId::AMD64_ZMM15, X86::ZMM15},
      {codeview::RegisterId::AMD64_ZMM16, X86::ZMM16},
      {codeview::RegisterId::AMD64_ZMM17, X86::ZMM17},
      {codeview::RegisterId::AMD64_ZMM18, X86::ZMM18},
      {codeview::RegisterId::AMD64_ZMM19, X86::ZMM19},
      {codeview::RegisterId::AMD64_ZMM20, X86::ZMM20},
      {codeview::RegisterId::AMD64_ZMM21, X86::ZMM21},
      {codeview::RegisterId::AMD64_ZMM22, X86::ZMM22},
      {codeview::RegisterId::AMD64_ZMM23, X86::ZMM23},
      {codeview::RegisterId::AMD64_ZMM24, X86::ZMM24},
      {codeview::RegisterId::AMD64_ZMM25, X86::ZMM25},
      {codeview::RegisterId::AMD64_ZMM26, X86::ZMM26},
      {codeview::RegisterId::AMD64_ZMM27, X86::ZMM27},
      {codeview::RegisterId::AMD64_ZMM28, X86::ZMM28},
      {codeview::RegisterId::AMD64_ZMM29, X86::ZMM29},
      {codeview::RegisterId::AMD64_ZMM30, X86::ZMM30},
      {codeview::RegisterId::AMD64_ZMM31, X86::ZMM31},
      {codeview::RegisterId::AMD64_K0, X86::K0},
      {codeview::RegisterId::AMD64_K1, X86::K1},
      {codeview::RegisterId::AMD64_K2, X86::K2},
      {codeview::RegisterId::AMD64_K3, X86::K3},
      {codeview::RegisterId::AMD64_K4, X86::K4},
      {codeview::RegisterId::AMD64_K5, X86::K5},
      {codeview::RegisterId::AMD64_K6, X86::K6},
      {codeview::RegisterId::AMD64_K7, X86::K7},
      {codeview::RegisterId::AMD64_XMM16, X86::XMM16},
      {codeview::RegisterId::AMD64_XMM17, X86::XMM17},
      {codeview::RegisterId::AMD64_XMM18, X86::XMM18},
      {codeview::RegisterId::AMD64_XMM19, X86::XMM19},
      {codeview::RegisterId::AMD64_XMM20, X86::XMM20},
      {codeview::RegisterId::AMD64_XMM21, X86::XMM21},
      {codeview::RegisterId::AMD64_XMM22, X86::XMM22},
      {codeview::RegisterId::AMD64_XMM23, X86::XMM23},
      {codeview::RegisterId::AMD64_XMM24, X86::XMM24},
      {codeview::RegisterId::AMD64_XMM25, X86::XMM25},
      {codeview::RegisterId::AMD64_XMM26, X86::XMM26},
      {codeview::RegisterId::AMD64_XMM27, X86::XMM27},
      {codeview::RegisterId::AMD64_XMM28, X86::XMM28},
      {codeview::RegisterId::AMD64_XMM29, X86::XMM29},
      {codeview::RegisterId::AMD64_XMM30, X86::XMM30},
      {codeview::RegisterId::AMD64_XMM31, X86::XMM31},
  };
  for (const auto &I : RegMap)
    MRI->mapLLVMRegToCVReg(I.Reg, static_cast<int>(I.CVReg));
}

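// Build the MCSubtargetInfo for a triple. The feature string combines the
// mode features implied by the triple with any user-supplied features; since
// user features are appended last and later entries take precedence, explicit
// user settings win.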
MCSubtargetInfo *X86_MC::createX86MCSubtargetInfo(const Triple &TT,
                                                  StringRef CPU, StringRef FS) {
  std::string ArchFS = X86_MC::ParseX86Triple(TT);
  assert(!ArchFS.empty() && "Failed to parse X86 triple");
  if (!FS.empty())
    ArchFS = (Twine(ArchFS) + "," + FS).str();

  if (CPU.empty())
    CPU = "generic";

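  // AVX512 instructions can be legal with either a 256-bit or a 512-bit
  // maximum vector length. If some AVX512 feature is enabled and the user said
  // nothing about evex512, default to +evex512 so 512-bit vectors remain
  // available.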
  size_t posNoEVEX512 = FS.rfind("-evex512");
  // Make sure we won't be cheated by "-avx512fp16".
  size_t posNoAVX512F =
      FS.ends_with("-avx512f") ? FS.size() - 8 : FS.rfind("-avx512f,");
  size_t posEVEX512 = FS.rfind("+evex512");
  size_t posAVX512F = FS.rfind("+avx512"); // Any AVX512XXX will enable AVX512F.

  if (posAVX512F != StringRef::npos &&
      (posNoAVX512F == StringRef::npos || posNoAVX512F < posAVX512F))
    if (posEVEX512 == StringRef::npos && posNoEVEX512 == StringRef::npos)
      ArchFS += ",+evex512";

  return createX86MCSubtargetInfoImpl(TT, CPU, /*TuneCPU*/ CPU, ArchFS);
}

static MCInstrInfo *createX86MCInstrInfo() {
  MCInstrInfo *X = new MCInstrInfo();
  InitX86MCInstrInfo(X);
  return X;
}

static MCRegisterInfo *createX86MCRegisterInfo(const Triple &TT) {
  unsigned RA = (TT.getArch() == Triple::x86_64)
                    ? X86::RIP  // Should have dwarf #16.
                    : X86::EIP; // Should have dwarf #8.

  MCRegisterInfo *X = new MCRegisterInfo();
  InitX86MCRegisterInfo(X, RA, X86_MC::getDwarfRegFlavour(TT, false),
                        X86_MC::getDwarfRegFlavour(TT, true), RA);
  X86_MC::initLLVMToSEHAndCVRegMapping(X);
  return X;
}

static MCAsmInfo *createX86MCAsmInfo(const MCRegisterInfo &MRI,
                                     const Triple &TheTriple,
                                     const MCTargetOptions &Options) {
  bool is64Bit = TheTriple.getArch() == Triple::x86_64;

  MCAsmInfo *MAI;
  if (TheTriple.isOSBinFormatMachO()) {
    if (is64Bit)
      MAI = new X86_64MCAsmInfoDarwin(TheTriple);
    else
      MAI = new X86MCAsmInfoDarwin(TheTriple);
  } else if (TheTriple.isOSBinFormatELF()) {
    // Force the use of an ELF container.
    MAI = new X86ELFMCAsmInfo(TheTriple);
  } else if (TheTriple.isWindowsMSVCEnvironment() ||
             TheTriple.isWindowsCoreCLREnvironment()) {
    if (Options.getAssemblyLanguage().equals_insensitive("masm"))
      MAI = new X86MCAsmInfoMicrosoftMASM(TheTriple);
    else
      MAI = new X86MCAsmInfoMicrosoft(TheTriple);
  } else if (TheTriple.isOSCygMing() ||
             TheTriple.isWindowsItaniumEnvironment()) {
    MAI = new X86MCAsmInfoGNUCOFF(TheTriple);
  } else if (TheTriple.isUEFI()) {
    MAI = new X86MCAsmInfoGNUCOFF(TheTriple);
  } else {
    // The default is ELF.
    MAI = new X86ELFMCAsmInfo(TheTriple);
  }

  // Initialize the initial frame state.
  // Calculate the number of bytes used to store the return address.
  int stackGrowth = is64Bit ? -8 : -4;

  // Initial state of the frame pointer is esp+stackGrowth.
  unsigned StackPtr = is64Bit ? X86::RSP : X86::ESP;
  MCCFIInstruction Inst = MCCFIInstruction::cfiDefCfa(
      nullptr, MRI.getDwarfRegNum(StackPtr, true), -stackGrowth);
  MAI->addInitialFrameState(Inst);

  // Add the return address to the move list.
  unsigned InstPtr = is64Bit ? X86::RIP : X86::EIP;
  MCCFIInstruction Inst2 = MCCFIInstruction::createOffset(
      nullptr, MRI.getDwarfRegNum(InstPtr, true), stackGrowth);
  MAI->addInitialFrameState(Inst2);

  return MAI;
}

static MCInstPrinter *createX86MCInstPrinter(const Triple &T,
                                             unsigned SyntaxVariant,
                                             const MCAsmInfo &MAI,
                                             const MCInstrInfo &MII,
                                             const MCRegisterInfo &MRI) {
  if (SyntaxVariant == 0)
    return new X86ATTInstPrinter(MAI, MII, MRI);
  if (SyntaxVariant == 1)
    return new X86IntelInstPrinter(MAI, MII, MRI);
  return nullptr;
}

static MCRelocationInfo *createX86MCRelocationInfo(const Triple &TheTriple,
                                                   MCContext &Ctx) {
  // Default to the stock relocation info.
  return llvm::createMCRelocationInfo(TheTriple, Ctx);
}

namespace llvm {
namespace X86_MC {

class X86MCInstrAnalysis : public MCInstrAnalysis {
  X86MCInstrAnalysis(const X86MCInstrAnalysis &) = delete;
  X86MCInstrAnalysis &operator=(const X86MCInstrAnalysis &) = delete;
  virtual ~X86MCInstrAnalysis() = default;

public:
  X86MCInstrAnalysis(const MCInstrInfo *MCII) : MCInstrAnalysis(MCII) {}

#define GET_STIPREDICATE_DECLS_FOR_MC_ANALYSIS
#include "X86GenSubtargetInfo.inc"

  bool clearsSuperRegisters(const MCRegisterInfo &MRI, const MCInst &Inst,
                            APInt &Mask) const override;
  std::vector<std::pair<uint64_t, uint64_t>>
  findPltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents,
                 const Triple &TargetTriple) const override;

  bool evaluateBranch(const MCInst &Inst, uint64_t Addr, uint64_t Size,
                      uint64_t &Target) const override;
  std::optional<uint64_t>
  evaluateMemoryOperandAddress(const MCInst &Inst, const MCSubtargetInfo *STI,
                               uint64_t Addr, uint64_t Size) const override;
  std::optional<uint64_t>
  getMemoryOperandRelocationOffset(const MCInst &Inst,
                                   uint64_t Size) const override;
};

#define GET_STIPREDICATE_DEFS_FOR_MC_ANALYSIS
#include "X86GenSubtargetInfo.inc"

bool X86MCInstrAnalysis::clearsSuperRegisters(const MCRegisterInfo &MRI,
                                              const MCInst &Inst,
                                              APInt &Mask) const {
  const MCInstrDesc &Desc = Info->get(Inst.getOpcode());
  unsigned NumDefs = Desc.getNumDefs();
  unsigned NumImplicitDefs = Desc.implicit_defs().size();
  assert(Mask.getBitWidth() == NumDefs + NumImplicitDefs &&
         "Unexpected number of bits in the mask!");

  bool HasVEX = (Desc.TSFlags & X86II::EncodingMask) == X86II::VEX;
  bool HasEVEX = (Desc.TSFlags & X86II::EncodingMask) == X86II::EVEX;
  bool HasXOP = (Desc.TSFlags & X86II::EncodingMask) == X86II::XOP;

  const MCRegisterClass &GR32RC = MRI.getRegClass(X86::GR32RegClassID);
  const MCRegisterClass &VR128XRC = MRI.getRegClass(X86::VR128XRegClassID);
  const MCRegisterClass &VR256XRC = MRI.getRegClass(X86::VR256XRegClassID);

  auto ClearsSuperReg = [=](unsigned RegID) {
    // On X86-64, a general purpose integer register is viewed as a 64-bit
    // register internal to the processor.
    // An update to the lower 32 bits of a 64-bit integer register is
    // architecturally defined to zero extend the upper 32 bits.
    if (GR32RC.contains(RegID))
      return true;

    // Early exit if this instruction has no vex/evex/xop prefix.
    if (!HasEVEX && !HasVEX && !HasXOP)
      return false;

    // All VEX and EVEX encoded instructions are defined to zero the high bits
    // of the destination register up to VLMAX (i.e. the maximum vector
    // register width pertaining to the instruction).
    // We assume the same behavior for XOP instructions too.
    return VR128XRC.contains(RegID) || VR256XRC.contains(RegID);
  };

  Mask.clearAllBits();
  for (unsigned I = 0, E = NumDefs; I < E; ++I) {
    const MCOperand &Op = Inst.getOperand(I);
    if (ClearsSuperReg(Op.getReg()))
      Mask.setBit(I);
  }

  for (unsigned I = 0, E = NumImplicitDefs; I < E; ++I) {
    const MCPhysReg Reg = Desc.implicit_defs()[I];
    if (ClearsSuperReg(Reg))
      Mask.setBit(NumDefs + I);
  }

  return Mask.getBoolValue();
}

static std::vector<std::pair<uint64_t, uint64_t>>
findX86PltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents) {
  // Do a lightweight parsing of PLT entries.
  std::vector<std::pair<uint64_t, uint64_t>> Result;
  for (uint64_t Byte = 0, End = PltContents.size(); Byte + 6 < End;) {
    // Recognize a jmp.
    if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0xa3) {
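      // 0xff 0xa3 <imm32> disassembles as "jmp *imm32(%ebx)", the PIC PLT
      // form in which %ebx holds the address of the GOT.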
      // The jmp instruction at the beginning of each PLT entry jumps to the
      // address of the base of the .got.plt section plus the immediate.
      // Set the 1 << 32 bit to let ELFObjectFileBase::getPltEntries convert
      // the offset to an address. Imm may be a negative int32_t if the GOT
      // entry is in .got.
      uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
      Result.emplace_back(PltSectionVA + Byte, Imm | (uint64_t(1) << 32));
      Byte += 6;
    } else if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0x25) {
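      // 0xff 0x25 <imm32> disassembles as "jmp *imm32", an absolute indirect
      // jump through a fixed GOT slot (the non-PIC form).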
      // The jmp instruction at the beginning of each PLT entry jumps to the
      // immediate.
      uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
      Result.push_back(std::make_pair(PltSectionVA + Byte, Imm));
      Byte += 6;
    } else
      Byte++;
  }
  return Result;
}

static std::vector<std::pair<uint64_t, uint64_t>>
findX86_64PltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents) {
  // Do a lightweight parsing of PLT entries.
  std::vector<std::pair<uint64_t, uint64_t>> Result;
  for (uint64_t Byte = 0, End = PltContents.size(); Byte + 6 < End;) {
    // Recognize a jmp.
    if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0x25) {
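      // In 64-bit mode 0xff 0x25 <imm32> disassembles as "jmp *imm32(%rip)",
      // i.e. the GOT slot is addressed relative to the next instruction.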
      // The jmp instruction at the beginning of each PLT entry jumps to the
      // address of the next instruction plus the immediate.
      uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
      Result.push_back(
          std::make_pair(PltSectionVA + Byte, PltSectionVA + Byte + 6 + Imm));
      Byte += 6;
    } else
      Byte++;
  }
  return Result;
}

std::vector<std::pair<uint64_t, uint64_t>>
X86MCInstrAnalysis::findPltEntries(uint64_t PltSectionVA,
                                   ArrayRef<uint8_t> PltContents,
                                   const Triple &TargetTriple) const {
  switch (TargetTriple.getArch()) {
  case Triple::x86:
    return findX86PltEntries(PltSectionVA, PltContents);
  case Triple::x86_64:
    return findX86_64PltEntries(PltSectionVA, PltContents);
  default:
    return {};
  }
}

bool X86MCInstrAnalysis::evaluateBranch(const MCInst &Inst, uint64_t Addr,
                                        uint64_t Size, uint64_t &Target) const {
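  // Only PC-relative branches can be resolved statically: the target is the
  // address of the next instruction plus the signed displacement.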
  if (Inst.getNumOperands() == 0 ||
      Info->get(Inst.getOpcode()).operands()[0].OperandType !=
          MCOI::OPERAND_PCREL)
    return false;
  Target = Addr + Size + Inst.getOperand(0).getImm();
  return true;
}

std::optional<uint64_t> X86MCInstrAnalysis::evaluateMemoryOperandAddress(
    const MCInst &Inst, const MCSubtargetInfo *STI, uint64_t Addr,
    uint64_t Size) const {
  const MCInstrDesc &MCID = Info->get(Inst.getOpcode());
  int MemOpStart = X86II::getMemoryOperandNo(MCID.TSFlags);
  if (MemOpStart == -1)
    return std::nullopt;
  MemOpStart += X86II::getOperandBias(MCID);

  const MCOperand &SegReg = Inst.getOperand(MemOpStart + X86::AddrSegmentReg);
  const MCOperand &BaseReg = Inst.getOperand(MemOpStart + X86::AddrBaseReg);
  const MCOperand &IndexReg = Inst.getOperand(MemOpStart + X86::AddrIndexReg);
  const MCOperand &ScaleAmt = Inst.getOperand(MemOpStart + X86::AddrScaleAmt);
  const MCOperand &Disp = Inst.getOperand(MemOpStart + X86::AddrDisp);
  if (SegReg.getReg() != 0 || IndexReg.getReg() != 0 ||
      ScaleAmt.getImm() != 1 || !Disp.isImm())
    return std::nullopt;

  // RIP-relative addressing.
  if (BaseReg.getReg() == X86::RIP)
    return Addr + Size + Disp.getImm();

  return std::nullopt;
}

std::optional<uint64_t>
X86MCInstrAnalysis::getMemoryOperandRelocationOffset(const MCInst &Inst,
                                                     uint64_t Size) const {
  if (Inst.getOpcode() != X86::LEA64r)
    return std::nullopt;
  const MCInstrDesc &MCID = Info->get(Inst.getOpcode());
  int MemOpStart = X86II::getMemoryOperandNo(MCID.TSFlags);
  if (MemOpStart == -1)
    return std::nullopt;
  MemOpStart += X86II::getOperandBias(MCID);
  const MCOperand &SegReg = Inst.getOperand(MemOpStart + X86::AddrSegmentReg);
  const MCOperand &BaseReg = Inst.getOperand(MemOpStart + X86::AddrBaseReg);
  const MCOperand &IndexReg = Inst.getOperand(MemOpStart + X86::AddrIndexReg);
  const MCOperand &ScaleAmt = Inst.getOperand(MemOpStart + X86::AddrScaleAmt);
  const MCOperand &Disp = Inst.getOperand(MemOpStart + X86::AddrDisp);
  // Must be a simple rip-relative address.
  if (BaseReg.getReg() != X86::RIP || SegReg.getReg() != 0 ||
      IndexReg.getReg() != 0 || ScaleAmt.getImm() != 1 || !Disp.isImm())
    return std::nullopt;
  // The rip-relative ModR/M immediate is 32 bits, so the relocation applies
  // to the last four bytes of the instruction.
  assert(Size > 4 && "invalid instruction size for rip-relative lea");
  return Size - 4;
}

} // end of namespace X86_MC

} // end of namespace llvm

static MCInstrAnalysis *createX86MCInstrAnalysis(const MCInstrInfo *Info) {
  return new X86_MC::X86MCInstrAnalysis(Info);
}

// Force static initialization.
extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeX86TargetMC() {
  for (Target *T : {&getTheX86_32Target(), &getTheX86_64Target()}) {
    // Register the MC asm info.
    RegisterMCAsmInfoFn X(*T, createX86MCAsmInfo);

    // Register the MC instruction info.
    TargetRegistry::RegisterMCInstrInfo(*T, createX86MCInstrInfo);

    // Register the MC register info.
    TargetRegistry::RegisterMCRegInfo(*T, createX86MCRegisterInfo);

    // Register the MC subtarget info.
    TargetRegistry::RegisterMCSubtargetInfo(*T,
                                            X86_MC::createX86MCSubtargetInfo);

    // Register the MC instruction analyzer.
    TargetRegistry::RegisterMCInstrAnalysis(*T, createX86MCInstrAnalysis);

    // Register the code emitter.
    TargetRegistry::RegisterMCCodeEmitter(*T, createX86MCCodeEmitter);

    // Register the obj target streamer.
    TargetRegistry::RegisterObjectTargetStreamer(*T,
                                                 createX86ObjectTargetStreamer);

    // Register the asm target streamer.
    TargetRegistry::RegisterAsmTargetStreamer(*T, createX86AsmTargetStreamer);

    // Register the null streamer.
    TargetRegistry::RegisterNullTargetStreamer(*T, createX86NullTargetStreamer);

    TargetRegistry::RegisterCOFFStreamer(*T, createX86WinCOFFStreamer);
    TargetRegistry::RegisterELFStreamer(*T, createX86ELFStreamer);

    // Register the MCInstPrinter.
    TargetRegistry::RegisterMCInstPrinter(*T, createX86MCInstPrinter);

    // Register the MC relocation info.
    TargetRegistry::RegisterMCRelocationInfo(*T, createX86MCRelocationInfo);
  }

  // Register the asm backend.
  TargetRegistry::RegisterMCAsmBackend(getTheX86_32Target(),
                                       createX86_32AsmBackend);
  TargetRegistry::RegisterMCAsmBackend(getTheX86_64Target(),
                                       createX86_64AsmBackend);
}

MCRegister llvm::getX86SubSuperRegister(MCRegister Reg, unsigned Size,
                                        bool High) {
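// Map Reg, given as any member of a general purpose register family, to the
// family member of the requested Size in bits. Each SUB_SUPER macro lists the
// four views of one family (e.g. AL/AX/EAX/RAX); the A/B/C/D variants also
// accept the legacy high-byte registers (AH/BH/CH/DH), which are only
// produced when Size is 8 and High is true.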
#define DEFAULT_NOREG                                                          \
  default:                                                                     \
    return X86::NoRegister;
#define SUB_SUPER(R1, R2, R3, R4, R)                                           \
  case X86::R1:                                                                \
  case X86::R2:                                                                \
  case X86::R3:                                                                \
  case X86::R4:                                                                \
    return X86::R;
#define A_SUB_SUPER(R)                                                         \
  case X86::AH:                                                                \
    SUB_SUPER(AL, AX, EAX, RAX, R)
#define D_SUB_SUPER(R)                                                         \
  case X86::DH:                                                                \
    SUB_SUPER(DL, DX, EDX, RDX, R)
#define C_SUB_SUPER(R)                                                         \
  case X86::CH:                                                                \
    SUB_SUPER(CL, CX, ECX, RCX, R)
#define B_SUB_SUPER(R)                                                         \
  case X86::BH:                                                                \
    SUB_SUPER(BL, BX, EBX, RBX, R)
#define SI_SUB_SUPER(R) SUB_SUPER(SIL, SI, ESI, RSI, R)
#define DI_SUB_SUPER(R) SUB_SUPER(DIL, DI, EDI, RDI, R)
#define BP_SUB_SUPER(R) SUB_SUPER(BPL, BP, EBP, RBP, R)
#define SP_SUB_SUPER(R) SUB_SUPER(SPL, SP, ESP, RSP, R)
#define NO_SUB_SUPER(NO, REG)                                                  \
  SUB_SUPER(R##NO##B, R##NO##W, R##NO##D, R##NO, REG)
#define NO_SUB_SUPER_B(NO) NO_SUB_SUPER(NO, R##NO##B)
#define NO_SUB_SUPER_W(NO) NO_SUB_SUPER(NO, R##NO##W)
#define NO_SUB_SUPER_D(NO) NO_SUB_SUPER(NO, R##NO##D)
#define NO_SUB_SUPER_Q(NO) NO_SUB_SUPER(NO, R##NO)
  switch (Size) {
  default:
    llvm_unreachable("illegal register size");
  case 8:
    if (High) {
      switch (Reg.id()) {
        DEFAULT_NOREG
        A_SUB_SUPER(AH)
        D_SUB_SUPER(DH)
        C_SUB_SUPER(CH)
        B_SUB_SUPER(BH)
      }
    } else {
      switch (Reg.id()) {
        DEFAULT_NOREG
        A_SUB_SUPER(AL)
        D_SUB_SUPER(DL)
        C_SUB_SUPER(CL)
        B_SUB_SUPER(BL)
        SI_SUB_SUPER(SIL)
        DI_SUB_SUPER(DIL)
        BP_SUB_SUPER(BPL)
        SP_SUB_SUPER(SPL)
        NO_SUB_SUPER_B(8)
        NO_SUB_SUPER_B(9)
        NO_SUB_SUPER_B(10)
        NO_SUB_SUPER_B(11)
        NO_SUB_SUPER_B(12)
        NO_SUB_SUPER_B(13)
        NO_SUB_SUPER_B(14)
        NO_SUB_SUPER_B(15)
        NO_SUB_SUPER_B(16)
        NO_SUB_SUPER_B(17)
        NO_SUB_SUPER_B(18)
        NO_SUB_SUPER_B(19)
        NO_SUB_SUPER_B(20)
        NO_SUB_SUPER_B(21)
        NO_SUB_SUPER_B(22)
        NO_SUB_SUPER_B(23)
        NO_SUB_SUPER_B(24)
        NO_SUB_SUPER_B(25)
        NO_SUB_SUPER_B(26)
        NO_SUB_SUPER_B(27)
        NO_SUB_SUPER_B(28)
        NO_SUB_SUPER_B(29)
        NO_SUB_SUPER_B(30)
        NO_SUB_SUPER_B(31)
      }
    }
  case 16:
    switch (Reg.id()) {
      DEFAULT_NOREG
      A_SUB_SUPER(AX)
      D_SUB_SUPER(DX)
      C_SUB_SUPER(CX)
      B_SUB_SUPER(BX)
      SI_SUB_SUPER(SI)
      DI_SUB_SUPER(DI)
      BP_SUB_SUPER(BP)
      SP_SUB_SUPER(SP)
      NO_SUB_SUPER_W(8)
      NO_SUB_SUPER_W(9)
      NO_SUB_SUPER_W(10)
      NO_SUB_SUPER_W(11)
      NO_SUB_SUPER_W(12)
      NO_SUB_SUPER_W(13)
      NO_SUB_SUPER_W(14)
      NO_SUB_SUPER_W(15)
      NO_SUB_SUPER_W(16)
      NO_SUB_SUPER_W(17)
      NO_SUB_SUPER_W(18)
      NO_SUB_SUPER_W(19)
      NO_SUB_SUPER_W(20)
      NO_SUB_SUPER_W(21)
      NO_SUB_SUPER_W(22)
      NO_SUB_SUPER_W(23)
      NO_SUB_SUPER_W(24)
      NO_SUB_SUPER_W(25)
      NO_SUB_SUPER_W(26)
      NO_SUB_SUPER_W(27)
      NO_SUB_SUPER_W(28)
      NO_SUB_SUPER_W(29)
      NO_SUB_SUPER_W(30)
      NO_SUB_SUPER_W(31)
    }
  case 32:
    switch (Reg.id()) {
      DEFAULT_NOREG
      A_SUB_SUPER(EAX)
      D_SUB_SUPER(EDX)
      C_SUB_SUPER(ECX)
      B_SUB_SUPER(EBX)
      SI_SUB_SUPER(ESI)
      DI_SUB_SUPER(EDI)
      BP_SUB_SUPER(EBP)
      SP_SUB_SUPER(ESP)
      NO_SUB_SUPER_D(8)
      NO_SUB_SUPER_D(9)
      NO_SUB_SUPER_D(10)
      NO_SUB_SUPER_D(11)
      NO_SUB_SUPER_D(12)
      NO_SUB_SUPER_D(13)
      NO_SUB_SUPER_D(14)
      NO_SUB_SUPER_D(15)
      NO_SUB_SUPER_D(16)
      NO_SUB_SUPER_D(17)
      NO_SUB_SUPER_D(18)
      NO_SUB_SUPER_D(19)
      NO_SUB_SUPER_D(20)
      NO_SUB_SUPER_D(21)
      NO_SUB_SUPER_D(22)
      NO_SUB_SUPER_D(23)
      NO_SUB_SUPER_D(24)
      NO_SUB_SUPER_D(25)
      NO_SUB_SUPER_D(26)
      NO_SUB_SUPER_D(27)
      NO_SUB_SUPER_D(28)
      NO_SUB_SUPER_D(29)
      NO_SUB_SUPER_D(30)
      NO_SUB_SUPER_D(31)
    }
  case 64:
    switch (Reg.id()) {
      DEFAULT_NOREG
      A_SUB_SUPER(RAX)
      D_SUB_SUPER(RDX)
      C_SUB_SUPER(RCX)
      B_SUB_SUPER(RBX)
      SI_SUB_SUPER(RSI)
      DI_SUB_SUPER(RDI)
      BP_SUB_SUPER(RBP)
      SP_SUB_SUPER(RSP)
      NO_SUB_SUPER_Q(8)
      NO_SUB_SUPER_Q(9)
      NO_SUB_SUPER_Q(10)
      NO_SUB_SUPER_Q(11)
      NO_SUB_SUPER_Q(12)
      NO_SUB_SUPER_Q(13)
      NO_SUB_SUPER_Q(14)
      NO_SUB_SUPER_Q(15)
      NO_SUB_SUPER_Q(16)
      NO_SUB_SUPER_Q(17)
      NO_SUB_SUPER_Q(18)
      NO_SUB_SUPER_Q(19)
      NO_SUB_SUPER_Q(20)
      NO_SUB_SUPER_Q(21)
      NO_SUB_SUPER_Q(22)
      NO_SUB_SUPER_Q(23)
      NO_SUB_SUPER_Q(24)
      NO_SUB_SUPER_Q(25)
      NO_SUB_SUPER_Q(26)
      NO_SUB_SUPER_Q(27)
      NO_SUB_SUPER_Q(28)
      NO_SUB_SUPER_Q(29)
      NO_SUB_SUPER_Q(30)
      NO_SUB_SUPER_Q(31)
    }
  }
}
953 | |