//===-- X86Subtarget.h - Define Subtarget for the X86 ----------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the X86 specific subclass of TargetSubtargetInfo.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_X86_X86SUBTARGET_H
#define LLVM_LIB_TARGET_X86_X86SUBTARGET_H

#include "X86FrameLowering.h"
#include "X86ISelLowering.h"
#include "X86InstrInfo.h"
#include "X86SelectionDAGInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/TargetParser/Triple.h"
#include <climits>
#include <memory>

#define GET_SUBTARGETINFO_HEADER
#include "X86GenSubtargetInfo.inc"

namespace llvm {

class CallLowering;
class GlobalValue;
class InstructionSelector;
class LegalizerInfo;
class RegisterBankInfo;
class StringRef;
class TargetMachine;

/// The X86 backend supports a number of different styles of PIC.
///
namespace PICStyles {

enum class Style {
  StubPIC,          // Used on i386-darwin in PIC mode.
  GOT,              // Used on 32-bit ELF when in PIC mode.
  RIPRel,           // Used on X86-64 when in PIC mode.
  None              // Set when not in PIC mode.
};

} // end namespace PICStyles

class X86Subtarget final : public X86GenSubtargetInfo {
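  // The enumerators below are ordered from least to most capable, so the
  // hasSSE*/hasAVX* accessors further down can be implemented as simple
  // ">= level" comparisons against X86SSELevel.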
  enum X86SSEEnum {
    NoSSE, SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42, AVX, AVX2, AVX512
  };

  /// Which PIC style to use
  PICStyles::Style PICStyle;

  const TargetMachine &TM;

  /// Highest supported SSE/AVX level: NoSSE, SSE1, SSE2, SSE3, SSSE3, SSE41,
  /// SSE42, AVX, AVX2, or AVX512.
  X86SSEEnum X86SSELevel = NoSSE;

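  // Tablegen expands GET_SUBTARGETINFO_MACRO below into one boolean member per
  // subtarget feature (the flags backing getters such as hasVLX() and hasBWI()
  // used later in this file), each initialized to its default value.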
#define GET_SUBTARGETINFO_MACRO(ATTRIBUTE, DEFAULT, GETTER) \
  bool ATTRIBUTE = DEFAULT;
#include "X86GenSubtargetInfo.inc"
  /// The minimum alignment known to hold for the stack frame on entry to the
  /// function, and which must be maintained by every function.
  Align stackAlignment = Align(4);

  Align TileConfigAlignment = Align(4);

  /// Max. memset / memcpy size that is turned into rep/movs, rep/stos ops.
  ///
  // FIXME: this is a known good value for Yonah. How about others?
  unsigned MaxInlineSizeThreshold = 128;

  /// What processor and OS we're targeting.
  Triple TargetTriple;

  /// GlobalISel related APIs.
  std::unique_ptr<CallLowering> CallLoweringInfo;
  std::unique_ptr<LegalizerInfo> Legalizer;
  std::unique_ptr<RegisterBankInfo> RegBankInfo;
  std::unique_ptr<InstructionSelector> InstSelector;

  /// Override the stack alignment.
  MaybeAlign StackAlignOverride;

  /// Preferred vector width from function attribute.
  unsigned PreferVectorWidthOverride;

  /// Resolved preferred vector width from function attribute and subtarget
  /// features.
  unsigned PreferVectorWidth = UINT32_MAX;

  /// Required vector width from function attribute.
  unsigned RequiredVectorWidth;

  X86SelectionDAGInfo TSInfo;
  // Ordering here is important. X86InstrInfo initializes X86RegisterInfo which
  // X86TargetLowering needs.
  X86InstrInfo InstrInfo;
  X86TargetLowering TLInfo;
  X86FrameLowering FrameLowering;

public:
  /// This constructor initializes the data members to match that
  /// of the specified triple.
  ///
  X86Subtarget(const Triple &TT, StringRef CPU, StringRef TuneCPU, StringRef FS,
               const X86TargetMachine &TM, MaybeAlign StackAlignOverride,
               unsigned PreferVectorWidthOverride,
               unsigned RequiredVectorWidth);
  ~X86Subtarget() override;

  const X86TargetLowering *getTargetLowering() const override {
    return &TLInfo;
  }

  const X86InstrInfo *getInstrInfo() const override { return &InstrInfo; }

  const X86FrameLowering *getFrameLowering() const override {
    return &FrameLowering;
  }

  const X86SelectionDAGInfo *getSelectionDAGInfo() const override {
    return &TSInfo;
  }

  const X86RegisterInfo *getRegisterInfo() const override {
    return &getInstrInfo()->getRegisterInfo();
  }

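  // AMX tile configuration ("tilecfg") data occupies 64 bytes; these report
  // its size and the alignment to use for the in-memory configuration block.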
  unsigned getTileConfigSize() const { return 64; }
  Align getTileConfigAlignment() const { return TileConfigAlignment; }

  /// Returns the minimum alignment known to hold for the stack frame on entry
  /// to the function, and which must be maintained by every function for this
  /// subtarget.
  Align getStackAlignment() const { return stackAlignment; }

  /// Returns the maximum memset / memcpy size
  /// that still makes it profitable to inline the call.
  unsigned getMaxInlineSizeThreshold() const { return MaxInlineSizeThreshold; }

  /// ParseSubtargetFeatures - Parses the features string, setting the
  /// specified subtarget options. The definition of this function is
  /// auto-generated by tblgen.
  void ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS);

  /// Methods used by Global ISel
  const CallLowering *getCallLowering() const override;
  InstructionSelector *getInstructionSelector() const override;
  const LegalizerInfo *getLegalizerInfo() const override;
  const RegisterBankInfo *getRegBankInfo() const override;

private:
  /// Initialize the full set of dependencies so we can use an initializer
  /// list for X86Subtarget.
  X86Subtarget &initializeSubtargetDependencies(StringRef CPU,
                                                StringRef TuneCPU,
                                                StringRef FS);
  void initSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS);

public:

#define GET_SUBTARGETINFO_MACRO(ATTRIBUTE, DEFAULT, GETTER) \
  bool GETTER() const { return ATTRIBUTE; }
#include "X86GenSubtargetInfo.inc"

  /// Is this x86_64 with the ILP32 programming model (x32 ABI)?
  bool isTarget64BitILP32() const { return Is64Bit && IsX32; }

  /// Is this x86_64 with the LP64 programming model (standard AMD64, no x32)?
  bool isTarget64BitLP64() const { return Is64Bit && !IsX32; }

  PICStyles::Style getPICStyle() const { return PICStyle; }
  void setPICStyle(PICStyles::Style Style) { PICStyle = Style; }

  bool canUseCMPXCHG8B() const { return hasCX8(); }
  bool canUseCMPXCHG16B() const {
    // CX16 is just the CPUID bit; the instruction also requires 64-bit mode.
    return hasCX16() && is64Bit();
  }
  // SSE codegen depends on cmovs, and all SSE1+ processors support them.
  // All 64-bit processors support cmov.
  bool canUseCMOV() const { return hasCMOV() || hasSSE1() || is64Bit(); }
  bool hasSSE1() const { return X86SSELevel >= SSE1; }
  bool hasSSE2() const { return X86SSELevel >= SSE2; }
  bool hasSSE3() const { return X86SSELevel >= SSE3; }
  bool hasSSSE3() const { return X86SSELevel >= SSSE3; }
  bool hasSSE41() const { return X86SSELevel >= SSE41; }
  bool hasSSE42() const { return X86SSELevel >= SSE42; }
  bool hasAVX() const { return X86SSELevel >= AVX; }
  bool hasAVX2() const { return X86SSELevel >= AVX2; }
  bool hasAVX512() const { return X86SSELevel >= AVX512; }
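  // 256-bit integer vector operations require AVX2; AVX alone only provides
  // 256-bit floating-point operations.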
  bool hasInt256() const { return hasAVX2(); }
  bool hasAnyFMA() const { return hasFMA() || hasFMA4(); }
  bool hasPrefetchW() const {
    // The PREFETCHW instruction was added with 3DNow but later CPUs gave it
    // its own CPUID bit as part of deprecating 3DNow.
    return hasPRFCHW();
  }
  bool hasSSEPrefetch() const {
    // We also implicitly enable these when we have a write prefix supporting
    // cache level OR if we have prfchw.
    return hasSSE1() || hasPRFCHW() || hasPREFETCHI();
  }
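  // LAHF/SAHF are always available in 32-bit mode, but in 64-bit mode they are
  // only encodable on CPUs that report the LAHF/SAHF-in-long-mode CPUID bit.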
  bool canUseLAHFSAHF() const { return hasLAHFSAHF64() || !is64Bit(); }
  // These are generic getters that OR together all of the thunk types
  // supported by the subtarget. Therefore useIndirectThunk*() will return true
  // if any respective thunk feature is enabled.
  bool useIndirectThunkCalls() const {
    return useRetpolineIndirectCalls() || useLVIControlFlowIntegrity();
  }
  bool useIndirectThunkBranches() const {
    return useRetpolineIndirectBranches() || useLVIControlFlowIntegrity();
  }

  unsigned getPreferVectorWidth() const { return PreferVectorWidth; }
  unsigned getRequiredVectorWidth() const { return RequiredVectorWidth; }

  // Helper functions to determine when we should allow widening to 512-bit
  // during codegen.
  // TODO: Currently we're always allowing widening on CPUs without VLX,
  // because for many cases we don't have a better option.
  bool canExtendTo512DQ() const {
    return hasAVX512() && (!hasVLX() || getPreferVectorWidth() >= 512);
  }
  bool canExtendTo512BW() const {
    return hasBWI() && canExtendTo512DQ();
  }

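  // These tuning flags model CPUs that do not incur a domain-crossing (bypass)
  // penalty when an instruction from one execution domain (integer vs.
  // floating point) consumes a result produced in the other, either in general
  // or specifically for moves, blends, or shuffles.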
  bool hasNoDomainDelay() const { return NoDomainDelay; }
  bool hasNoDomainDelayMov() const {
    return hasNoDomainDelay() || NoDomainDelayMov;
  }
  bool hasNoDomainDelayBlend() const {
    return hasNoDomainDelay() || NoDomainDelayBlend;
  }
  bool hasNoDomainDelayShuffle() const {
    return hasNoDomainDelay() || NoDomainDelayShuffle;
  }

  // If there are no 512-bit vectors and we prefer not to use 512-bit registers,
  // disable them in the legalizer.
  bool useAVX512Regs() const {
    return hasAVX512() && (canExtendTo512DQ() || RequiredVectorWidth > 256);
  }

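  // 256-bit instructions are treated as "light" (acceptable to emit even when
  // narrower vectors are otherwise preferred) if the preferred vector width is
  // already at least 256 bits, or the AllowLight256Bit tuning flag is set.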
  bool useLight256BitInstructions() const {
    return getPreferVectorWidth() >= 256 || AllowLight256Bit;
  }

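  // Like useAVX512Regs(), but additionally requires the AVX-512 BW extension
  // (byte/word vector instructions).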
  bool useBWIRegs() const {
    return hasBWI() && useAVX512Regs();
  }

  // Returns true if the destination register of a BSF/BSR instruction is
  // not touched if the source register is zero.
  // NOTE: i32->i64 implicit zext isn't guaranteed by BSR/BSF pass through.
  bool hasBitScanPassThrough() const { return is64Bit(); }

  bool isXRaySupported() const override { return is64Bit(); }

  /// Use clflush if we have SSE2 or we're on x86-64 (even if we asked for
  /// no-sse2). There isn't any reason to disable it if the target processor
  /// supports it.
  bool hasCLFLUSH() const { return hasSSE2() || is64Bit(); }

  /// Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
  /// no-sse2). There isn't any reason to disable it if the target processor
  /// supports it.
  bool hasMFence() const { return hasSSE2() || is64Bit(); }

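  // On 64-bit targets a locked idempotent RMW (e.g. "lock or" on a stack slot)
  // provides the same sequentially consistent fence semantics as MFENCE and is
  // typically cheaper, hence the preference below.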
  /// Avoid use of `mfence` for `fence seq_cst`, and instead use `lock or`.
  bool avoidMFence() const { return is64Bit(); }

  const Triple &getTargetTriple() const { return TargetTriple; }

  bool isTargetDarwin() const { return TargetTriple.isOSDarwin(); }
  bool isTargetFreeBSD() const { return TargetTriple.isOSFreeBSD(); }
  bool isTargetDragonFly() const { return TargetTriple.isOSDragonFly(); }
  bool isTargetSolaris() const { return TargetTriple.isOSSolaris(); }
  bool isTargetPS() const { return TargetTriple.isPS(); }

  bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
  bool isTargetCOFF() const { return TargetTriple.isOSBinFormatCOFF(); }
  bool isTargetMachO() const { return TargetTriple.isOSBinFormatMachO(); }

  bool isTargetLinux() const { return TargetTriple.isOSLinux(); }
  bool isTargetKFreeBSD() const { return TargetTriple.isOSKFreeBSD(); }
  bool isTargetHurd() const { return TargetTriple.isOSHurd(); }
  bool isTargetGlibc() const { return TargetTriple.isOSGlibc(); }
  bool isTargetMusl() const { return TargetTriple.isMusl(); }
  bool isTargetAndroid() const { return TargetTriple.isAndroid(); }
  bool isTargetMCU() const { return TargetTriple.isOSIAMCU(); }
  bool isTargetFuchsia() const { return TargetTriple.isOSFuchsia(); }

  bool isTargetWindowsMSVC() const {
    return TargetTriple.isWindowsMSVCEnvironment();
  }

  bool isTargetWindowsCoreCLR() const {
    return TargetTriple.isWindowsCoreCLREnvironment();
  }

  bool isTargetWindowsCygwin() const {
    return TargetTriple.isWindowsCygwinEnvironment();
  }

  bool isTargetWindowsGNU() const {
    return TargetTriple.isWindowsGNUEnvironment();
  }

  bool isTargetWindowsItanium() const {
    return TargetTriple.isWindowsItaniumEnvironment();
  }

  bool isTargetCygMing() const { return TargetTriple.isOSCygMing(); }

  bool isUEFI() const { return TargetTriple.isUEFI(); }

  bool isOSWindows() const { return TargetTriple.isOSWindows(); }

  bool isTargetUEFI64() const { return Is64Bit && isUEFI(); }

  bool isTargetWin64() const { return Is64Bit && isOSWindows(); }

  bool isTargetWin32() const { return !Is64Bit && isOSWindows(); }

  bool isPICStyleGOT() const { return PICStyle == PICStyles::Style::GOT; }
  bool isPICStyleRIPRel() const { return PICStyle == PICStyles::Style::RIPRel; }

  bool isPICStyleStubPIC() const {
    return PICStyle == PICStyles::Style::StubPIC;
  }

  bool isPositionIndependent() const;

  bool isCallingConvWin64(CallingConv::ID CC) const {
    switch (CC) {
    // On Win64, all these conventions just use the default convention.
    case CallingConv::C:
    case CallingConv::Fast:
    case CallingConv::Tail:
      return isTargetWin64() || isTargetUEFI64();
    case CallingConv::Swift:
    case CallingConv::SwiftTail:
    case CallingConv::X86_FastCall:
    case CallingConv::X86_StdCall:
    case CallingConv::X86_ThisCall:
    case CallingConv::X86_VectorCall:
    case CallingConv::Intel_OCL_BI:
      return isTargetWin64();
    // This convention allows using the Win64 convention on other targets.
    case CallingConv::Win64:
      return true;
    // This convention allows using the SysV convention on Windows targets.
    case CallingConv::X86_64_SysV:
      return false;
    // Otherwise, who knows what this is.
    default:
      return false;
    }
  }

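  // The classify*Reference helpers below return an X86II::MO_* target operand
  // flag (e.g. X86II::MO_GOTPCREL or X86II::MO_DLLIMPORT) describing how the
  // symbol should be referenced.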
  /// Classify a global variable reference for the current subtarget according
  /// to how we should reference it in a non-pcrel context.
  unsigned char classifyLocalReference(const GlobalValue *GV) const;

  unsigned char classifyGlobalReference(const GlobalValue *GV,
                                        const Module &M) const;
  unsigned char classifyGlobalReference(const GlobalValue *GV) const;

  /// Classify a global function reference for the current subtarget.
  unsigned char classifyGlobalFunctionReference(const GlobalValue *GV,
                                                const Module &M) const;
  unsigned char
  classifyGlobalFunctionReference(const GlobalValue *GV) const override;

  /// Classify a blockaddress reference for the current subtarget according to
  /// how we should reference it in a non-pcrel context.
  unsigned char classifyBlockAddressReference() const;

  /// Return true if the subtarget allows calls to immediate address.
  bool isLegalToCallImmediateAddr() const;

  /// Return whether FrameLowering should always set the "extended frame
  /// present" bit in FP, or set it based on a symbol in the runtime.
  bool swiftAsyncContextIsDynamicallySet() const {
    // Older OS versions (particularly system unwinders) are confused by the
    // Swift extended frame, so when building code that might be run on them we
    // must dynamically query the concurrency library to determine whether
    // extended frames should be flagged as present.
    const Triple &TT = getTargetTriple();

    unsigned Major = TT.getOSVersion().getMajor();
    switch (TT.getOS()) {
    default:
      return false;
    case Triple::IOS:
    case Triple::TvOS:
      return Major < 15;
    case Triple::WatchOS:
      return Major < 8;
    case Triple::MacOSX:
    case Triple::Darwin:
      return Major < 12;
    }
  }

  /// If we are using indirect thunks, we need to expand indirectbr to avoid it
  /// lowering to an actual indirect jump.
  bool enableIndirectBrExpand() const override {
    return useIndirectThunkBranches();
  }

  /// Enable the MachineScheduler pass for all X86 subtargets.
  bool enableMachineScheduler() const override { return true; }

  bool enableEarlyIfConversion() const override;

  void getPostRAMutations(std::vector<std::unique_ptr<ScheduleDAGMutation>>
                              &Mutations) const override;

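  // Request critical-path-only anti-dependence breaking in the post-RA
  // scheduler.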
  AntiDepBreakMode getAntiDepBreakMode() const override {
    return TargetSubtargetInfo::ANTIDEP_CRITICAL;
  }
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_X86_X86SUBTARGET_H