1//===---- TargetInfo.h - Encapsulate target details -------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// These classes wrap the information about a call or function
10// definition used to handle ABI compliancy.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_CLANG_LIB_CODEGEN_TARGETINFO_H
15#define LLVM_CLANG_LIB_CODEGEN_TARGETINFO_H
16
17#include "CGBuilder.h"
18#include "CGValue.h"
19#include "CodeGenModule.h"
20#include "clang/AST/Type.h"
21#include "clang/Basic/LLVM.h"
22#include "clang/Basic/SyncScope.h"
23#include "clang/Basic/TargetInfo.h"
24#include "llvm/ADT/SmallString.h"
25#include "llvm/ADT/StringRef.h"
26
// Forward declarations of LLVM IR entities referenced by the hooks below,
// so this header does not need to pull in the full LLVM IR headers.
namespace llvm {
class Constant;
class GlobalValue;
class Type;
class Value;
}
33
34namespace clang {
35class Decl;
36
37namespace CodeGen {
// Forward declarations of CodeGen-local types used by TargetCodeGenInfo.
// Note: CGHLSLOffsetInfo was previously declared twice; the duplicate has
// been removed.
class ABIInfo;
class CallArgList;
class CodeGenFunction;
class CGHLSLOffsetInfo;
class CGBlockInfo;
class SwiftABIInfo;
45
46/// TargetCodeGenInfo - This class organizes various target-specific
47/// codegeneration issues, like target-specific attributes, builtins and so
48/// on.
49class TargetCodeGenInfo {
50 std::unique_ptr<ABIInfo> Info;
51
52protected:
53 // Target hooks supporting Swift calling conventions. The target must
54 // initialize this field if it claims to support these calling conventions
55 // by returning true from TargetInfo::checkCallingConvention for them.
56 std::unique_ptr<SwiftABIInfo> SwiftInfo;
57
58 // Returns ABI info helper for the target. This is for use by derived classes.
59 template <typename T> const T &getABIInfo() const {
60 return static_cast<const T &>(*Info);
61 }
62
63public:
64 TargetCodeGenInfo(std::unique_ptr<ABIInfo> Info);
65 virtual ~TargetCodeGenInfo();
66
67 /// getABIInfo() - Returns ABI info helper for the target.
68 const ABIInfo &getABIInfo() const { return *Info; }
69
70 /// Returns Swift ABI info helper for the target.
71 const SwiftABIInfo &getSwiftABIInfo() const {
72 assert(SwiftInfo && "Swift ABI info has not been initialized");
73 return *SwiftInfo;
74 }
75
76 /// supportsLibCall - Query to whether or not target supports all
77 /// lib calls.
78 virtual bool supportsLibCall() const { return true; }
79
80 /// setTargetAttributes - Provides a convenient hook to handle extra
81 /// target-specific attributes for the given global.
82 virtual void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
83 CodeGen::CodeGenModule &M) const {}
84
85 /// emitTargetMetadata - Provides a convenient hook to handle extra
86 /// target-specific metadata for the given globals.
87 virtual void emitTargetMetadata(
88 CodeGen::CodeGenModule &CGM,
89 const llvm::MapVector<GlobalDecl, StringRef> &MangledDeclNames) const {}
90
91 /// Provides a convenient hook to handle extra target-specific globals.
92 virtual void emitTargetGlobals(CodeGen::CodeGenModule &CGM) const {}
93
94 /// Any further codegen related checks that need to be done on a function
95 /// signature in a target specific manner.
96 virtual void checkFunctionABI(CodeGenModule &CGM,
97 const FunctionDecl *Decl) const {}
98
99 /// Any further codegen related checks that need to be done on a function call
100 /// in a target specific manner.
101 virtual void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
102 const FunctionDecl *Caller,
103 const FunctionDecl *Callee,
104 const CallArgList &Args,
105 QualType ReturnType) const {}
106
107 /// Returns true if inlining the function call would produce incorrect code
108 /// for the current target and should be ignored (even with the always_inline
109 /// or flatten attributes).
110 ///
111 /// Note: This probably should be handled in LLVM. However, the LLVM
112 /// `alwaysinline` attribute currently means the inliner will ignore
113 /// mismatched attributes (which sometimes can generate invalid code). So,
114 /// this hook allows targets to avoid adding the LLVM `alwaysinline` attribute
115 /// based on C/C++ attributes or other target-specific reasons.
116 ///
117 /// See previous discussion here:
118 /// https://discourse.llvm.org/t/rfc-avoid-inlining-alwaysinline-functions-when-they-cannot-be-inlined/79528
119 virtual bool
120 wouldInliningViolateFunctionCallABI(const FunctionDecl *Caller,
121 const FunctionDecl *Callee) const {
122 return false;
123 }
124
125 /// Determines the size of struct _Unwind_Exception on this platform,
126 /// in 8-bit units. The Itanium ABI defines this as:
127 /// struct _Unwind_Exception {
128 /// uint64 exception_class;
129 /// _Unwind_Exception_Cleanup_Fn exception_cleanup;
130 /// uint64 private_1;
131 /// uint64 private_2;
132 /// };
133 virtual unsigned getSizeOfUnwindException() const;
134
135 /// Controls whether __builtin_extend_pointer should sign-extend
136 /// pointers to uint64_t or zero-extend them (the default). Has
137 /// no effect for targets:
138 /// - that have 64-bit pointers, or
139 /// - that cannot address through registers larger than pointers, or
140 /// - that implicitly ignore/truncate the top bits when addressing
141 /// through such registers.
142 virtual bool extendPointerWithSExt() const { return false; }
143
144 /// Determines the DWARF register number for the stack pointer, for
145 /// exception-handling purposes. Implements __builtin_dwarf_sp_column.
146 ///
147 /// Returns -1 if the operation is unsupported by this target.
148 virtual int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
149 return -1;
150 }
151
152 /// Initializes the given DWARF EH register-size table, a char*.
153 /// Implements __builtin_init_dwarf_reg_size_table.
154 ///
155 /// Returns true if the operation is unsupported by this target.
156 virtual bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
157 llvm::Value *Address) const {
158 return true;
159 }
160
161 /// Performs the code-generation required to convert a return
162 /// address as stored by the system into the actual address of the
163 /// next instruction that will be executed.
164 ///
165 /// Used by __builtin_extract_return_addr().
166 virtual llvm::Value *decodeReturnAddress(CodeGen::CodeGenFunction &CGF,
167 llvm::Value *Address) const {
168 return Address;
169 }
170
171 /// Performs the code-generation required to convert the address
172 /// of an instruction into a return address suitable for storage
173 /// by the system in a return slot.
174 ///
175 /// Used by __builtin_frob_return_addr().
176 virtual llvm::Value *encodeReturnAddress(CodeGen::CodeGenFunction &CGF,
177 llvm::Value *Address) const {
178 return Address;
179 }
180
181 /// Performs a target specific test of a floating point value for things
182 /// like IsNaN, Infinity, ... Nullptr is returned if no implementation
183 /// exists.
184 virtual llvm::Value *
185 testFPKind(llvm::Value *V, unsigned BuiltinID, CGBuilderTy &Builder,
186 CodeGenModule &CGM) const {
187 assert(V->getType()->isFloatingPointTy() && "V should have an FP type.");
188 return nullptr;
189 }
190
191 /// Corrects the low-level LLVM type for a given constraint and "usual"
192 /// type.
193 ///
194 /// \returns A pointer to a new LLVM type, possibly the same as the original
195 /// on success; 0 on failure.
196 virtual llvm::Type *adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
197 StringRef Constraint,
198 llvm::Type *Ty) const {
199 return Ty;
200 }
201
202 /// Target hook to decide whether an inline asm operand can be passed
203 /// by value.
204 virtual bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF,
205 llvm::Type *Ty) const {
206 return false;
207 }
208
209 /// Adds constraints and types for result registers.
210 virtual void addReturnRegisterOutputs(
211 CodeGen::CodeGenFunction &CGF, CodeGen::LValue ReturnValue,
212 std::string &Constraints, std::vector<llvm::Type *> &ResultRegTypes,
213 std::vector<llvm::Type *> &ResultTruncRegTypes,
214 std::vector<CodeGen::LValue> &ResultRegDests, std::string &AsmString,
215 unsigned NumOutputs) const {}
216
217 /// doesReturnSlotInterfereWithArgs - Return true if the target uses an
218 /// argument slot for an 'sret' type.
219 virtual bool doesReturnSlotInterfereWithArgs() const { return true; }
220
221 /// Retrieve the address of a function to call immediately before
222 /// calling objc_retainAutoreleasedReturnValue. The
223 /// implementation of objc_autoreleaseReturnValue sniffs the
224 /// instruction stream following its return address to decide
225 /// whether it's a call to objc_retainAutoreleasedReturnValue.
226 /// This can be prohibitively expensive, depending on the
227 /// relocation model, and so on some targets it instead sniffs for
228 /// a particular instruction sequence. This functions returns
229 /// that instruction sequence in inline assembly, which will be
230 /// empty if none is required.
231 virtual StringRef getARCRetainAutoreleasedReturnValueMarker() const {
232 return "";
233 }
234
235 /// Determine whether a call to objc_retainAutoreleasedReturnValue or
236 /// objc_unsafeClaimAutoreleasedReturnValue should be marked as 'notail'.
237 virtual bool markARCOptimizedReturnCallsAsNoTail() const { return false; }
238
239 /// Return a constant used by UBSan as a signature to identify functions
240 /// possessing type information, or 0 if the platform is unsupported.
241 /// This magic number is invalid instruction encoding in many targets.
242 virtual llvm::Constant *
243 getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const {
244 return llvm::ConstantInt::get(Ty: CGM.Int32Ty, V: 0xc105cafe);
245 }
246
247 /// Determine whether a call to an unprototyped functions under
248 /// the given calling convention should use the variadic
249 /// convention or the non-variadic convention.
250 ///
251 /// There's a good reason to make a platform's variadic calling
252 /// convention be different from its non-variadic calling
253 /// convention: the non-variadic arguments can be passed in
254 /// registers (better for performance), and the variadic arguments
255 /// can be passed on the stack (also better for performance). If
256 /// this is done, however, unprototyped functions *must* use the
257 /// non-variadic convention, because C99 states that a call
258 /// through an unprototyped function type must succeed if the
259 /// function was defined with a non-variadic prototype with
260 /// compatible parameters. Therefore, splitting the conventions
261 /// makes it impossible to call a variadic function through an
262 /// unprototyped type. Since function prototypes came out in the
263 /// late 1970s, this is probably an acceptable trade-off.
264 /// Nonetheless, not all platforms are willing to make it, and in
265 /// particularly x86-64 bends over backwards to make the
266 /// conventions compatible.
267 ///
268 /// The default is false. This is correct whenever:
269 /// - the conventions are exactly the same, because it does not
270 /// matter and the resulting IR will be somewhat prettier in
271 /// certain cases; or
272 /// - the conventions are substantively different in how they pass
273 /// arguments, because in this case using the variadic convention
274 /// will lead to C99 violations.
275 ///
276 /// However, some platforms make the conventions identical except
277 /// for passing additional out-of-band information to a variadic
278 /// function: for example, x86-64 passes the number of SSE
279 /// arguments in %al. On these platforms, it is desirable to
280 /// call unprototyped functions using the variadic convention so
281 /// that unprototyped calls to varargs functions still succeed.
282 ///
283 /// Relatedly, platforms which pass the fixed arguments to this:
284 /// A foo(B, C, D);
285 /// differently than they would pass them to this:
286 /// A foo(B, C, D, ...);
287 /// may need to adjust the debugger-support code in Sema to do the
288 /// right thing when calling a function with no know signature.
289 virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args,
290 const FunctionNoProtoType *fnType) const;
291
292 /// Gets the linker options necessary to link a dependent library on this
293 /// platform.
294 virtual void getDependentLibraryOption(llvm::StringRef Lib,
295 llvm::SmallString<24> &Opt) const;
296
297 /// Gets the linker options necessary to detect object file mismatches on
298 /// this platform.
299 virtual void getDetectMismatchOption(llvm::StringRef Name,
300 llvm::StringRef Value,
301 llvm::SmallString<32> &Opt) const {}
302
303 /// Get LLVM calling convention for device kernels.
304 virtual unsigned getDeviceKernelCallingConv() const;
305
306 /// Get target specific null pointer.
307 /// \param T is the LLVM type of the null pointer.
308 /// \param QT is the clang QualType of the null pointer.
309 /// \return ConstantPointerNull with the given type \p T.
310 /// Each target can override it to return its own desired constant value.
311 virtual llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
312 llvm::PointerType *T, QualType QT) const;
313
314 /// Get target favored AST address space of a global variable for languages
315 /// other than OpenCL and CUDA.
316 /// If \p D is nullptr, returns the default target favored address space
317 /// for global variable.
318 virtual LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
319 const VarDecl *D) const;
320
321 /// Get the AST address space for alloca.
322 virtual LangAS getASTAllocaAddressSpace() const { return LangAS::Default; }
323
324 /// Get address space of pointer parameter for __cxa_atexit.
325 virtual LangAS getAddrSpaceOfCxaAtexitPtrParam() const {
326 return LangAS::Default;
327 }
328
329 /// Get the syncscope used in LLVM IR as a string
330 virtual StringRef getLLVMSyncScopeStr(const LangOptions &LangOpts,
331 SyncScope Scope,
332 llvm::AtomicOrdering Ordering) const;
333
334 /// Get the syncscope used in LLVM IR as a SyncScope ID.
335 llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
336 SyncScope Scope,
337 llvm::AtomicOrdering Ordering,
338 llvm::LLVMContext &Ctx) const;
339
340 /// Allow the target to apply other metadata to an atomic instruction
341 virtual void setTargetAtomicMetadata(CodeGenFunction &CGF,
342 llvm::Instruction &AtomicInst,
343 const AtomicExpr *Expr = nullptr) const {
344 }
345
346 /// Interface class for filling custom fields of a block literal for OpenCL.
347 class TargetOpenCLBlockHelper {
348 public:
349 typedef std::pair<llvm::Value *, StringRef> ValueTy;
350 TargetOpenCLBlockHelper() {}
351 virtual ~TargetOpenCLBlockHelper() {}
352 /// Get the custom field types for OpenCL blocks.
353 virtual llvm::SmallVector<llvm::Type *, 1> getCustomFieldTypes() = 0;
354 /// Get the custom field values for OpenCL blocks.
355 virtual llvm::SmallVector<ValueTy, 1>
356 getCustomFieldValues(CodeGenFunction &CGF, const CGBlockInfo &Info) = 0;
357 virtual bool areAllCustomFieldValuesConstant(const CGBlockInfo &Info) = 0;
358 /// Get the custom field values for OpenCL blocks if all values are LLVM
359 /// constants.
360 virtual llvm::SmallVector<llvm::Constant *, 1>
361 getCustomFieldValues(CodeGenModule &CGM, const CGBlockInfo &Info) = 0;
362 };
363 virtual TargetOpenCLBlockHelper *getTargetOpenCLBlockHelper() const {
364 return nullptr;
365 }
366
367 /// Create an OpenCL kernel for an enqueued block. The kernel function is
368 /// a wrapper for the block invoke function with target-specific calling
369 /// convention and ABI as an OpenCL kernel. The wrapper function accepts
370 /// block context and block arguments in target-specific way and calls
371 /// the original block invoke function.
372 virtual llvm::Value *
373 createEnqueuedBlockKernel(CodeGenFunction &CGF,
374 llvm::Function *BlockInvokeFunc,
375 llvm::Type *BlockTy) const;
376
377 /// \return true if the target supports alias from the unmangled name to the
378 /// mangled name of functions declared within an extern "C" region and marked
379 /// as 'used', and having internal linkage.
380 virtual bool shouldEmitStaticExternCAliases() const { return true; }
381
382 /// \return true if annonymous zero-sized bitfields should be emitted to
383 /// correctly distinguish between struct types whose memory layout is the
384 /// same, but whose layout may differ when used as argument passed by value
385 virtual bool shouldEmitDWARFBitFieldSeparators() const { return false; }
386
387 virtual void setCUDAKernelCallingConvention(const FunctionType *&FT) const {}
388 virtual void setOCLKernelStubCallingConvention(const FunctionType *&FT) const;
389 /// Return the device-side type for the CUDA device builtin surface type.
390 virtual llvm::Type *getCUDADeviceBuiltinSurfaceDeviceType() const {
391 // By default, no change from the original one.
392 return nullptr;
393 }
394 /// Return the device-side type for the CUDA device builtin texture type.
395 virtual llvm::Type *getCUDADeviceBuiltinTextureDeviceType() const {
396 // By default, no change from the original one.
397 return nullptr;
398 }
399
400 /// Return the WebAssembly externref reference type.
401 virtual llvm::Type *getWasmExternrefReferenceType() const { return nullptr; }
402
403 /// Return the WebAssembly funcref reference type.
404 virtual llvm::Type *getWasmFuncrefReferenceType() const { return nullptr; }
405
406 /// Emit the device-side copy of the builtin surface type.
407 virtual bool emitCUDADeviceBuiltinSurfaceDeviceCopy(CodeGenFunction &CGF,
408 LValue Dst,
409 LValue Src) const {
410 // DO NOTHING by default.
411 return false;
412 }
413 /// Emit the device-side copy of the builtin texture type.
414 virtual bool emitCUDADeviceBuiltinTextureDeviceCopy(CodeGenFunction &CGF,
415 LValue Dst,
416 LValue Src) const {
417 // DO NOTHING by default.
418 return false;
419 }
420
421 /// Return an LLVM type that corresponds to an OpenCL type.
422 virtual llvm::Type *getOpenCLType(CodeGenModule &CGM, const Type *T) const {
423 return nullptr;
424 }
425
426 /// Return an LLVM type that corresponds to a HLSL type
427 virtual llvm::Type *getHLSLType(CodeGenModule &CGM, const Type *T,
428 const CGHLSLOffsetInfo &OffsetInfo) const {
429 return nullptr;
430 }
431
432 /// Return an LLVM type that corresponds to padding in HLSL types
433 virtual llvm::Type *getHLSLPadding(CodeGenModule &CGM,
434 CharUnits NumBytes) const {
435 return nullptr;
436 }
437
438 /// Return true if this is an HLSL padding type.
439 virtual bool isHLSLPadding(llvm::Type *Ty) const { return false; }
440
441 // Set the Branch Protection Attributes of the Function accordingly to the
442 // BPI. Remove attributes that contradict with current BPI.
443 static void
444 setBranchProtectionFnAttributes(const TargetInfo::BranchProtectionInfo &BPI,
445 llvm::Function &F);
446
447 // Add the Branch Protection Attributes of the FuncAttrs.
448 static void
449 initBranchProtectionFnAttributes(const TargetInfo::BranchProtectionInfo &BPI,
450 llvm::AttrBuilder &FuncAttrs);
451
452 // Set the ptrauth-* attributes of the Function accordingly to the Opts.
453 // Remove attributes that contradict with current Opts.
454 static void setPointerAuthFnAttributes(const PointerAuthOptions &Opts,
455 llvm::Function &F);
456
457 // Add the ptrauth-* Attributes to the FuncAttrs.
458 static void initPointerAuthFnAttributes(const PointerAuthOptions &Opts,
459 llvm::AttrBuilder &FuncAttrs);
460
461protected:
462 static std::string qualifyWindowsLibrary(StringRef Lib);
463
464 void addStackProbeTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
465 CodeGen::CodeGenModule &CGM) const;
466};
467
/// Create the generic, target-agnostic TargetCodeGenInfo used when no
/// target-specific factory applies.
std::unique_ptr<TargetCodeGenInfo>
createDefaultTargetCodeGenInfo(CodeGenModule &CGM);
470
/// The AArch64 ABI variants selectable for CodeGen. Numeric values are part
/// of the interface and are spelled explicitly for clarity.
enum class AArch64ABIKind {
  AAPCS = 0,     ///< Standard AAPCS64.
  DarwinPCS = 1, ///< Darwin (Apple platforms) variant.
  Win64 = 2,     ///< Windows ARM64 variant.
  AAPCSSoft = 3, ///< AAPCS64 soft-float variant.
};
477
/// Create the AArch64 codegen info for the given ABI kind.
std::unique_ptr<TargetCodeGenInfo>
createAArch64TargetCodeGenInfo(CodeGenModule &CGM, AArch64ABIKind Kind);

/// Create the Windows-on-AArch64 codegen info for the given ABI kind.
std::unique_ptr<TargetCodeGenInfo>
createWindowsAArch64TargetCodeGenInfo(CodeGenModule &CGM, AArch64ABIKind K);

/// Create the AMDGPU codegen info.
std::unique_ptr<TargetCodeGenInfo>
createAMDGPUTargetCodeGenInfo(CodeGenModule &CGM);

/// Create the ARC codegen info.
std::unique_ptr<TargetCodeGenInfo>
createARCTargetCodeGenInfo(CodeGenModule &CGM);
489
/// The 32-bit ARM ABI variants selectable for CodeGen.
/// Enumerators keep their original numeric values (0..3); only the first is
/// spelled explicitly, the rest follow sequentially.
enum class ARMABIKind {
  APCS = 0,
  AAPCS,
  AAPCS_VFP,
  AAPCS16_VFP,
};
496
/// Create the ARM codegen info for the given ABI kind.
std::unique_ptr<TargetCodeGenInfo>
createARMTargetCodeGenInfo(CodeGenModule &CGM, ARMABIKind Kind);

/// Create the Windows-on-ARM codegen info for the given ABI kind.
std::unique_ptr<TargetCodeGenInfo>
createWindowsARMTargetCodeGenInfo(CodeGenModule &CGM, ARMABIKind K);

/// Create the AVR codegen info.
/// NOTE(review): NPR/NRR presumably count parameter/return registers —
/// confirm against the AVR ABI implementation.
std::unique_ptr<TargetCodeGenInfo>
createAVRTargetCodeGenInfo(CodeGenModule &CGM, unsigned NPR, unsigned NRR);

/// Create the BPF codegen info.
std::unique_ptr<TargetCodeGenInfo>
createBPFTargetCodeGenInfo(CodeGenModule &CGM);

/// Create the CSKY codegen info.
/// NOTE(review): FLen presumably is the FP register/ABI width in bits —
/// confirm against the CSKY ABI implementation.
std::unique_ptr<TargetCodeGenInfo>
createCSKYTargetCodeGenInfo(CodeGenModule &CGM, unsigned FLen);

/// Create the Hexagon codegen info.
std::unique_ptr<TargetCodeGenInfo>
createHexagonTargetCodeGenInfo(CodeGenModule &CGM);

/// Create the Lanai codegen info.
std::unique_ptr<TargetCodeGenInfo>
createLanaiTargetCodeGenInfo(CodeGenModule &CGM);

/// Create the LoongArch codegen info for the given GPR/FPR ABI widths.
std::unique_ptr<TargetCodeGenInfo>
createLoongArchTargetCodeGenInfo(CodeGenModule &CGM, unsigned GRLen,
                                 unsigned FLen);

/// Create the M68k codegen info.
std::unique_ptr<TargetCodeGenInfo>
createM68kTargetCodeGenInfo(CodeGenModule &CGM);

/// Create the MIPS codegen info (IsOS32 selects the 32-bit O32 ABI).
std::unique_ptr<TargetCodeGenInfo>
createMIPSTargetCodeGenInfo(CodeGenModule &CGM, bool IsOS32);

/// Create the Windows-on-MIPS codegen info.
std::unique_ptr<TargetCodeGenInfo>
createWindowsMIPSTargetCodeGenInfo(CodeGenModule &CGM, bool IsOS32);

/// Create the MSP430 codegen info.
std::unique_ptr<TargetCodeGenInfo>
createMSP430TargetCodeGenInfo(CodeGenModule &CGM);

/// Create the NVPTX codegen info.
std::unique_ptr<TargetCodeGenInfo>
createNVPTXTargetCodeGenInfo(CodeGenModule &CGM);
536
/// The PPC64 SVR4 ABI versions selectable for CodeGen. Values are explicit
/// because they are part of the interface.
enum class PPC64_SVR4_ABIKind {
  ELFv1 = 0, ///< Original 64-bit ELF ABI.
  ELFv2 = 1, ///< Revised 64-bit ELFv2 ABI.
};
541
/// Create the AIX (PowerPC) codegen info; Is64Bit selects 64-bit mode.
std::unique_ptr<TargetCodeGenInfo>
createAIXTargetCodeGenInfo(CodeGenModule &CGM, bool Is64Bit);

/// Create the 32-bit PowerPC codegen info.
std::unique_ptr<TargetCodeGenInfo>
createPPC32TargetCodeGenInfo(CodeGenModule &CGM, bool SoftFloatABI);

/// Create the 64-bit PowerPC codegen info.
std::unique_ptr<TargetCodeGenInfo>
createPPC64TargetCodeGenInfo(CodeGenModule &CGM);

/// Create the PPC64 SVR4 codegen info for the given ELF ABI version.
std::unique_ptr<TargetCodeGenInfo>
createPPC64_SVR4_TargetCodeGenInfo(CodeGenModule &CGM, PPC64_SVR4_ABIKind Kind,
                                   bool SoftFloatABI);

/// Create the RISC-V codegen info.
/// NOTE(review): XLen/FLen presumably are integer/FP ABI widths in bits —
/// confirm against the RISC-V ABI implementation.
std::unique_ptr<TargetCodeGenInfo>
createRISCVTargetCodeGenInfo(CodeGenModule &CGM, unsigned XLen, unsigned FLen,
                             bool EABI);

/// Create the common SPIR codegen info.
std::unique_ptr<TargetCodeGenInfo>
createCommonSPIRTargetCodeGenInfo(CodeGenModule &CGM);

/// Create the SPIR-V codegen info.
std::unique_ptr<TargetCodeGenInfo>
createSPIRVTargetCodeGenInfo(CodeGenModule &CGM);

/// Create the SPARC V8 codegen info.
std::unique_ptr<TargetCodeGenInfo>
createSparcV8TargetCodeGenInfo(CodeGenModule &CGM);

/// Create the SPARC V9 codegen info.
std::unique_ptr<TargetCodeGenInfo>
createSparcV9TargetCodeGenInfo(CodeGenModule &CGM);

/// Create the SystemZ codegen info.
std::unique_ptr<TargetCodeGenInfo>
createSystemZTargetCodeGenInfo(CodeGenModule &CGM, bool HasVector,
                               bool SoftFloatABI);

/// Create the TCE codegen info.
std::unique_ptr<TargetCodeGenInfo>
createTCETargetCodeGenInfo(CodeGenModule &CGM);

/// Create the VE codegen info.
std::unique_ptr<TargetCodeGenInfo>
createVETargetCodeGenInfo(CodeGenModule &CGM);

/// Create the DirectX codegen info.
std::unique_ptr<TargetCodeGenInfo>
createDirectXTargetCodeGenInfo(CodeGenModule &CGM);
583
/// The WebAssembly ABI variants selectable for CodeGen. Numeric values are
/// unchanged: MVP is 0, ExperimentalMV follows sequentially as 1.
enum class WebAssemblyABIKind {
  MVP = 0,
  ExperimentalMV,
};
588
589std::unique_ptr<TargetCodeGenInfo>
590createWebAssemblyTargetCodeGenInfo(CodeGenModule &CGM, WebAssemblyABIKind K);
591
/// The AVX ABI level for X86 targets. Enumerator values are spelled
/// explicitly; they match the original implicit sequence.
enum class X86AVXABILevel {
  None = 0,
  AVX = 1,
  AVX512 = 2,
};
598
/// Create the 32-bit x86 codegen info.
std::unique_ptr<TargetCodeGenInfo> createX86_32TargetCodeGenInfo(
    CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI,
    unsigned NumRegisterParameters, bool SoftFloatABI);

/// Create the Windows 32-bit x86 codegen info.
std::unique_ptr<TargetCodeGenInfo>
createWinX86_32TargetCodeGenInfo(CodeGenModule &CGM, bool DarwinVectorABI,
                                 bool Win32StructABI,
                                 unsigned NumRegisterParameters);

/// Create the 64-bit x86 codegen info for the given AVX ABI level.
std::unique_ptr<TargetCodeGenInfo>
createX86_64TargetCodeGenInfo(CodeGenModule &CGM, X86AVXABILevel AVXLevel);

/// Create the Windows 64-bit x86 codegen info for the given AVX ABI level.
std::unique_ptr<TargetCodeGenInfo>
createWinX86_64TargetCodeGenInfo(CodeGenModule &CGM, X86AVXABILevel AVXLevel);

/// Create the XCore codegen info.
std::unique_ptr<TargetCodeGenInfo>
createXCoreTargetCodeGenInfo(CodeGenModule &CGM);
616
617} // namespace CodeGen
618} // namespace clang
619
620#endif // LLVM_CLANG_LIB_CODEGEN_TARGETINFO_H
621