//===---- TargetInfo.h - Encapsulate target details -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_LIB_CODEGEN_TARGETINFO_H
#define LLVM_CLANG_LIB_CODEGEN_TARGETINFO_H

#include "CGBuilder.h"
#include "CGValue.h"
#include "CodeGenModule.h"
#include "clang/AST/Type.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SyncScope.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"

namespace llvm {
class Constant;
class GlobalValue;
class Type;
class Value;
}

namespace clang {
class Decl;

namespace CodeGen {
class ABIInfo;
class CallArgList;
class CodeGenFunction;
class CGBlockInfo;
class SwiftABIInfo;

/// TargetCodeGenInfo - This class organizes various target-specific
/// code generation issues, like target-specific attributes, builtins and so
/// on.
class TargetCodeGenInfo {
  std::unique_ptr<ABIInfo> Info;

protected:
  // Target hooks supporting Swift calling conventions. The target must
  // initialize this field if it claims to support these calling conventions
  // by returning true from TargetInfo::checkCallingConvention for them.
  std::unique_ptr<SwiftABIInfo> SwiftInfo;
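
  // A minimal sketch (an assumption, not code from this file) of how a target
  // that supports the Swift calling conventions might initialize this field in
  // its TargetCodeGenInfo constructor, assuming the usual
  // SwiftABIInfo(CodeGenTypes &, bool SwiftErrorInRegister) constructor:
  //
  //   SwiftInfo =
  //       std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/true);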

  // Returns ABI info helper for the target. This is for use by derived
  // classes.
  template <typename T> const T &getABIInfo() const {
    return static_cast<const T &>(*Info);
  }
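
  // For illustration only (assumed names, not declared in this header): a
  // derived target class would typically fetch its concrete ABI helper like
  // so, where X86_64ABIInfo is the ABIInfo subclass owned by that target:
  //
  //   const auto &ABI = getABIInfo<X86_64ABIInfo>();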

public:
  TargetCodeGenInfo(std::unique_ptr<ABIInfo> Info);
  virtual ~TargetCodeGenInfo();

  /// getABIInfo() - Returns ABI info helper for the target.
  const ABIInfo &getABIInfo() const { return *Info; }

  /// Returns Swift ABI info helper for the target.
  const SwiftABIInfo &getSwiftABIInfo() const {
    assert(SwiftInfo && "Swift ABI info has not been initialized");
    return *SwiftInfo;
  }

  /// supportsLibCall - Query whether the target supports all
  /// lib calls.
  virtual bool supportsLibCall() const { return true; }

  /// setTargetAttributes - Provides a convenient hook to handle extra
  /// target-specific attributes for the given global.
  virtual void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                                   CodeGen::CodeGenModule &M) const {}

  /// emitTargetMetadata - Provides a convenient hook to handle extra
  /// target-specific metadata for the given globals.
  virtual void emitTargetMetadata(
      CodeGen::CodeGenModule &CGM,
      const llvm::MapVector<GlobalDecl, StringRef> &MangledDeclNames) const {}

  /// Provides a convenient hook to handle extra target-specific globals.
  virtual void emitTargetGlobals(CodeGen::CodeGenModule &CGM) const {}

  /// Any further codegen-related checks that need to be done on a function
  /// signature in a target-specific manner.
  virtual void checkFunctionABI(CodeGenModule &CGM,
                                const FunctionDecl *Decl) const {}

  /// Any further codegen-related checks that need to be done on a function
  /// call in a target-specific manner.
  virtual void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
                                    const FunctionDecl *Caller,
                                    const FunctionDecl *Callee,
                                    const CallArgList &Args,
                                    QualType ReturnType) const {}

  /// Returns true if inlining the function call would produce incorrect code
  /// for the current target and should be ignored (even with the always_inline
  /// or flatten attributes).
  ///
  /// Note: This probably should be handled in LLVM. However, the LLVM
  /// `alwaysinline` attribute currently means the inliner will ignore
  /// mismatched attributes (which can sometimes generate invalid code). So,
  /// this hook allows targets to avoid adding the LLVM `alwaysinline`
  /// attribute based on C/C++ attributes or other target-specific reasons.
  ///
  /// See previous discussion here:
  /// https://discourse.llvm.org/t/rfc-avoid-inlining-alwaysinline-functions-when-they-cannot-be-inlined/79528
  virtual bool
  wouldInliningViolateFunctionCallABI(const FunctionDecl *Caller,
                                      const FunctionDecl *Callee) const {
    return false;
  }

  /// Determines the size of struct _Unwind_Exception on this platform,
  /// in 8-bit units. The Itanium ABI defines this as:
  ///   struct _Unwind_Exception {
  ///     uint64 exception_class;
  ///     _Unwind_Exception_Cleanup_Fn exception_cleanup;
  ///     uint64 private_1;
  ///     uint64 private_2;
  ///   };
  virtual unsigned getSizeOfUnwindException() const;
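
  // Rough arithmetic for orientation (an assumption about a typical LP64
  // target, not something this header guarantees): four 8-byte fields give
  // 8 + 8 + 8 + 8 = 32 bytes, which is what the common default returns.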

  /// Controls whether __builtin_extend_pointer should sign-extend
  /// pointers to uint64_t or zero-extend them (the default). Has
  /// no effect for targets:
  ///   - that have 64-bit pointers, or
  ///   - that cannot address through registers larger than pointers, or
  ///   - that implicitly ignore/truncate the top bits when addressing
  ///     through such registers.
  virtual bool extendPointerWithSExt() const { return false; }

  /// Determines the DWARF register number for the stack pointer, for
  /// exception-handling purposes. Implements __builtin_dwarf_sp_column.
  ///
  /// Returns -1 if the operation is unsupported by this target.
  virtual int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
    return -1;
  }

  /// Initializes the given DWARF EH register-size table, a char*.
  /// Implements __builtin_init_dwarf_reg_size_table.
  ///
  /// Returns true if the operation is unsupported by this target.
  virtual bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                       llvm::Value *Address) const {
    return true;
  }

  /// Performs the code-generation required to convert a return
  /// address as stored by the system into the actual address of the
  /// next instruction that will be executed.
  ///
  /// Used by __builtin_extract_return_addr().
  virtual llvm::Value *decodeReturnAddress(CodeGen::CodeGenFunction &CGF,
                                           llvm::Value *Address) const {
    return Address;
  }
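
  // For context, the canonical C-level pattern that exercises this hook (and
  // its encoding counterpart below) is:
  //
  //   void *ra = __builtin_extract_return_addr(__builtin_return_address(0));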

  /// Performs the code-generation required to convert the address
  /// of an instruction into a return address suitable for storage
  /// by the system in a return slot.
  ///
  /// Used by __builtin_frob_return_addr().
  virtual llvm::Value *encodeReturnAddress(CodeGen::CodeGenFunction &CGF,
                                           llvm::Value *Address) const {
    return Address;
  }

  /// Performs a target-specific test of a floating-point value for things
  /// like IsNaN or Infinity. Returns nullptr if no implementation exists.
  virtual llvm::Value *
  testFPKind(llvm::Value *V, unsigned BuiltinID, CGBuilderTy &Builder,
             CodeGenModule &CGM) const {
    assert(V->getType()->isFloatingPointTy() && "V should have an FP type.");
    return nullptr;
  }

  /// Corrects the low-level LLVM type for a given constraint and "usual"
  /// type.
  ///
  /// \returns A pointer to a new LLVM type, possibly the same as the original
  /// on success; nullptr on failure.
  virtual llvm::Type *adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type *Ty) const {
    return Ty;
  }

  /// Target hook to decide whether an inline asm operand can be passed
  /// by value.
  virtual bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF,
                                        llvm::Type *Ty) const {
    return false;
  }

  /// Adds constraints and types for result registers.
  virtual void addReturnRegisterOutputs(
      CodeGen::CodeGenFunction &CGF, CodeGen::LValue ReturnValue,
      std::string &Constraints, std::vector<llvm::Type *> &ResultRegTypes,
      std::vector<llvm::Type *> &ResultTruncRegTypes,
      std::vector<CodeGen::LValue> &ResultRegDests, std::string &AsmString,
      unsigned NumOutputs) const {}

  /// doesReturnSlotInterfereWithArgs - Return true if the target uses an
  /// argument slot for an 'sret' type.
  virtual bool doesReturnSlotInterfereWithArgs() const { return true; }

  /// Retrieve the address of a function to call immediately before
  /// calling objc_retainAutoreleasedReturnValue. The
  /// implementation of objc_autoreleaseReturnValue sniffs the
  /// instruction stream following its return address to decide
  /// whether it's a call to objc_retainAutoreleasedReturnValue.
  /// This can be prohibitively expensive, depending on the
  /// relocation model, and so on some targets it instead sniffs for
  /// a particular instruction sequence. This function returns
  /// that instruction sequence in inline assembly, which will be
  /// empty if none is required.
  virtual StringRef getARCRetainAutoreleasedReturnValueMarker() const {
    return "";
  }

  /// Determine whether a call to objc_retainAutoreleasedReturnValue or
  /// objc_unsafeClaimAutoreleasedReturnValue should be marked as 'notail'.
  virtual bool markARCOptimizedReturnCallsAsNoTail() const { return false; }

  /// Return a constant used by UBSan as a signature to identify functions
  /// possessing type information, or 0 if the platform is unsupported.
  /// This magic number is an invalid instruction encoding on many targets.
  virtual llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const {
    return llvm::ConstantInt::get(CGM.Int32Ty, 0xc105cafe);
  }

  /// Determine whether a call to an unprototyped function under
  /// the given calling convention should use the variadic
  /// convention or the non-variadic convention.
  ///
  /// There's a good reason to make a platform's variadic calling
  /// convention be different from its non-variadic calling
  /// convention: the non-variadic arguments can be passed in
  /// registers (better for performance), and the variadic arguments
  /// can be passed on the stack (also better for performance). If
  /// this is done, however, unprototyped functions *must* use the
  /// non-variadic convention, because C99 states that a call
  /// through an unprototyped function type must succeed if the
  /// function was defined with a non-variadic prototype with
  /// compatible parameters. Therefore, splitting the conventions
  /// makes it impossible to call a variadic function through an
  /// unprototyped type. Since function prototypes came out in the
  /// late 1970s, this is probably an acceptable trade-off.
  /// Nonetheless, not all platforms are willing to make it, and in
  /// particular x86-64 bends over backwards to make the
  /// conventions compatible.
  ///
  /// The default is false. This is correct whenever:
  ///   - the conventions are exactly the same, because it does not
  ///     matter and the resulting IR will be somewhat prettier in
  ///     certain cases; or
  ///   - the conventions are substantively different in how they pass
  ///     arguments, because in this case using the variadic convention
  ///     will lead to C99 violations.
  ///
  /// However, some platforms make the conventions identical except
  /// for passing additional out-of-band information to a variadic
  /// function: for example, x86-64 passes the number of SSE
  /// arguments in %al. On these platforms, it is desirable to
  /// call unprototyped functions using the variadic convention so
  /// that unprototyped calls to varargs functions still succeed.
  ///
  /// Relatedly, platforms which pass the fixed arguments to this:
  ///   A foo(B, C, D);
  /// differently than they would pass them to this:
  ///   A foo(B, C, D, ...);
  /// may need to adjust the debugger-support code in Sema to do the
  /// right thing when calling a function with no known signature.
  virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args,
                                     const FunctionNoProtoType *fnType) const;
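
  // For illustration only, the C99 guarantee described above (this is a C
  // scenario; unprototyped function types do not exist in C++):
  //
  //   double bar(double x) { return x; }
  //   double (*fp)() = (double (*)())bar; /* unprototyped function type */
  //   double d = fp(2.0);                 /* must behave like bar(2.0)  */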

  /// Gets the linker options necessary to link a dependent library on this
  /// platform.
  virtual void getDependentLibraryOption(llvm::StringRef Lib,
                                         llvm::SmallString<24> &Opt) const;

  /// Gets the linker options necessary to detect object file mismatches on
  /// this platform.
  virtual void getDetectMismatchOption(llvm::StringRef Name,
                                       llvm::StringRef Value,
                                       llvm::SmallString<32> &Opt) const {}

  /// Get the LLVM calling convention for device kernels.
  virtual unsigned getDeviceKernelCallingConv() const;

  /// Get a target-specific null pointer.
  /// \param T is the LLVM type of the null pointer.
  /// \param QT is the clang QualType of the null pointer.
  /// \return ConstantPointerNull with the given type \p T.
  /// Each target can override it to return its own desired constant value.
  virtual llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
                                         llvm::PointerType *T,
                                         QualType QT) const;

  /// Get the target-favored AST address space of a global variable for
  /// languages other than OpenCL and CUDA.
  /// If \p D is nullptr, returns the default target-favored address space
  /// for a global variable.
  virtual LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
                                          const VarDecl *D) const;

  /// Get the AST address space for alloca.
  virtual LangAS getASTAllocaAddressSpace() const { return LangAS::Default; }

  /// Perform an address space cast of an Address; see the llvm::Value *
  /// overload below for the parameter semantics.
  Address performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, Address Addr,
                               LangAS SrcAddr, llvm::Type *DestTy,
                               bool IsNonNull = false) const;

  /// Perform an address space cast of an expression of pointer type.
  /// \param V is the LLVM value to be cast to another address space.
  /// \param SrcAddr is the language address space of \p V.
  /// \param DestTy is the destination LLVM pointer type.
  /// \param IsNonNull is the flag indicating \p V is known to be non-null.
  virtual llvm::Value *performAddrSpaceCast(CodeGen::CodeGenFunction &CGF,
                                            llvm::Value *V, LangAS SrcAddr,
                                            llvm::Type *DestTy,
                                            bool IsNonNull = false) const;

  /// Perform an address space cast of a constant expression of pointer type.
  /// \param V is the LLVM constant to be cast to another address space.
  /// \param SrcAddr is the language address space of \p V.
  /// \param DestTy is the destination LLVM pointer type.
  virtual llvm::Constant *performAddrSpaceCast(CodeGenModule &CGM,
                                               llvm::Constant *V,
                                               LangAS SrcAddr,
                                               llvm::Type *DestTy) const;

  /// Get the address space of the pointer parameter for __cxa_atexit.
  virtual LangAS getAddrSpaceOfCxaAtexitPtrParam() const {
    return LangAS::Default;
  }

  /// Get the syncscope used in LLVM IR.
  virtual llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
                                                 SyncScope Scope,
                                                 llvm::AtomicOrdering Ordering,
                                                 llvm::LLVMContext &Ctx) const;

  /// Allow the target to apply other metadata to an atomic instruction.
  virtual void setTargetAtomicMetadata(CodeGenFunction &CGF,
                                       llvm::Instruction &AtomicInst,
                                       const AtomicExpr *Expr = nullptr) const {
  }

  /// Interface class for filling custom fields of a block literal for OpenCL.
  class TargetOpenCLBlockHelper {
  public:
    typedef std::pair<llvm::Value *, StringRef> ValueTy;
    TargetOpenCLBlockHelper() {}
    virtual ~TargetOpenCLBlockHelper() {}
    /// Get the custom field types for OpenCL blocks.
    virtual llvm::SmallVector<llvm::Type *, 1> getCustomFieldTypes() = 0;
    /// Get the custom field values for OpenCL blocks.
    virtual llvm::SmallVector<ValueTy, 1>
    getCustomFieldValues(CodeGenFunction &CGF, const CGBlockInfo &Info) = 0;
    virtual bool areAllCustomFieldValuesConstant(const CGBlockInfo &Info) = 0;
    /// Get the custom field values for OpenCL blocks if all values are LLVM
    /// constants.
    virtual llvm::SmallVector<llvm::Constant *, 1>
    getCustomFieldValues(CodeGenModule &CGM, const CGBlockInfo &Info) = 0;
  };
  virtual TargetOpenCLBlockHelper *getTargetOpenCLBlockHelper() const {
    return nullptr;
  }

  /// Create an OpenCL kernel for an enqueued block. The kernel function is
  /// a wrapper for the block invoke function with target-specific calling
  /// convention and ABI as an OpenCL kernel. The wrapper function accepts the
  /// block context and block arguments in a target-specific way and calls the
  /// original block invoke function.
  virtual llvm::Value *
  createEnqueuedBlockKernel(CodeGenFunction &CGF,
                            llvm::Function *BlockInvokeFunc,
                            llvm::Type *BlockTy) const;

  /// \return true if the target supports an alias from the unmangled name to
  /// the mangled name of functions declared within an extern "C" region,
  /// marked as 'used', and having internal linkage.
  virtual bool shouldEmitStaticExternCAliases() const { return true; }
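
  // An illustrative sketch (an assumption, not taken from this header) of the
  // case the hook above covers:
  //
  //   extern "C" {
  //     __attribute__((used)) static int helper(void) { return 0; }
  //   }
  //
  // When this returns true, CodeGen also emits an alias for the plain C name
  // "helper" so that references to the unmangled name keep working.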

  /// \return true if anonymous zero-sized bitfields should be emitted to
  /// correctly distinguish between struct types whose memory layout is the
  /// same, but whose layout may differ when used as an argument passed by
  /// value.
  virtual bool shouldEmitDWARFBitFieldSeparators() const { return false; }

  virtual void setCUDAKernelCallingConvention(const FunctionType *&FT) const {}
  virtual void setOCLKernelStubCallingConvention(const FunctionType *&FT) const;
  /// Return the device-side type for the CUDA device builtin surface type.
  virtual llvm::Type *getCUDADeviceBuiltinSurfaceDeviceType() const {
    // By default, no change from the original one.
    return nullptr;
  }
  /// Return the device-side type for the CUDA device builtin texture type.
  virtual llvm::Type *getCUDADeviceBuiltinTextureDeviceType() const {
    // By default, no change from the original one.
    return nullptr;
  }

  /// Return the WebAssembly externref reference type.
  virtual llvm::Type *getWasmExternrefReferenceType() const { return nullptr; }

  /// Return the WebAssembly funcref reference type.
  virtual llvm::Type *getWasmFuncrefReferenceType() const { return nullptr; }

  /// Emit the device-side copy of the builtin surface type.
  virtual bool emitCUDADeviceBuiltinSurfaceDeviceCopy(CodeGenFunction &CGF,
                                                      LValue Dst,
                                                      LValue Src) const {
    // DO NOTHING by default.
    return false;
  }
  /// Emit the device-side copy of the builtin texture type.
  virtual bool emitCUDADeviceBuiltinTextureDeviceCopy(CodeGenFunction &CGF,
                                                      LValue Dst,
                                                      LValue Src) const {
    // DO NOTHING by default.
    return false;
  }

  /// Return an LLVM type that corresponds to an OpenCL type.
  virtual llvm::Type *getOpenCLType(CodeGenModule &CGM, const Type *T) const {
    return nullptr;
  }

  /// Return an LLVM type that corresponds to an HLSL type.
  virtual llvm::Type *
  getHLSLType(CodeGenModule &CGM, const Type *T,
              const SmallVector<int32_t> *Packoffsets = nullptr) const {
    return nullptr;
  }

  // Set the Branch Protection Attributes of the Function according to the
  // BPI. Remove attributes that contradict the current BPI.
  static void
  setBranchProtectionFnAttributes(const TargetInfo::BranchProtectionInfo &BPI,
                                  llvm::Function &F);

  // Add the Branch Protection Attributes to the FuncAttrs.
  static void
  initBranchProtectionFnAttributes(const TargetInfo::BranchProtectionInfo &BPI,
                                   llvm::AttrBuilder &FuncAttrs);

  // Set the ptrauth-* attributes of the Function according to the Opts.
  // Remove attributes that contradict the current Opts.
  static void setPointerAuthFnAttributes(const PointerAuthOptions &Opts,
                                         llvm::Function &F);

  // Add the ptrauth-* Attributes to the FuncAttrs.
  static void initPointerAuthFnAttributes(const PointerAuthOptions &Opts,
                                          llvm::AttrBuilder &FuncAttrs);

protected:
  static std::string qualifyWindowsLibrary(StringRef Lib);

  void addStackProbeTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                                     CodeGen::CodeGenModule &CGM) const;
};

std::unique_ptr<TargetCodeGenInfo>
createDefaultTargetCodeGenInfo(CodeGenModule &CGM);
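
// For orientation (an illustrative sketch, not code from this header): the
// factory functions in the remainder of this header are chosen by
// CodeGenModule based on the target triple, roughly along these lines:
//
//   switch (Triple.getArch()) {
//   case llvm::Triple::aarch64:
//     return createAArch64TargetCodeGenInfo(CGM, AArch64ABIKind::AAPCS);
//   case llvm::Triple::x86_64:
//     return createX86_64TargetCodeGenInfo(CGM, X86AVXABILevel::None);
//   default:
//     return createDefaultTargetCodeGenInfo(CGM);
//   }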

enum class AArch64ABIKind {
  AAPCS = 0,
  DarwinPCS,
  Win64,
  AAPCSSoft,
  PAuthTest,
};

std::unique_ptr<TargetCodeGenInfo>
createAArch64TargetCodeGenInfo(CodeGenModule &CGM, AArch64ABIKind Kind);

std::unique_ptr<TargetCodeGenInfo>
createWindowsAArch64TargetCodeGenInfo(CodeGenModule &CGM, AArch64ABIKind K);

std::unique_ptr<TargetCodeGenInfo>
createAMDGPUTargetCodeGenInfo(CodeGenModule &CGM);

std::unique_ptr<TargetCodeGenInfo>
createARCTargetCodeGenInfo(CodeGenModule &CGM);

enum class ARMABIKind {
  APCS = 0,
  AAPCS = 1,
  AAPCS_VFP = 2,
  AAPCS16_VFP = 3,
};

std::unique_ptr<TargetCodeGenInfo>
createARMTargetCodeGenInfo(CodeGenModule &CGM, ARMABIKind Kind);

std::unique_ptr<TargetCodeGenInfo>
createWindowsARMTargetCodeGenInfo(CodeGenModule &CGM, ARMABIKind K);

std::unique_ptr<TargetCodeGenInfo>
createAVRTargetCodeGenInfo(CodeGenModule &CGM, unsigned NPR, unsigned NRR);

std::unique_ptr<TargetCodeGenInfo>
createBPFTargetCodeGenInfo(CodeGenModule &CGM);

std::unique_ptr<TargetCodeGenInfo>
createCSKYTargetCodeGenInfo(CodeGenModule &CGM, unsigned FLen);

std::unique_ptr<TargetCodeGenInfo>
createHexagonTargetCodeGenInfo(CodeGenModule &CGM);

std::unique_ptr<TargetCodeGenInfo>
createLanaiTargetCodeGenInfo(CodeGenModule &CGM);

std::unique_ptr<TargetCodeGenInfo>
createLoongArchTargetCodeGenInfo(CodeGenModule &CGM, unsigned GRLen,
                                 unsigned FLen);

std::unique_ptr<TargetCodeGenInfo>
createM68kTargetCodeGenInfo(CodeGenModule &CGM);

std::unique_ptr<TargetCodeGenInfo>
createMIPSTargetCodeGenInfo(CodeGenModule &CGM, bool IsOS32);

std::unique_ptr<TargetCodeGenInfo>
createWindowsMIPSTargetCodeGenInfo(CodeGenModule &CGM, bool IsOS32);

std::unique_ptr<TargetCodeGenInfo>
createMSP430TargetCodeGenInfo(CodeGenModule &CGM);

std::unique_ptr<TargetCodeGenInfo>
createNVPTXTargetCodeGenInfo(CodeGenModule &CGM);

std::unique_ptr<TargetCodeGenInfo>
createPNaClTargetCodeGenInfo(CodeGenModule &CGM);

enum class PPC64_SVR4_ABIKind {
  ELFv1 = 0,
  ELFv2,
};

std::unique_ptr<TargetCodeGenInfo>
createAIXTargetCodeGenInfo(CodeGenModule &CGM, bool Is64Bit);

std::unique_ptr<TargetCodeGenInfo>
createPPC32TargetCodeGenInfo(CodeGenModule &CGM, bool SoftFloatABI);

std::unique_ptr<TargetCodeGenInfo>
createPPC64TargetCodeGenInfo(CodeGenModule &CGM);

std::unique_ptr<TargetCodeGenInfo>
createPPC64_SVR4_TargetCodeGenInfo(CodeGenModule &CGM, PPC64_SVR4_ABIKind Kind,
                                   bool SoftFloatABI);

std::unique_ptr<TargetCodeGenInfo>
createRISCVTargetCodeGenInfo(CodeGenModule &CGM, unsigned XLen, unsigned FLen,
                             bool EABI);

std::unique_ptr<TargetCodeGenInfo>
createCommonSPIRTargetCodeGenInfo(CodeGenModule &CGM);

std::unique_ptr<TargetCodeGenInfo>
createSPIRVTargetCodeGenInfo(CodeGenModule &CGM);

std::unique_ptr<TargetCodeGenInfo>
createSparcV8TargetCodeGenInfo(CodeGenModule &CGM);

std::unique_ptr<TargetCodeGenInfo>
createSparcV9TargetCodeGenInfo(CodeGenModule &CGM);

std::unique_ptr<TargetCodeGenInfo>
createSystemZTargetCodeGenInfo(CodeGenModule &CGM, bool HasVector,
                               bool SoftFloatABI);

std::unique_ptr<TargetCodeGenInfo>
createTCETargetCodeGenInfo(CodeGenModule &CGM);

std::unique_ptr<TargetCodeGenInfo>
createVETargetCodeGenInfo(CodeGenModule &CGM);

std::unique_ptr<TargetCodeGenInfo>
createDirectXTargetCodeGenInfo(CodeGenModule &CGM);

enum class WebAssemblyABIKind {
  MVP = 0,
  ExperimentalMV = 1,
};

std::unique_ptr<TargetCodeGenInfo>
createWebAssemblyTargetCodeGenInfo(CodeGenModule &CGM, WebAssemblyABIKind K);

/// The AVX ABI level for X86 targets.
enum class X86AVXABILevel {
  None,
  AVX,
  AVX512,
};

std::unique_ptr<TargetCodeGenInfo> createX86_32TargetCodeGenInfo(
    CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI,
    unsigned NumRegisterParameters, bool SoftFloatABI);

std::unique_ptr<TargetCodeGenInfo>
createWinX86_32TargetCodeGenInfo(CodeGenModule &CGM, bool DarwinVectorABI,
                                 bool Win32StructABI,
                                 unsigned NumRegisterParameters);

std::unique_ptr<TargetCodeGenInfo>
createX86_64TargetCodeGenInfo(CodeGenModule &CGM, X86AVXABILevel AVXLevel);

std::unique_ptr<TargetCodeGenInfo>
createWinX86_64TargetCodeGenInfo(CodeGenModule &CGM, X86AVXABILevel AVXLevel);

std::unique_ptr<TargetCodeGenInfo>
createXCoreTargetCodeGenInfo(CodeGenModule &CGM);

} // namespace CodeGen
} // namespace clang

#endif // LLVM_CLANG_LIB_CODEGEN_TARGETINFO_H