//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the code that handles AST -> LLVM type lowering.
//
//===----------------------------------------------------------------------===//

#include "CodeGenTypes.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGDebugInfo.h"
#include "CGHLSLRuntime.h"
#include "CGOpenCLRuntime.h"
#include "CGRecordLayout.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"

using namespace clang;
using namespace CodeGen;

CodeGenTypes::CodeGenTypes(CodeGenModule &cgm)
    : CGM(cgm), Context(cgm.getContext()), TheModule(cgm.getModule()),
      Target(cgm.getTarget()) {
  SkippedLayout = false;
  LongDoubleReferenced = false;
}

CodeGenTypes::~CodeGenTypes() {
  for (llvm::FoldingSet<CGFunctionInfo>::iterator
         I = FunctionInfos.begin(), E = FunctionInfos.end(); I != E; )
    delete &*I++;
}

CGCXXABI &CodeGenTypes::getCXXABI() const { return getCGM().getCXXABI(); }

const CodeGenOptions &CodeGenTypes::getCodeGenOpts() const {
  return CGM.getCodeGenOpts();
}

void CodeGenTypes::addRecordTypeName(const RecordDecl *RD,
                                     llvm::StructType *Ty,
                                     StringRef suffix) {
  SmallString<256> TypeName;
  llvm::raw_svector_ostream OS(TypeName);
  OS << RD->getKindName() << '.';

  // FIXME: We probably want to make more tweaks to the printing policy. For
  // example, we should probably enable PrintCanonicalTypes and
  // FullyQualifiedNames.
  PrintingPolicy Policy = RD->getASTContext().getPrintingPolicy();
  Policy.SuppressInlineNamespace =
      llvm::to_underlying(PrintingPolicy::SuppressInlineNamespaceMode::None);

  // Name the codegen type after the typedef name
  // if there is no tag type name available.
  if (RD->getIdentifier()) {
    // FIXME: We should not have to check for a null decl context here.
    // Right now we do it because the implicit Obj-C decls don't have one.
    if (RD->getDeclContext())
      RD->printQualifiedName(OS, Policy);
    else
      RD->printName(OS, Policy);
  } else if (const TypedefNameDecl *TDD = RD->getTypedefNameForAnonDecl()) {
    // FIXME: We should not have to check for a null decl context here.
    // Right now we do it because the implicit Obj-C decls don't have one.
    if (TDD->getDeclContext())
      TDD->printQualifiedName(OS, Policy);
    else
      TDD->printName(OS);
  } else
    OS << "anon";

  if (!suffix.empty())
    OS << suffix;

  Ty->setName(OS.str());
}
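
// Illustrative examples of the names produced above (a sketch; the exact
// spelling depends on the printing policy and on how the type is declared):
//   struct Foo                         -> %struct.Foo
//   class ns::Bar                      -> %"class.ns::Bar"
//   anonymous struct with no typedef   -> %struct.anon
//   same record with a ".base" suffix  -> %struct.Foo.base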

/// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
/// ConvertType in that it is used to convert to the memory representation for
/// a type. For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
///
/// We generally assume that the alloc size of this type under the LLVM
/// data layout is the same as the size of the AST type. The alignment
/// does not have to match: Clang should always use explicit alignments
/// and packed structs as necessary to produce the layout it needs.
/// But the size does need to be exactly right or else things like struct
/// layout will break.
llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) {
  if (T->isConstantMatrixType()) {
    const Type *Ty = Context.getCanonicalType(T).getTypePtr();
    const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
    llvm::Type *IRElemTy = ConvertType(MT->getElementType());
    if (Context.getLangOpts().HLSL && T->isConstantMatrixBoolType())
      IRElemTy = ConvertTypeForMem(Context.BoolTy);
    return llvm::ArrayType::get(IRElemTy, MT->getNumElementsFlattened());
  }

  llvm::Type *R = ConvertType(T);

  // Check for the boolean vector case.
  if (T->isExtVectorBoolType()) {
    auto *FixedVT = cast<llvm::FixedVectorType>(R);

    if (Context.getLangOpts().HLSL) {
      llvm::Type *IRElemTy = ConvertTypeForMem(Context.BoolTy);
      return llvm::FixedVectorType::get(IRElemTy, FixedVT->getNumElements());
    }

    // Pad to at least one byte.
    uint64_t BytePadded = std::max<uint64_t>(FixedVT->getNumElements(), 8);
    return llvm::IntegerType::get(FixedVT->getContext(), BytePadded);
  }

  // If T is _Bool or a _BitInt type, ConvertType will produce an IR type
  // with the exact semantic bit-width of the AST type; for example,
  // _BitInt(17) will turn into i17. In memory, however, we need to store
  // such values extended to their full storage size as decided by AST
  // layout; this is an ABI requirement. Ideally, we would always use an
  // integer type that's just the bit-size of the AST type; for example, if
  // sizeof(_BitInt(17)) == 4, _BitInt(17) would turn into i32. That is what's
  // returned by convertTypeForLoadStore. However, that type does not
  // always satisfy the size requirement on memory representation types
  // described above. For example, a 32-bit platform might reasonably set
  // sizeof(_BitInt(65)) == 12, but i96 is likely to have an alloc size
  // of 16 bytes in the LLVM data layout. In these cases, we simply return
  // a byte array of the appropriate size.
  if (T->isBitIntType()) {
    if (typeRequiresSplitIntoByteArray(T, R))
      return llvm::ArrayType::get(CGM.Int8Ty,
                                  Context.getTypeSizeInChars(T).getQuantity());
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));
  }

  if (R->isIntegerTy(1))
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));

  // Else, don't map it.
  return R;
}
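
// Illustrative summary of the three conversions for narrow integer types
// (a sketch: the concrete widths assume a target where
// sizeof(_BitInt(17)) == 4 and sizeof(_BitInt(65)) == 12):
//                  ConvertType   convertTypeForLoadStore   ConvertTypeForMem
//   _Bool          i1            i8                        i8
//   _BitInt(17)    i17           i32                       i32
//   _BitInt(65)    i65           i96                       [12 x i8]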

bool CodeGenTypes::typeRequiresSplitIntoByteArray(QualType ASTTy,
                                                  llvm::Type *LLVMTy) {
  if (!LLVMTy)
    LLVMTy = ConvertType(ASTTy);

  CharUnits ASTSize = Context.getTypeSizeInChars(ASTTy);
  CharUnits LLVMSize =
      CharUnits::fromQuantity(getDataLayout().getTypeAllocSize(LLVMTy));
  return ASTSize != LLVMSize;
}

llvm::Type *CodeGenTypes::convertTypeForLoadStore(QualType T,
                                                  llvm::Type *LLVMTy) {
  if (!LLVMTy)
    LLVMTy = ConvertType(T);

  if (T->isBitIntType())
    return llvm::Type::getIntNTy(
        getLLVMContext(), Context.getTypeSizeInChars(T).getQuantity() * 8);

  if (LLVMTy->isIntegerTy(1))
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));

  if (T->isConstantMatrixBoolType()) {
    // Matrices are loaded and stored as vectors. Therefore we construct a
    // FixedVectorType here instead of returning ConvertTypeForMem(T), which
    // would return an ArrayType.
    const Type *Ty = Context.getCanonicalType(T).getTypePtr();
    const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
    llvm::Type *IRElemTy = ConvertTypeForMem(MT->getElementType());
    return llvm::FixedVectorType::get(IRElemTy, MT->getNumElementsFlattened());
  }

  if (T->isExtVectorBoolType())
    return ConvertTypeForMem(T);

  return LLVMTy;
}
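
// For example (a sketch that ignores the HLSL-specific paths): a bool
// ext_vector_type(4) converts to <4 x i1> as a scalar but is widened to a
// single i8 in memory and for loads/stores, while a bool ext_vector_type(16)
// becomes i16.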

/// isRecordLayoutComplete - Return true if the specified type is already
/// completely laid out.
bool CodeGenTypes::isRecordLayoutComplete(const Type *Ty) const {
  llvm::DenseMap<const Type*, llvm::StructType *>::const_iterator I =
      RecordDeclTypes.find(Ty);
  return I != RecordDeclTypes.end() && !I->second->isOpaque();
}

/// isFuncParamTypeConvertible - Return true if the specified type in a
/// function parameter or result position can be converted to an IR type at
/// this point. This boils down to whether the type is complete.
bool CodeGenTypes::isFuncParamTypeConvertible(QualType Ty) {
  // Some ABIs cannot have their member pointers represented in IR unless
  // certain circumstances have been reached.
  if (const auto *MPT = Ty->getAs<MemberPointerType>())
    return getCXXABI().isMemberPointerConvertible(MPT);

  // If this isn't a tagged type, we can convert it!
  const TagType *TT = Ty->getAs<TagType>();
  if (!TT) return true;

  // Incomplete types cannot be converted.
  return !TT->isIncompleteType();
}

/// Verify that a given function type is complete, i.e. that the return type
/// and all of the parameter types are complete. If not, we don't want to ask
/// the ABI lowering code to handle a type that cannot be converted to an IR
/// type.
bool CodeGenTypes::isFuncTypeConvertible(const FunctionType *FT) {
  if (!isFuncParamTypeConvertible(FT->getReturnType()))
    return false;

  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
    for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
      if (!isFuncParamTypeConvertible(FPT->getParamType(i)))
        return false;

  return true;
}
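
// For example, with only 'struct S;' in scope, a function type such as
// 'struct S f(struct S)' is not yet convertible: ConvertFunctionTypeInternal
// below returns an opaque placeholder struct and sets SkippedLayout, so the
// cached results are flushed and the function type is re-converted once
// 'struct S' is completed (see UpdateCompletedType and ConvertRecordDeclType).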

/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
  CanQualType T = CGM.getContext().getCanonicalTagType(TD);
  // If this is an enum being completed, then we flush all non-struct types
  // from the cache. This allows function types and other things that may be
  // derived from the enum to be recomputed.
  if (const EnumDecl *ED = dyn_cast<EnumDecl>(TD)) {
    // Only flush the cache if we've actually already converted this type.
    if (TypeCache.count(T->getTypePtr())) {
      // Okay, we formed some types based on this. We speculated that the enum
      // would be lowered to i32, so we only need to flush the cache if this
      // didn't happen.
      if (!ConvertType(ED->getIntegerType())->isIntegerTy(32))
        TypeCache.clear();
    }
    // If necessary, provide the full definition of a type only used with a
    // declaration so far.
    if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
      DI->completeType(ED);
    return;
  }

  // If we completed a RecordDecl that we previously used and converted to an
  // anonymous type, then go ahead and complete it now.
  const RecordDecl *RD = cast<RecordDecl>(TD);
  if (RD->isDependentType()) return;

  // Only complete it if we converted it already. If we haven't converted it
  // yet, we'll just do it lazily.
  if (RecordDeclTypes.count(T.getTypePtr()))
    ConvertRecordDeclType(RD);

  // If necessary, provide the full definition of a type only used with a
  // declaration so far.
  if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
    DI->completeType(RD);
}

void CodeGenTypes::RefreshTypeCacheForClass(const CXXRecordDecl *RD) {
  CanQualType T = Context.getCanonicalTagType(RD);
  T = Context.getCanonicalType(T);

  const Type *Ty = T.getTypePtr();
  if (RecordsWithOpaqueMemberPointers.count(Ty)) {
    TypeCache.clear();
    RecordsWithOpaqueMemberPointers.clear();
  }
}

static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext,
                                    const llvm::fltSemantics &format,
                                    bool UseNativeHalf = false) {
  if (&format == &llvm::APFloat::IEEEhalf()) {
    if (UseNativeHalf)
      return llvm::Type::getHalfTy(VMContext);
    else
      return llvm::Type::getInt16Ty(VMContext);
  }
  if (&format == &llvm::APFloat::BFloat())
    return llvm::Type::getBFloatTy(VMContext);
  if (&format == &llvm::APFloat::IEEEsingle())
    return llvm::Type::getFloatTy(VMContext);
  if (&format == &llvm::APFloat::IEEEdouble())
    return llvm::Type::getDoubleTy(VMContext);
  if (&format == &llvm::APFloat::IEEEquad())
    return llvm::Type::getFP128Ty(VMContext);
  if (&format == &llvm::APFloat::PPCDoubleDouble())
    return llvm::Type::getPPC_FP128Ty(VMContext);
  if (&format == &llvm::APFloat::x87DoubleExtended())
    return llvm::Type::getX86_FP80Ty(VMContext);
  llvm_unreachable("Unknown float format!");
}

llvm::Type *CodeGenTypes::ConvertFunctionTypeInternal(QualType QFT) {
  assert(QFT.isCanonical());
  const FunctionType *FT = cast<FunctionType>(QFT.getTypePtr());
  // First, check whether we can build the full function type. If the
  // function type depends on an incomplete type (e.g. a struct or enum), we
  // cannot lower the function type.
  if (!isFuncTypeConvertible(FT)) {
    // This function's type depends on an incomplete tag type.

    // Force conversion of all the relevant record types, to make sure
    // we re-convert the FunctionType when appropriate.
    if (const auto *RD = FT->getReturnType()->getAsRecordDecl())
      ConvertRecordDeclType(RD);
    if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
      for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
        if (const auto *RD = FPT->getParamType(i)->getAsRecordDecl())
          ConvertRecordDeclType(RD);

    SkippedLayout = true;

    // Return a placeholder type.
    return llvm::StructType::get(getLLVMContext());
  }

  // The function type can be built; call the appropriate routines to
  // build it.
  const CGFunctionInfo *FI;
  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
    FI = &arrangeFreeFunctionType(
        CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)));
  } else {
    const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(FT);
    FI = &arrangeFreeFunctionType(
        CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)));
  }

  llvm::Type *ResultType = nullptr;
  // If there is something higher level prodding our CGFunctionInfo, then
  // don't recurse into it again.
  if (FunctionsBeingProcessed.count(FI)) {
    ResultType = llvm::StructType::get(getLLVMContext());
    SkippedLayout = true;
  } else {
    // Otherwise, we're good to go; go ahead and convert it.
    ResultType = GetFunctionType(*FI);
  }

  return ResultType;
}

/// ConvertType - Convert the specified type to its LLVM form.
llvm::Type *CodeGenTypes::ConvertType(QualType T) {
  T = Context.getCanonicalType(T);

  const Type *Ty = T.getTypePtr();

  // For device-side compilation, CUDA device builtin surface/texture types
  // may be represented by different types.
  if (Context.getLangOpts().CUDAIsDevice) {
    if (T->isCUDADeviceBuiltinSurfaceType()) {
      if (auto *Ty = CGM.getTargetCodeGenInfo()
                         .getCUDADeviceBuiltinSurfaceDeviceType())
        return Ty;
    } else if (T->isCUDADeviceBuiltinTextureType()) {
      if (auto *Ty = CGM.getTargetCodeGenInfo()
                         .getCUDADeviceBuiltinTextureDeviceType())
        return Ty;
    }
  }

  // RecordTypes are cached and processed specially.
  if (const auto *RT = dyn_cast<RecordType>(Ty))
    return ConvertRecordDeclType(RT->getDecl()->getDefinitionOrSelf());

  llvm::Type *CachedType = nullptr;
  auto TCI = TypeCache.find(Ty);
  if (TCI != TypeCache.end())
    CachedType = TCI->second;
  // With expensive checks, check that the type we compute matches the
  // cached type.
#ifndef EXPENSIVE_CHECKS
  if (CachedType)
    return CachedType;
#endif

  // If we don't have it in the cache, convert it now.
  llvm::Type *ResultType = nullptr;
  switch (Ty->getTypeClass()) {
  case Type::Record: // Handled above.
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical or dependent types aren't possible.");

  case Type::Builtin: {
    switch (cast<BuiltinType>(Ty)->getKind()) {
    case BuiltinType::Void:
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      // LLVM void type can only be used as the result of a function call. Just
      // map to the same as char.
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
      break;

    case BuiltinType::Bool:
      // Note that we always return bool as i1 for use as a scalar type.
      ResultType = llvm::Type::getInt1Ty(getLLVMContext());
      break;

    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::Int:
    case BuiltinType::UInt:
    case BuiltinType::Long:
    case BuiltinType::ULong:
    case BuiltinType::LongLong:
    case BuiltinType::ULongLong:
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Char8:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
    case BuiltinType::ShortAccum:
    case BuiltinType::Accum:
    case BuiltinType::LongAccum:
    case BuiltinType::UShortAccum:
    case BuiltinType::UAccum:
    case BuiltinType::ULongAccum:
    case BuiltinType::ShortFract:
    case BuiltinType::Fract:
    case BuiltinType::LongFract:
    case BuiltinType::UShortFract:
    case BuiltinType::UFract:
    case BuiltinType::ULongFract:
    case BuiltinType::SatShortAccum:
    case BuiltinType::SatAccum:
    case BuiltinType::SatLongAccum:
    case BuiltinType::SatUShortAccum:
    case BuiltinType::SatUAccum:
    case BuiltinType::SatULongAccum:
    case BuiltinType::SatShortFract:
    case BuiltinType::SatFract:
    case BuiltinType::SatLongFract:
    case BuiltinType::SatUShortFract:
    case BuiltinType::SatUFract:
    case BuiltinType::SatULongFract:
      ResultType = llvm::IntegerType::get(
          getLLVMContext(), static_cast<unsigned>(Context.getTypeSize(T)));
      break;

    case BuiltinType::Float16:
      ResultType =
          getTypeForFormat(getLLVMContext(), Context.getFloatTypeSemantics(T),
                           /* UseNativeHalf = */ true);
      break;

    case BuiltinType::Half:
      // Half FP can either be storage-only (lowered to i16) or native.
      ResultType = getTypeForFormat(
          getLLVMContext(), Context.getFloatTypeSemantics(T),
          Context.getLangOpts().NativeHalfType ||
              !Context.getTargetInfo().useFP16ConversionIntrinsics());
      break;
    case BuiltinType::LongDouble:
      LongDoubleReferenced = true;
      [[fallthrough]];
    case BuiltinType::BFloat16:
    case BuiltinType::Float:
    case BuiltinType::Double:
    case BuiltinType::Float128:
    case BuiltinType::Ibm128:
      ResultType = getTypeForFormat(getLLVMContext(),
                                    Context.getFloatTypeSemantics(T),
                                    /* UseNativeHalf = */ false);
      break;

    case BuiltinType::NullPtr:
      // Model std::nullptr_t as i8*
      ResultType = llvm::PointerType::getUnqual(getLLVMContext());
      break;

    case BuiltinType::UInt128:
    case BuiltinType::Int128:
      ResultType = llvm::IntegerType::get(getLLVMContext(), 128);
      break;

#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
      ResultType = CGM.getOpenCLRuntime().convertOpenCLSpecificType(Ty);
      break;
#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId) \
    case BuiltinType::Id:
#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId) \
    case BuiltinType::Id:
#include "clang/Basic/AArch64ACLETypes.def"
    {
      ASTContext::BuiltinVectorTypeInfo Info =
          Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
      // The `__mfp8` type maps to `<1 x i8>` which can't be used to build
      // a <N x i8> vector type, hence bypass the call to `ConvertType` for
      // the element type and create the vector type directly.
      auto *EltTy = Info.ElementType->isMFloat8Type()
                        ? llvm::Type::getInt8Ty(getLLVMContext())
                        : ConvertType(Info.ElementType);
      auto *VTy = llvm::VectorType::get(EltTy, Info.EC);
      switch (Info.NumVectors) {
      default:
        llvm_unreachable("Expected 1, 2, 3 or 4 vectors!");
      case 1:
        return VTy;
      case 2:
        return llvm::StructType::get(VTy, VTy);
      case 3:
        return llvm::StructType::get(VTy, VTy, VTy);
      case 4:
        return llvm::StructType::get(VTy, VTy, VTy, VTy);
      }
    }
    case BuiltinType::SveCount:
      return llvm::TargetExtType::get(getLLVMContext(), "aarch64.svcount");
    case BuiltinType::MFloat8:
      return llvm::VectorType::get(llvm::Type::getInt8Ty(getLLVMContext()), 1,
                                   false);
#define PPC_VECTOR_TYPE(Name, Id, Size) \
    case BuiltinType::Id: \
      ResultType = \
          llvm::FixedVectorType::get(ConvertType(Context.BoolTy), Size); \
      break;
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
    {
      ASTContext::BuiltinVectorTypeInfo Info =
          Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
      if (Info.NumVectors != 1) {
        unsigned I8EltCount =
            Info.EC.getKnownMinValue() *
            ConvertType(Info.ElementType)->getScalarSizeInBits() / 8;
        return llvm::TargetExtType::get(
            getLLVMContext(), "riscv.vector.tuple",
            llvm::ScalableVectorType::get(
                llvm::Type::getInt8Ty(getLLVMContext()), I8EltCount),
            Info.NumVectors);
      }
      return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
                                           Info.EC.getKnownMinValue());
    }
#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS) \
    case BuiltinType::Id: { \
      if (BuiltinType::Id == BuiltinType::WasmExternRef) \
        ResultType = CGM.getTargetCodeGenInfo().getWasmExternrefReferenceType(); \
      else \
        llvm_unreachable("Unexpected wasm reference builtin type!"); \
    } break;
#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define AMDGPU_OPAQUE_PTR_TYPE(Name, Id, SingletonId, Width, Align, AS) \
    case BuiltinType::Id: \
      return llvm::PointerType::get(getLLVMContext(), AS);
#define AMDGPU_NAMED_BARRIER_TYPE(Name, Id, SingletonId, Width, Align, Scope) \
    case BuiltinType::Id: \
      return llvm::TargetExtType::get(getLLVMContext(), "amdgcn.named.barrier", \
                                      {}, {Scope});
#include "clang/Basic/AMDGPUTypes.def"
#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/HLSLIntangibleTypes.def"
      ResultType = CGM.getHLSLRuntime().convertHLSLSpecificType(Ty);
      break;
    case BuiltinType::Dependent:
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
    case BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
      llvm_unreachable("Unexpected placeholder builtin type!");
    }
    break;
  }
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Unexpected undeduced type!");
  case Type::Complex: {
    llvm::Type *EltTy = ConvertType(cast<ComplexType>(Ty)->getElementType());
    ResultType = llvm::StructType::get(EltTy, EltTy);
    break;
  }
  case Type::LValueReference:
  case Type::RValueReference: {
    const ReferenceType *RTy = cast<ReferenceType>(Ty);
    QualType ETy = RTy->getPointeeType();
    unsigned AS = getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
    break;
  }
  case Type::Pointer: {
    const PointerType *PTy = cast<PointerType>(Ty);
    QualType ETy = PTy->getPointeeType();
    unsigned AS = getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
    break;
  }

  case Type::VariableArray: {
    const VariableArrayType *A = cast<VariableArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // VLAs resolve to the innermost element type; this matches
    // the return of alloca, and there isn't any obviously better choice.
    ResultType = ConvertTypeForMem(A->getElementType());
    break;
  }
  case Type::IncompleteArray: {
    const IncompleteArrayType *A = cast<IncompleteArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // int X[] -> [0 x int], unless the element type is not sized. If it is
    // unsized (e.g. an incomplete struct) just use [0 x i8].
    ResultType = ConvertTypeForMem(A->getElementType());
    if (!ResultType->isSized()) {
      SkippedLayout = true;
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
    }
    ResultType = llvm::ArrayType::get(ResultType, 0);
    break;
  }
  case Type::ArrayParameter:
  case Type::ConstantArray: {
    const ConstantArrayType *A = cast<ConstantArrayType>(Ty);
    llvm::Type *EltTy = ConvertTypeForMem(A->getElementType());

    // Lower arrays of undefined struct type to arrays of i8 just to have a
    // concrete type.
    if (!EltTy->isSized()) {
      SkippedLayout = true;
      EltTy = llvm::Type::getInt8Ty(getLLVMContext());
    }

    ResultType = llvm::ArrayType::get(EltTy, A->getZExtSize());
    break;
  }
  case Type::ExtVector:
  case Type::Vector: {
    const auto *VT = cast<VectorType>(Ty);
    // An ext_vector_type of Bool is really a vector of bits.
    llvm::Type *IRElemTy = VT->isPackedVectorBoolType(Context)
                               ? llvm::Type::getInt1Ty(getLLVMContext())
                           : VT->getElementType()->isMFloat8Type()
                               ? llvm::Type::getInt8Ty(getLLVMContext())
                               : ConvertType(VT->getElementType());
    ResultType = llvm::FixedVectorType::get(IRElemTy, VT->getNumElements());
    break;
  }
  case Type::ConstantMatrix: {
    const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
    ResultType =
        llvm::FixedVectorType::get(ConvertType(MT->getElementType()),
                                   MT->getNumRows() * MT->getNumColumns());
    break;
  }
  case Type::FunctionNoProto:
  case Type::FunctionProto:
    ResultType = ConvertFunctionTypeInternal(T);
    break;
  case Type::ObjCObject:
    ResultType = ConvertType(cast<ObjCObjectType>(Ty)->getBaseType());
    break;

  case Type::ObjCInterface: {
    // Objective-C interfaces are always opaque (outside of the
    // runtime, which can do whatever it likes); we never refine
    // these.
    llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(Ty)];
    if (!T)
      T = llvm::StructType::create(getLLVMContext());
    ResultType = T;
    break;
  }

  case Type::ObjCObjectPointer:
    ResultType = llvm::PointerType::getUnqual(getLLVMContext());
    break;

  case Type::Enum: {
    const auto *ED = Ty->castAsEnumDecl();
    if (ED->isCompleteDefinition() || ED->isFixed())
      return ConvertType(ED->getIntegerType());
    // Return a placeholder 'i32' type. This can be changed later when the
    // type is defined (see UpdateCompletedType), but is likely to be the
    // "right" answer.
    ResultType = llvm::Type::getInt32Ty(getLLVMContext());
    break;
  }

  case Type::BlockPointer: {
    // Block pointers lower to function type. For function type,
    // getTargetAddressSpace() returns default address space for
    // function pointer i.e. program address space. Therefore, for block
    // pointers, it is important to pass the pointee AST address space when
    // calling getTargetAddressSpace(), to ensure that we get the LLVM IR
    // address space for data pointers and not function pointers.
    const QualType FTy = cast<BlockPointerType>(Ty)->getPointeeType();
    unsigned AS = Context.getTargetAddressSpace(FTy.getAddressSpace());
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
    break;
  }

  case Type::MemberPointer: {
    auto *MPTy = cast<MemberPointerType>(Ty);
    if (!getCXXABI().isMemberPointerConvertible(MPTy)) {
      CanQualType T = CGM.getContext().getCanonicalTagType(
          MPTy->getMostRecentCXXRecordDecl());
      auto Insertion =
          RecordsWithOpaqueMemberPointers.try_emplace(T.getTypePtr());
      if (Insertion.second)
        Insertion.first->second = llvm::StructType::create(getLLVMContext());
      ResultType = Insertion.first->second;
    } else {
      ResultType = getCXXABI().ConvertMemberPointerType(MPTy);
    }
    break;
  }

  case Type::Atomic: {
    QualType valueType = cast<AtomicType>(Ty)->getValueType();
    ResultType = ConvertTypeForMem(valueType);

    // Pad out to the inflated size if necessary.
    uint64_t valueSize = Context.getTypeSize(valueType);
    uint64_t atomicSize = Context.getTypeSize(Ty);
    if (valueSize != atomicSize) {
      assert(valueSize < atomicSize);
      llvm::Type *elts[] = {
          ResultType,
          llvm::ArrayType::get(CGM.Int8Ty, (atomicSize - valueSize) / 8)};
      ResultType =
          llvm::StructType::get(getLLVMContext(), llvm::ArrayRef(elts));
    }
    break;
  }
  case Type::Pipe: {
    ResultType = CGM.getOpenCLRuntime().getPipeType(cast<PipeType>(Ty));
    break;
  }
  case Type::BitInt: {
    const auto &EIT = cast<BitIntType>(Ty);
    ResultType = llvm::Type::getIntNTy(getLLVMContext(), EIT->getNumBits());
    break;
  }
  case Type::HLSLAttributedResource:
  case Type::HLSLInlineSpirv:
    ResultType = CGM.getHLSLRuntime().convertHLSLSpecificType(Ty);
    break;
  }

  assert(ResultType && "Didn't convert a type?");
  assert((!CachedType || CachedType == ResultType) &&
         "Cached type doesn't match computed type");

  TypeCache[Ty] = ResultType;
  return ResultType;
}
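
// A few representative mappings produced by ConvertType (illustrative only,
// assuming a typical 64-bit target):
//   int                  -> i32
//   _Complex double      -> { double, double }
//   float[8]             -> [8 x float]
//   int *, int &         -> ptr (in the pointee's target address space)
//   enum E : short       -> i16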

bool CodeGenModule::isPaddedAtomicType(QualType type) {
  return isPaddedAtomicType(type->castAs<AtomicType>());
}

bool CodeGenModule::isPaddedAtomicType(const AtomicType *type) {
  return Context.getTypeSize(type) !=
         Context.getTypeSize(type->getValueType());
}
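
// For instance (a sketch; the sizes are illustrative): given
//   struct Small { char c[3]; };
// a target may inflate _Atomic(struct Small) from 3 to 4 bytes, so the
// Type::Atomic case above produces { %struct.Small, [1 x i8] } and
// isPaddedAtomicType() returns true for it.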

/// ConvertRecordDeclType - Lay out a tagged decl type like struct or union.
llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
  // TagDecls are not necessarily unique; instead, use the (clang) type
  // connected to the decl.
  const Type *Key = Context.getCanonicalTagType(RD).getTypePtr();

  llvm::StructType *&Entry = RecordDeclTypes[Key];

  // If we don't have a StructType at all yet, create the forward declaration.
  if (!Entry) {
    Entry = llvm::StructType::create(getLLVMContext());
    addRecordTypeName(RD, Entry, "");
  }
  llvm::StructType *Ty = Entry;

  // If this is still a forward declaration, or the LLVM type is already
  // complete, there's nothing more to do.
  RD = RD->getDefinition();
  if (!RD || !RD->isCompleteDefinition() || !Ty->isOpaque())
    return Ty;

  // Force conversion of non-virtual base classes recursively.
  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CRD->bases()) {
      if (I.isVirtual()) continue;
      ConvertRecordDeclType(I.getType()->castAsRecordDecl());
    }
  }

  // Layout fields.
  std::unique_ptr<CGRecordLayout> Layout = ComputeRecordLayout(RD, Ty);
  CGRecordLayouts[Key] = std::move(Layout);

  // If this struct blocked a FunctionType conversion, then recompute whatever
  // was derived from that.
  // FIXME: This is hugely overconservative.
  if (SkippedLayout)
    TypeCache.clear();

  return Ty;
}

/// getCGRecordLayout - Return record layout info for the given record decl.
const CGRecordLayout &
CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
  const Type *Key = Context.getCanonicalTagType(RD).getTypePtr();

  auto I = CGRecordLayouts.find(Key);
  if (I != CGRecordLayouts.end())
    return *I->second;
  // Compute the type information.
  ConvertRecordDeclType(RD);

  // Now try again.
  I = CGRecordLayouts.find(Key);

  assert(I != CGRecordLayouts.end() &&
         "Unable to find record layout information for type");
  return *I->second;
}

bool CodeGenTypes::isPointerZeroInitializable(QualType T) {
  assert((T->isAnyPointerType() || T->isBlockPointerType() ||
          T->isNullPtrType()) &&
         "Invalid type");
  return isZeroInitializable(T);
}

bool CodeGenTypes::isZeroInitializable(QualType T) {
  if (T->getAs<PointerType>() || T->isNullPtrType())
    return Context.getTargetNullPointerValue(T) == 0;

  if (const auto *AT = Context.getAsArrayType(T)) {
    if (isa<IncompleteArrayType>(AT))
      return true;
    if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
      if (Context.getConstantArrayElementCount(CAT) == 0)
        return true;
    T = Context.getBaseElementType(T);
  }

  // Records are non-zero-initializable if they contain any
  // non-zero-initializable subobjects.
  if (const auto *RD = T->getAsRecordDecl())
    return isZeroInitializable(RD);

  // We have to ask the ABI about member pointers.
  if (const MemberPointerType *MPT = T->getAs<MemberPointerType>())
    return getCXXABI().isZeroInitializable(MPT);

  // HLSL Inline SPIR-V types are non-zero-initializable.
  if (T->getAs<HLSLInlineSpirvType>())
    return false;

  // Everything else is okay.
  return true;
}

bool CodeGenTypes::isZeroInitializable(const RecordDecl *RD) {
  return getCGRecordLayout(RD).isZeroInitializable();
}

unsigned CodeGenTypes::getTargetAddressSpace(QualType T) const {
  // Return the address space for the type. If the type is a function type
  // without an address space qualifier, the program address space is used.
  // Otherwise, the target picks the best address space based on the type
  // information.
  return T->isFunctionType() && !T.hasAddressSpace()
             ? getDataLayout().getProgramAddressSpace()
             : getContext().getTargetAddressSpace(T.getAddressSpace());
}
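
// Example (address-space numbering is target-specific): a function type with
// no explicit address-space qualifier uses the data layout's program address
// space, while a data type carrying an address_space qualifier is mapped
// through the target's language-to-IR address-space table.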
914