//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the code that handles AST -> LLVM type lowering.
//
//===----------------------------------------------------------------------===//

#include "CodeGenTypes.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGDebugInfo.h"
#include "CGHLSLRuntime.h"
#include "CGOpenCLRuntime.h"
#include "CGRecordLayout.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"

using namespace clang;
using namespace CodeGen;
CodeGenTypes::CodeGenTypes(CodeGenModule &cgm)
    : CGM(cgm), Context(cgm.getContext()), TheModule(cgm.getModule()),
      Target(cgm.getTarget()) {
  SkippedLayout = false;
  LongDoubleReferenced = false;
}

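/// The CGFunctionInfo objects interned in FunctionInfos are owned by this
/// CodeGenTypes instance, so free them on teardown.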
CodeGenTypes::~CodeGenTypes() {
  for (llvm::FoldingSet<CGFunctionInfo>::iterator
       I = FunctionInfos.begin(), E = FunctionInfos.end(); I != E; )
    delete &*I++;
}

CGCXXABI &CodeGenTypes::getCXXABI() const { return getCGM().getCXXABI(); }

const CodeGenOptions &CodeGenTypes::getCodeGenOpts() const {
  return CGM.getCodeGenOpts();
}

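/// addRecordTypeName - Compute a name for the IR struct type of the given
/// record decl: the tag or typedef name if one is available, "anon"
/// otherwise, plus an optional suffix.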
void CodeGenTypes::addRecordTypeName(const RecordDecl *RD,
                                     llvm::StructType *Ty,
                                     StringRef suffix) {
  SmallString<256> TypeName;
  llvm::raw_svector_ostream OS(TypeName);
  OS << RD->getKindName() << '.';

  // FIXME: We probably want to make more tweaks to the printing policy. For
  // example, we should probably enable PrintCanonicalTypes and
  // FullyQualifiedNames.
  PrintingPolicy Policy = RD->getASTContext().getPrintingPolicy();
  Policy.SuppressInlineNamespace =
      llvm::to_underlying(PrintingPolicy::SuppressInlineNamespaceMode::None);

  // Name the codegen type after the typedef name
  // if there is no tag type name available.
  if (RD->getIdentifier()) {
    // FIXME: We should not have to check for a null decl context here.
    // Right now we do it because the implicit Obj-C decls don't have one.
    if (RD->getDeclContext())
      RD->printQualifiedName(OS, Policy);
    else
      RD->printName(OS, Policy);
  } else if (const TypedefNameDecl *TDD = RD->getTypedefNameForAnonDecl()) {
    // FIXME: We should not have to check for a null decl context here.
    // Right now we do it because the implicit Obj-C decls don't have one.
    if (TDD->getDeclContext())
      TDD->printQualifiedName(OS, Policy);
    else
      TDD->printName(OS);
  } else
    OS << "anon";

  if (!suffix.empty())
    OS << suffix;

  Ty->setName(OS.str());
}

/// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
/// ConvertType in that it is used to convert to the memory representation for
/// a type. For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
///
/// We generally assume that the alloc size of this type under the LLVM
/// data layout is the same as the size of the AST type. The alignment
/// does not have to match: Clang should always use explicit alignments
/// and packed structs as necessary to produce the layout it needs.
/// But the size does need to be exactly right or else things like struct
/// layout will break.
llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) {
  if (T->isConstantMatrixType()) {
    const Type *Ty = Context.getCanonicalType(T).getTypePtr();
    const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
    llvm::Type *IRElemTy = ConvertType(MT->getElementType());
    if (Context.getLangOpts().HLSL) {
      if (T->isConstantMatrixBoolType())
        IRElemTy = ConvertTypeForMem(Context.BoolTy);

      unsigned NumRows = MT->getNumRows();
      unsigned NumCols = MT->getNumColumns();
      bool IsRowMajor =
          CGM.getContext().getLangOpts().getDefaultMatrixMemoryLayout() ==
          LangOptions::MatrixMemoryLayout::MatrixRowMajor;
      unsigned VecLen = IsRowMajor ? NumCols : NumRows;
      unsigned ArrayLen = IsRowMajor ? NumRows : NumCols;
      llvm::Type *VecTy = llvm::FixedVectorType::get(IRElemTy, VecLen);
      return llvm::ArrayType::get(VecTy, ArrayLen);
    }
    return llvm::ArrayType::get(IRElemTy, MT->getNumElementsFlattened());
  }

  llvm::Type *R = ConvertType(T);

  // Check for the boolean vector case.
  if (T->isExtVectorBoolType()) {
    auto *FixedVT = cast<llvm::FixedVectorType>(R);

    if (Context.getLangOpts().HLSL) {
      llvm::Type *IRElemTy = ConvertTypeForMem(Context.BoolTy);
      return llvm::FixedVectorType::get(IRElemTy, FixedVT->getNumElements());
    }

    // Pad to at least one byte.
    uint64_t BytePadded = std::max<uint64_t>(FixedVT->getNumElements(), 8);
    return llvm::IntegerType::get(FixedVT->getContext(), BytePadded);
  }

  // If T is _Bool or a _BitInt type, ConvertType will produce an IR type
  // with the exact semantic bit-width of the AST type; for example,
  // _BitInt(17) will turn into i17. In memory, however, we need to store
  // such values extended to their full storage size as decided by AST
  // layout; this is an ABI requirement. Ideally, we would always use an
  // integer type that's just the bit-size of the AST type; for example, if
  // sizeof(_BitInt(17)) == 4, _BitInt(17) would turn into i32. That is what's
  // returned by convertTypeForLoadStore. However, that type does not
  // always satisfy the size requirement on memory representation types
  // described above. For example, a 32-bit platform might reasonably set
  // sizeof(_BitInt(65)) == 12, but i96 is likely to have an alloc size
  // of 16 bytes in the LLVM data layout. In these cases, we simply return
  // a byte array of the appropriate size.
  if (T->isBitIntType()) {
    if (typeRequiresSplitIntoByteArray(T, R))
      return llvm::ArrayType::get(CGM.Int8Ty,
                                  Context.getTypeSizeInChars(T).getQuantity());
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));
  }

  if (R->isIntegerTy(1))
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));

  // Else, don't map it.
  return R;
}

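/// typeRequiresSplitIntoByteArray - Return true if the memory representation
/// of the given type must be an i8 array because the alloc size of its
/// converted IR type does not match the AST size of the type.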
bool CodeGenTypes::typeRequiresSplitIntoByteArray(QualType ASTTy,
                                                  llvm::Type *LLVMTy) {
  if (!LLVMTy)
    LLVMTy = ConvertType(ASTTy);

  CharUnits ASTSize = Context.getTypeSizeInChars(ASTTy);
  CharUnits LLVMSize =
      CharUnits::fromQuantity(getDataLayout().getTypeAllocSize(LLVMTy));
  return ASTSize != LLVMSize;
}

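/// convertTypeForLoadStore - Return the IR type used to load or store a value
/// of the given type: _Bool and _BitInt values are widened to their full
/// storage-size integer, boolean matrices become flat vectors, and boolean
/// vectors use their memory representation.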
llvm::Type *CodeGenTypes::convertTypeForLoadStore(QualType T,
                                                  llvm::Type *LLVMTy) {
  if (!LLVMTy)
    LLVMTy = ConvertType(T);

  if (T->isBitIntType())
    return llvm::Type::getIntNTy(
        getLLVMContext(), Context.getTypeSizeInChars(T).getQuantity() * 8);

  if (LLVMTy->isIntegerTy(1))
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));

  if (T->isConstantMatrixBoolType()) {
    // Matrices are loaded and stored atomically as vectors. Therefore we
    // construct a FixedVectorType here instead of returning
    // ConvertTypeForMem(T), which would return an ArrayType.
    const Type *Ty = Context.getCanonicalType(T).getTypePtr();
    const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
    llvm::Type *IRElemTy = ConvertTypeForMem(MT->getElementType());
    return llvm::FixedVectorType::get(IRElemTy, MT->getNumElementsFlattened());
  }

  if (T->isExtVectorBoolType())
    return ConvertTypeForMem(T);

  return LLVMTy;
}

/// isRecordLayoutComplete - Return true if the specified type is already
/// completely laid out.
bool CodeGenTypes::isRecordLayoutComplete(const Type *Ty) const {
  llvm::DenseMap<const Type*, llvm::StructType *>::const_iterator I =
      RecordDeclTypes.find(Ty);
  return I != RecordDeclTypes.end() && !I->second->isOpaque();
}

/// isFuncParamTypeConvertible - Return true if the specified type in a
/// function parameter or result position can be converted to an IR type at
/// this point. This boils down to whether the type is complete.
bool CodeGenTypes::isFuncParamTypeConvertible(QualType Ty) {
  // Some ABIs cannot have their member pointers represented in IR unless
  // certain circumstances have been reached.
  if (const auto *MPT = Ty->getAs<MemberPointerType>())
    return getCXXABI().isMemberPointerConvertible(MPT);

  // If this isn't a tagged type, we can convert it!
  const TagType *TT = Ty->getAs<TagType>();
  if (!TT) return true;

  // Incomplete types cannot be converted.
  return !TT->isIncompleteType();
}

/// Code to verify a given function type is complete, i.e. the return type
/// and all of the parameter types are complete. Also check to see if we are in
/// a RS_StructPointer context, and if so whether any struct types have been
/// pended. If so, we don't want to ask the ABI lowering code to handle a type
/// that cannot be converted to an IR type.
bool CodeGenTypes::isFuncTypeConvertible(const FunctionType *FT) {
  if (!isFuncParamTypeConvertible(FT->getReturnType()))
    return false;

  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
    for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
      if (!isFuncParamTypeConvertible(FPT->getParamType(i)))
        return false;

  return true;
}

/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
  CanQualType T = CGM.getContext().getCanonicalTagType(TD);
  // If this is an enum being completed, then we flush all non-struct types
  // from the cache. This allows function types and other things that may be
  // derived from the enum to be recomputed.
  if (const EnumDecl *ED = dyn_cast<EnumDecl>(TD)) {
    // Only flush the cache if we've actually already converted this type.
    if (TypeCache.count(T->getTypePtr())) {
      // Okay, we formed some types based on this. We speculated that the enum
      // would be lowered to i32, so we only need to flush the cache if this
      // didn't happen.
      if (!ConvertType(ED->getIntegerType())->isIntegerTy(32))
        TypeCache.clear();
    }
    // If necessary, provide the full definition of a type only used with a
    // declaration so far.
    if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
      DI->completeType(ED);
    return;
  }

  // If we completed a RecordDecl that we previously used and converted to an
  // anonymous type, then go ahead and complete it now.
  const RecordDecl *RD = cast<RecordDecl>(TD);
  if (RD->isDependentType()) return;

  // Only complete it if we converted it already. If we haven't converted it
  // yet, we'll just do it lazily.
  if (RecordDeclTypes.count(T.getTypePtr()))
    ConvertRecordDeclType(RD);

  // If necessary, provide the full definition of a type only used with a
  // declaration so far.
  if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
    DI->completeType(RD);
}

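/// RefreshTypeCacheForClass - If the given class was previously converted
/// while its member pointer representation was still opaque, drop the cached
/// types so they can be recomputed with the now-known representation.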
void CodeGenTypes::RefreshTypeCacheForClass(const CXXRecordDecl *RD) {
  CanQualType T = Context.getCanonicalTagType(RD);
  T = Context.getCanonicalType(T);

  const Type *Ty = T.getTypePtr();
  if (RecordsWithOpaqueMemberPointers.count(Ty)) {
    TypeCache.clear();
    RecordsWithOpaqueMemberPointers.clear();
  }
}

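/// getTypeForFormat - Map an llvm::fltSemantics to the corresponding LLVM IR
/// floating-point type. IEEE half is special: it lowers to the native half
/// type only when requested, and to i16 (storage-only) otherwise.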
static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext,
                                    const llvm::fltSemantics &format,
                                    bool UseNativeHalf = false) {
  if (&format == &llvm::APFloat::IEEEhalf()) {
    if (UseNativeHalf)
      return llvm::Type::getHalfTy(VMContext);
    else
      return llvm::Type::getInt16Ty(VMContext);
  }
  if (&format == &llvm::APFloat::BFloat())
    return llvm::Type::getBFloatTy(VMContext);
  if (&format == &llvm::APFloat::IEEEsingle())
    return llvm::Type::getFloatTy(VMContext);
  if (&format == &llvm::APFloat::IEEEdouble())
    return llvm::Type::getDoubleTy(VMContext);
  if (&format == &llvm::APFloat::IEEEquad())
    return llvm::Type::getFP128Ty(VMContext);
  if (&format == &llvm::APFloat::PPCDoubleDouble())
    return llvm::Type::getPPC_FP128Ty(VMContext);
  if (&format == &llvm::APFloat::x87DoubleExtended())
    return llvm::Type::getX86_FP80Ty(VMContext);
  llvm_unreachable("Unknown float format!");
}

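/// ConvertFunctionTypeInternal - Convert a canonical function type to its
/// LLVM form. If the type depends on an incomplete type, or its CGFunctionInfo
/// is already being processed further up the stack, return a placeholder
/// struct type and set SkippedLayout so the type caches get flushed later.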
llvm::Type *CodeGenTypes::ConvertFunctionTypeInternal(QualType QFT) {
  assert(QFT.isCanonical());
  const FunctionType *FT = cast<FunctionType>(QFT.getTypePtr());
  // First, check whether we can build the full function type. If the
  // function type depends on an incomplete type (e.g. a struct or enum), we
  // cannot lower the function type.
  if (!isFuncTypeConvertible(FT)) {
    // This function's type depends on an incomplete tag type.

    // Force conversion of all the relevant record types, to make sure
    // we re-convert the FunctionType when appropriate.
    if (const auto *RD = FT->getReturnType()->getAsRecordDecl())
      ConvertRecordDeclType(RD);
    if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
      for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
        if (const auto *RD = FPT->getParamType(i)->getAsRecordDecl())
          ConvertRecordDeclType(RD);

    SkippedLayout = true;

    // Return a placeholder type.
    return llvm::StructType::get(getLLVMContext());
  }

  // The function type can be built; call the appropriate routines to
  // build it.
  const CGFunctionInfo *FI;
  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
    FI = &arrangeFreeFunctionType(
        CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)));
  } else {
    const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(FT);
    FI = &arrangeFreeFunctionType(
        CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)));
  }

  llvm::Type *ResultType = nullptr;
  // If there is something higher level prodding our CGFunctionInfo, then
  // don't recurse into it again.
  if (FunctionsBeingProcessed.count(FI)) {
    ResultType = llvm::StructType::get(getLLVMContext());
    SkippedLayout = true;
  } else {
    // Otherwise, we're good to go, go ahead and convert it.
    ResultType = GetFunctionType(*FI);
  }

  return ResultType;
}

/// ConvertType - Convert the specified type to its LLVM form.
llvm::Type *CodeGenTypes::ConvertType(QualType T) {
  T = Context.getCanonicalType(T);

  const Type *Ty = T.getTypePtr();

  // For the device-side compilation, CUDA device builtin surface/texture types
  // may be represented in different types.
  if (Context.getLangOpts().CUDAIsDevice) {
    if (T->isCUDADeviceBuiltinSurfaceType()) {
      if (auto *Ty = CGM.getTargetCodeGenInfo()
                         .getCUDADeviceBuiltinSurfaceDeviceType())
        return Ty;
    } else if (T->isCUDADeviceBuiltinTextureType()) {
      if (auto *Ty = CGM.getTargetCodeGenInfo()
                         .getCUDADeviceBuiltinTextureDeviceType())
        return Ty;
    }
  }

  // RecordTypes are cached and processed specially.
  if (const auto *RT = dyn_cast<RecordType>(Ty))
    return ConvertRecordDeclType(RT->getDecl()->getDefinitionOrSelf());

  llvm::Type *CachedType = nullptr;
  auto TCI = TypeCache.find(Ty);
  if (TCI != TypeCache.end())
    CachedType = TCI->second;
    // With expensive checks, check that the type we compute matches the
    // cached type.
#ifndef EXPENSIVE_CHECKS
  if (CachedType)
    return CachedType;
#endif

  // If we don't have it in the cache, convert it now.
  llvm::Type *ResultType = nullptr;
  switch (Ty->getTypeClass()) {
  case Type::Record: // Handled above.
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical or dependent types aren't possible.");

  case Type::Builtin: {
    switch (cast<BuiltinType>(Ty)->getKind()) {
    case BuiltinType::Void:
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      // LLVM void type can only be used as the result of a function call. Just
      // map to the same as char.
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
      break;

    case BuiltinType::Bool:
      // Note that we always return bool as i1 for use as a scalar type.
      ResultType = llvm::Type::getInt1Ty(getLLVMContext());
      break;

    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::Int:
    case BuiltinType::UInt:
    case BuiltinType::Long:
    case BuiltinType::ULong:
    case BuiltinType::LongLong:
    case BuiltinType::ULongLong:
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Char8:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
    case BuiltinType::ShortAccum:
    case BuiltinType::Accum:
    case BuiltinType::LongAccum:
    case BuiltinType::UShortAccum:
    case BuiltinType::UAccum:
    case BuiltinType::ULongAccum:
    case BuiltinType::ShortFract:
    case BuiltinType::Fract:
    case BuiltinType::LongFract:
    case BuiltinType::UShortFract:
    case BuiltinType::UFract:
    case BuiltinType::ULongFract:
    case BuiltinType::SatShortAccum:
    case BuiltinType::SatAccum:
    case BuiltinType::SatLongAccum:
    case BuiltinType::SatUShortAccum:
    case BuiltinType::SatUAccum:
    case BuiltinType::SatULongAccum:
    case BuiltinType::SatShortFract:
    case BuiltinType::SatFract:
    case BuiltinType::SatLongFract:
    case BuiltinType::SatUShortFract:
    case BuiltinType::SatUFract:
    case BuiltinType::SatULongFract:
      ResultType = llvm::IntegerType::get(
          getLLVMContext(), static_cast<unsigned>(Context.getTypeSize(T)));
      break;

    case BuiltinType::Float16:
      ResultType =
          getTypeForFormat(getLLVMContext(), Context.getFloatTypeSemantics(T),
                           /* UseNativeHalf = */ true);
      break;

    case BuiltinType::Half:
      // Half FP can either be storage-only (lowered to i16) or native.
      ResultType = getTypeForFormat(
          getLLVMContext(), Context.getFloatTypeSemantics(T),
          Context.getLangOpts().NativeHalfType ||
              !Context.getTargetInfo().useFP16ConversionIntrinsics());
      break;
    case BuiltinType::LongDouble:
      LongDoubleReferenced = true;
      [[fallthrough]];
    case BuiltinType::BFloat16:
    case BuiltinType::Float:
    case BuiltinType::Double:
    case BuiltinType::Float128:
    case BuiltinType::Ibm128:
      ResultType = getTypeForFormat(getLLVMContext(),
                                    Context.getFloatTypeSemantics(T),
                                    /* UseNativeHalf = */ false);
      break;

    case BuiltinType::NullPtr:
      // Model std::nullptr_t as i8*
      ResultType = llvm::PointerType::getUnqual(getLLVMContext());
      break;

    case BuiltinType::UInt128:
    case BuiltinType::Int128:
      ResultType = llvm::IntegerType::get(getLLVMContext(), 128);
      break;

#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
      ResultType = CGM.getOpenCLRuntime().convertOpenCLSpecificType(Ty);
      break;
#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId) \
    case BuiltinType::Id:
#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId) \
    case BuiltinType::Id:
#include "clang/Basic/AArch64ACLETypes.def"
    {
      ASTContext::BuiltinVectorTypeInfo Info =
          Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
      // The `__mfp8` type maps to `<1 x i8>` which can't be used to build
      // a <N x i8> vector type, hence bypass the call to `ConvertType` for
      // the element type and create the vector type directly.
      auto *EltTy = Info.ElementType->isMFloat8Type()
                        ? llvm::Type::getInt8Ty(getLLVMContext())
                        : ConvertType(Info.ElementType);
      auto *VTy = llvm::VectorType::get(EltTy, Info.EC);
      switch (Info.NumVectors) {
      default:
        llvm_unreachable("Expected 1, 2, 3 or 4 vectors!");
      case 1:
        return VTy;
      case 2:
        return llvm::StructType::get(VTy, VTy);
      case 3:
        return llvm::StructType::get(VTy, VTy, VTy);
      case 4:
        return llvm::StructType::get(VTy, VTy, VTy, VTy);
      }
    }
    case BuiltinType::SveCount:
      return llvm::TargetExtType::get(getLLVMContext(), "aarch64.svcount");
    case BuiltinType::MFloat8:
      return llvm::VectorType::get(llvm::Type::getInt8Ty(getLLVMContext()), 1,
                                   false);
#define PPC_VECTOR_TYPE(Name, Id, Size) \
    case BuiltinType::Id: \
      ResultType = \
          llvm::FixedVectorType::get(ConvertType(Context.BoolTy), Size); \
      break;
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
    {
      ASTContext::BuiltinVectorTypeInfo Info =
          Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
      if (Info.NumVectors != 1) {
        unsigned I8EltCount =
            Info.EC.getKnownMinValue() *
            ConvertType(Info.ElementType)->getScalarSizeInBits() / 8;
        return llvm::TargetExtType::get(
            getLLVMContext(), "riscv.vector.tuple",
            llvm::ScalableVectorType::get(
                llvm::Type::getInt8Ty(getLLVMContext()), I8EltCount),
            Info.NumVectors);
      }
      return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
                                           Info.EC.getKnownMinValue());
    }
#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS) \
    case BuiltinType::Id: { \
      if (BuiltinType::Id == BuiltinType::WasmExternRef) \
        ResultType = CGM.getTargetCodeGenInfo().getWasmExternrefReferenceType(); \
      else \
        llvm_unreachable("Unexpected wasm reference builtin type!"); \
    } break;
#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define AMDGPU_OPAQUE_PTR_TYPE(Name, Id, SingletonId, Width, Align, AS) \
    case BuiltinType::Id: { \
      if (BuiltinType::Id == BuiltinType::AMDGPUTexture) { \
        return llvm::FixedVectorType::get( \
            llvm::Type::getInt32Ty(getLLVMContext()), 8); \
      } \
      return llvm::PointerType::get(getLLVMContext(), AS); \
    }
#define AMDGPU_NAMED_BARRIER_TYPE(Name, Id, SingletonId, Width, Align, Scope) \
    case BuiltinType::Id: \
      return llvm::TargetExtType::get(getLLVMContext(), \
                                      "amdgcn.named.barrier", {}, {Scope});
#define AMDGPU_FEATURE_PREDICATE_TYPE(Name, Id, SingletonId, Width, Align) \
    case BuiltinType::Id: \
      return ConvertType(getContext().getLogicalOperationType());
#include "clang/Basic/AMDGPUTypes.def"
#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/HLSLIntangibleTypes.def"
      ResultType = CGM.getHLSLRuntime().convertHLSLSpecificType(Ty);
      break;
    case BuiltinType::Dependent:
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
    case BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
      llvm_unreachable("Unexpected placeholder builtin type!");
    }
    break;
  }
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Unexpected undeduced type!");
  case Type::Complex: {
    llvm::Type *EltTy = ConvertType(cast<ComplexType>(Ty)->getElementType());
    ResultType = llvm::StructType::get(EltTy, EltTy);
    break;
  }
  case Type::LValueReference:
  case Type::RValueReference: {
    const ReferenceType *RTy = cast<ReferenceType>(Ty);
    QualType ETy = RTy->getPointeeType();
    unsigned AS = getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
    break;
  }
  case Type::Pointer: {
    const PointerType *PTy = cast<PointerType>(Ty);
    QualType ETy = PTy->getPointeeType();
    unsigned AS = getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
    break;
  }

  case Type::VariableArray: {
    const VariableArrayType *A = cast<VariableArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // VLAs resolve to the innermost element type; this matches
    // the return of alloca, and there isn't any obviously better choice.
    ResultType = ConvertTypeForMem(A->getElementType());
    break;
  }
  case Type::IncompleteArray: {
    const IncompleteArrayType *A = cast<IncompleteArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // int X[] -> [0 x int], unless the element type is not sized. If it is
    // unsized (e.g. an incomplete struct) just use [0 x i8].
    ResultType = ConvertTypeForMem(A->getElementType());
    if (!ResultType->isSized()) {
      SkippedLayout = true;
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
    }
    ResultType = llvm::ArrayType::get(ResultType, 0);
    break;
  }
  case Type::ArrayParameter:
  case Type::ConstantArray: {
    const ConstantArrayType *A = cast<ConstantArrayType>(Ty);
    llvm::Type *EltTy = ConvertTypeForMem(A->getElementType());

    // Lower arrays of undefined struct type to arrays of i8 just to have a
    // concrete type.
    if (!EltTy->isSized()) {
      SkippedLayout = true;
      EltTy = llvm::Type::getInt8Ty(getLLVMContext());
    }

    ResultType = llvm::ArrayType::get(EltTy, A->getZExtSize());
    break;
  }
  case Type::ExtVector:
  case Type::Vector: {
    const auto *VT = cast<VectorType>(Ty);
    // An ext_vector_type of Bool is really a vector of bits.
    llvm::Type *IRElemTy = VT->isPackedVectorBoolType(Context)
                               ? llvm::Type::getInt1Ty(getLLVMContext())
                           : VT->getElementType()->isMFloat8Type()
                               ? llvm::Type::getInt8Ty(getLLVMContext())
                               : ConvertType(VT->getElementType());
    ResultType = llvm::FixedVectorType::get(IRElemTy, VT->getNumElements());
    break;
  }
  case Type::ConstantMatrix: {
    const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
    ResultType =
        llvm::FixedVectorType::get(ConvertType(MT->getElementType()),
                                   MT->getNumRows() * MT->getNumColumns());
    break;
  }
  case Type::FunctionNoProto:
  case Type::FunctionProto:
    ResultType = ConvertFunctionTypeInternal(T);
    break;
  case Type::ObjCObject:
    ResultType = ConvertType(cast<ObjCObjectType>(Ty)->getBaseType());
    break;

  case Type::ObjCInterface: {
    // Objective-C interfaces are always opaque (outside of the
    // runtime, which can do whatever it likes); we never refine
    // these.
    llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(Ty)];
    if (!T)
      T = llvm::StructType::create(getLLVMContext());
    ResultType = T;
    break;
  }

  case Type::ObjCObjectPointer:
    ResultType = llvm::PointerType::getUnqual(getLLVMContext());
    break;

  case Type::Enum: {
    const auto *ED = Ty->castAsEnumDecl();
    if (ED->isCompleteDefinition() || ED->isFixed())
      return ConvertType(ED->getIntegerType());
    // Return a placeholder 'i32' type. This can be changed later when the
    // type is defined (see UpdateCompletedType), but is likely to be the
    // "right" answer.
    ResultType = llvm::Type::getInt32Ty(getLLVMContext());
    break;
  }

  case Type::BlockPointer: {
    // Block pointers lower to function type. For function type,
    // getTargetAddressSpace() returns default address space for
    // function pointer i.e. program address space. Therefore, for block
    // pointers, it is important to pass the pointee AST address space when
    // calling getTargetAddressSpace(), to ensure that we get the LLVM IR
    // address space for data pointers and not function pointers.
    const QualType FTy = cast<BlockPointerType>(Ty)->getPointeeType();
    unsigned AS = Context.getTargetAddressSpace(FTy.getAddressSpace());
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
    break;
  }

  case Type::MemberPointer: {
    auto *MPTy = cast<MemberPointerType>(Ty);
    if (!getCXXABI().isMemberPointerConvertible(MPTy)) {
      CanQualType T = CGM.getContext().getCanonicalTagType(
          MPTy->getMostRecentCXXRecordDecl());
      auto Insertion =
          RecordsWithOpaqueMemberPointers.try_emplace(T.getTypePtr());
      if (Insertion.second)
        Insertion.first->second = llvm::StructType::create(getLLVMContext());
      ResultType = Insertion.first->second;
    } else {
      ResultType = getCXXABI().ConvertMemberPointerType(MPTy);
    }
    break;
  }

  case Type::Atomic: {
    QualType valueType = cast<AtomicType>(Ty)->getValueType();
    ResultType = ConvertTypeForMem(valueType);

    // Pad out to the inflated size if necessary.
    uint64_t valueSize = Context.getTypeSize(valueType);
    uint64_t atomicSize = Context.getTypeSize(Ty);
    if (valueSize != atomicSize) {
      assert(valueSize < atomicSize);
      llvm::Type *elts[] = {
        ResultType,
        llvm::ArrayType::get(CGM.Int8Ty, (atomicSize - valueSize) / 8)
      };
      ResultType =
          llvm::StructType::get(getLLVMContext(), llvm::ArrayRef(elts));
    }
    break;
  }
  case Type::Pipe: {
    ResultType = CGM.getOpenCLRuntime().getPipeType(cast<PipeType>(Ty));
    break;
  }
  case Type::BitInt: {
    const auto &EIT = cast<BitIntType>(Ty);
    ResultType = llvm::Type::getIntNTy(getLLVMContext(), EIT->getNumBits());
    break;
  }
  case Type::HLSLAttributedResource:
  case Type::HLSLInlineSpirv:
    ResultType = CGM.getHLSLRuntime().convertHLSLSpecificType(Ty);
    break;
  case Type::OverflowBehavior:
    ResultType =
        ConvertType(cast<OverflowBehaviorType>(Ty)->getUnderlyingType());
    break;
  }

  assert(ResultType && "Didn't convert a type?");
  assert((!CachedType || CachedType == ResultType) &&
         "Cached type doesn't match computed type");

  TypeCache[Ty] = ResultType;
  return ResultType;
}

bool CodeGenModule::isPaddedAtomicType(QualType type) {
  return isPaddedAtomicType(type->castAs<AtomicType>());
}

bool CodeGenModule::isPaddedAtomicType(const AtomicType *type) {
  return Context.getTypeSize(type) !=
         Context.getTypeSize(type->getValueType());
}

/// ConvertRecordDeclType - Lay out a tagged decl type like struct or union.
llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
  // TagDecl's are not necessarily unique, instead use the (clang)
  // type connected to the decl.
  const Type *Key = Context.getCanonicalTagType(RD).getTypePtr();

  llvm::StructType *&Entry = RecordDeclTypes[Key];

  // If we don't have a StructType at all yet, create the forward declaration.
  if (!Entry) {
    Entry = llvm::StructType::create(getLLVMContext());
    addRecordTypeName(RD, Entry, "");
  }
  llvm::StructType *Ty = Entry;

  // If this is still a forward declaration, or the LLVM type is already
  // complete, there's nothing more to do.
  RD = RD->getDefinition();
  if (!RD || !RD->isCompleteDefinition() || !Ty->isOpaque())
    return Ty;

  // Force conversion of non-virtual base classes recursively.
  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CRD->bases()) {
      if (I.isVirtual()) continue;
      ConvertRecordDeclType(I.getType()->castAsRecordDecl());
    }
  }

  // Layout fields.
  std::unique_ptr<CGRecordLayout> Layout = ComputeRecordLayout(RD, Ty);
  CGRecordLayouts[Key] = std::move(Layout);

  // If this struct blocked a FunctionType conversion, then recompute whatever
  // was derived from that.
  // FIXME: This is hugely overconservative.
  if (SkippedLayout)
    TypeCache.clear();

  return Ty;
}

/// getCGRecordLayout - Return record layout info for the given record decl.
const CGRecordLayout &
CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
  const Type *Key = Context.getCanonicalTagType(RD).getTypePtr();

  auto I = CGRecordLayouts.find(Key);
  if (I != CGRecordLayouts.end())
    return *I->second;
  // Compute the type information.
  ConvertRecordDeclType(RD);

  // Now try again.
  I = CGRecordLayouts.find(Key);

  assert(I != CGRecordLayouts.end() &&
         "Unable to find record layout information for type");
  return *I->second;
}

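/// isPointerZeroInitializable - Check whether zero-filled memory is a valid
/// null value for the given pointer (or nullptr_t) type.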
bool CodeGenTypes::isPointerZeroInitializable(QualType T) {
  assert((T->isAnyPointerType() || T->isBlockPointerType() ||
          T->isNullPtrType()) &&
         "Invalid type");
  return isZeroInitializable(T);
}

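/// isZeroInitializable - Return true if filling memory of the given type with
/// zero bytes produces a valid "zero" value; member pointers, for example,
/// may use a nonzero null representation depending on the C++ ABI.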
bool CodeGenTypes::isZeroInitializable(QualType T) {
  if (T->getAs<PointerType>() || T->isNullPtrType())
    return Context.getTargetNullPointerValue(T) == 0;

  if (const auto *AT = Context.getAsArrayType(T)) {
    if (isa<IncompleteArrayType>(AT))
      return true;
    if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
      if (Context.getConstantArrayElementCount(CAT) == 0)
        return true;
    T = Context.getBaseElementType(T);
  }

  // Records are non-zero-initializable if they contain any
  // non-zero-initializable subobjects.
  if (const auto *RD = T->getAsRecordDecl())
    return isZeroInitializable(RD);

  // We have to ask the ABI about member pointers.
  if (const MemberPointerType *MPT = T->getAs<MemberPointerType>())
    return getCXXABI().isZeroInitializable(MPT);

  // HLSL Inline SPIR-V types are non-zero-initializable.
  if (T->getAs<HLSLInlineSpirvType>())
    return false;

  // Everything else is okay.
  return true;
}

bool CodeGenTypes::isZeroInitializable(const RecordDecl *RD) {
  return getCGRecordLayout(RD).isZeroInitializable();
}

unsigned CodeGenTypes::getTargetAddressSpace(QualType T) const {
  // Return the address space for the type. If the type is a
  // function type without an address space qualifier, the
  // program address space is used. Otherwise, the target picks
  // the best address space based on the type information.
  return T->isFunctionType() && !T.hasAddressSpace()
             ? getDataLayout().getProgramAddressSpace()
             : getContext().getTargetAddressSpace(T.getAddressSpace());
}
938