1 | //===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This is the code that handles AST -> LLVM type lowering. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #include "CodeGenTypes.h" |
14 | #include "CGCXXABI.h" |
15 | #include "CGCall.h" |
16 | #include "CGDebugInfo.h" |
17 | #include "CGHLSLRuntime.h" |
18 | #include "CGOpenCLRuntime.h" |
19 | #include "CGRecordLayout.h" |
20 | #include "TargetInfo.h" |
21 | #include "clang/AST/ASTContext.h" |
22 | #include "clang/AST/DeclCXX.h" |
23 | #include "clang/AST/DeclObjC.h" |
24 | #include "clang/AST/Expr.h" |
25 | #include "clang/AST/RecordLayout.h" |
26 | #include "clang/CodeGen/CGFunctionInfo.h" |
27 | #include "llvm/IR/DataLayout.h" |
28 | #include "llvm/IR/DerivedTypes.h" |
29 | #include "llvm/IR/Module.h" |
30 | |
31 | using namespace clang; |
32 | using namespace CodeGen; |
33 | |
34 | CodeGenTypes::CodeGenTypes(CodeGenModule &cgm) |
35 | : CGM(cgm), Context(cgm.getContext()), TheModule(cgm.getModule()), |
36 | Target(cgm.getTarget()) { |
37 | SkippedLayout = false; |
38 | LongDoubleReferenced = false; |
39 | } |
40 | |
41 | CodeGenTypes::~CodeGenTypes() { |
42 | for (llvm::FoldingSet<CGFunctionInfo>::iterator |
43 | I = FunctionInfos.begin(), E = FunctionInfos.end(); I != E; ) |
44 | delete &*I++; |
45 | } |
46 | |
47 | CGCXXABI &CodeGenTypes::getCXXABI() const { return getCGM().getCXXABI(); } |
48 | |
49 | const CodeGenOptions &CodeGenTypes::getCodeGenOpts() const { |
50 | return CGM.getCodeGenOpts(); |
51 | } |
52 | |
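/// addRecordTypeName - Compute a name for the IR struct that will represent
/// the given record, and attach it to the type. Illustratively (the exact
/// spelling depends on the printing policy), a class Foo in namespace N is
/// named "class.N::Foo", an unnamed struct becomes "struct.anon", and any
/// non-empty suffix (e.g. ".base") is appended verbatim.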
53 | void CodeGenTypes::addRecordTypeName(const RecordDecl *RD, |
54 | llvm::StructType *Ty, |
55 | StringRef suffix) { |
56 | SmallString<256> TypeName; |
57 | llvm::raw_svector_ostream OS(TypeName); |
58 | OS << RD->getKindName() << '.'; |
59 | |
60 | // FIXME: We probably want to make more tweaks to the printing policy. For |
61 | // example, we should probably enable PrintCanonicalTypes and |
62 | // FullyQualifiedNames. |
63 | PrintingPolicy Policy = RD->getASTContext().getPrintingPolicy(); |
64 | Policy.SuppressInlineNamespace = |
65 | PrintingPolicy::SuppressInlineNamespaceMode::None; |
66 | |
67 | // Name the codegen type after the typedef name |
68 | // if there is no tag type name available |
69 | if (RD->getIdentifier()) { |
70 | // FIXME: We should not have to check for a null decl context here. |
71 | // Right now we do it because the implicit Obj-C decls don't have one. |
72 | if (RD->getDeclContext()) |
73 | RD->printQualifiedName(OS, Policy); |
74 | else |
75 | RD->printName(OS, Policy); |
76 | } else if (const TypedefNameDecl *TDD = RD->getTypedefNameForAnonDecl()) { |
77 | // FIXME: We should not have to check for a null decl context here. |
78 | // Right now we do it because the implicit Obj-C decls don't have one. |
79 | if (TDD->getDeclContext()) |
80 | TDD->printQualifiedName(OS, Policy); |
81 | else |
82 | TDD->printName(OS); |
83 | } else |
    OS << "anon";
85 | |
86 | if (!suffix.empty()) |
87 | OS << suffix; |
88 | |
89 | Ty->setName(OS.str()); |
90 | } |
91 | |
92 | /// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from |
93 | /// ConvertType in that it is used to convert to the memory representation for |
94 | /// a type. For example, the scalar representation for _Bool is i1, but the |
95 | /// memory representation is usually i8 or i32, depending on the target. |
96 | /// |
97 | /// We generally assume that the alloc size of this type under the LLVM |
98 | /// data layout is the same as the size of the AST type. The alignment |
99 | /// does not have to match: Clang should always use explicit alignments |
100 | /// and packed structs as necessary to produce the layout it needs. |
101 | /// But the size does need to be exactly right or else things like struct |
102 | /// layout will break. |
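///
/// Illustrative mappings (exact widths are target-dependent):
///   _Bool                       scalar i1,  memory usually i8
///   _BitInt(17)                 scalar i17, memory i32 (when its size is 4)
///   ext_vector_type(4) of bool  scalar <4 x i1>, memory i8 (byte-padded)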
103 | llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) { |
104 | if (T->isConstantMatrixType()) { |
105 | const Type *Ty = Context.getCanonicalType(T).getTypePtr(); |
    const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
    return llvm::ArrayType::get(ConvertType(MT->getElementType()),
                                MT->getNumRows() * MT->getNumColumns());
109 | } |
110 | |
111 | llvm::Type *R = ConvertType(T); |
112 | |
113 | // Check for the boolean vector case. |
114 | if (T->isExtVectorBoolType()) { |
    auto *FixedVT = cast<llvm::FixedVectorType>(R);
116 | |
117 | if (Context.getLangOpts().HLSL) { |
      llvm::Type *IRElemTy = ConvertTypeForMem(Context.BoolTy);
      return llvm::FixedVectorType::get(IRElemTy, FixedVT->getNumElements());
120 | } |
121 | |
122 | // Pad to at least one byte. |
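    // (For example, a three-element bool vector is stored as i8; a
    // seventeen-element one becomes i17.)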
    uint64_t BytePadded = std::max<uint64_t>(FixedVT->getNumElements(), 8);
    return llvm::IntegerType::get(FixedVT->getContext(), BytePadded);
125 | } |
126 | |
127 | // If T is _Bool or a _BitInt type, ConvertType will produce an IR type |
128 | // with the exact semantic bit-width of the AST type; for example, |
129 | // _BitInt(17) will turn into i17. In memory, however, we need to store |
130 | // such values extended to their full storage size as decided by AST |
131 | // layout; this is an ABI requirement. Ideally, we would always use an |
132 | // integer type that's just the bit-size of the AST type; for example, if |
133 | // sizeof(_BitInt(17)) == 4, _BitInt(17) would turn into i32. That is what's |
134 | // returned by convertTypeForLoadStore. However, that type does not |
135 | // always satisfy the size requirement on memory representation types |
  // described above. For example, a 32-bit platform might reasonably set
  // sizeof(_BitInt(65)) == 12, but i96 is likely to have an alloc size
138 | // of 16 bytes in the LLVM data layout. In these cases, we simply return |
139 | // a byte array of the appropriate size. |
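  //
  // Illustrative mapping under those assumptions (sizeof(_BitInt(65)) == 12,
  // i96 alloc size of 16 bytes):
  //   scalar form (ConvertType):                  i65
  //   load/store form (convertTypeForLoadStore):  i96
  //   memory form (this function):                [12 x i8]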
140 | if (T->isBitIntType()) { |
    if (typeRequiresSplitIntoByteArray(T, R))
      return llvm::ArrayType::get(CGM.Int8Ty,
                                  Context.getTypeSizeInChars(T).getQuantity());
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));
146 | } |
147 | |
  if (R->isIntegerTy(1))
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));
151 | |
152 | // Else, don't map it. |
153 | return R; |
154 | } |
155 | |
156 | bool CodeGenTypes::typeRequiresSplitIntoByteArray(QualType ASTTy, |
157 | llvm::Type *LLVMTy) { |
158 | if (!LLVMTy) |
    LLVMTy = ConvertType(ASTTy);

  CharUnits ASTSize = Context.getTypeSizeInChars(ASTTy);
  CharUnits LLVMSize =
      CharUnits::fromQuantity(getDataLayout().getTypeAllocSize(LLVMTy));
164 | return ASTSize != LLVMSize; |
165 | } |
166 | |
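/// convertTypeForLoadStore - Convert type T to the IR type used when loading
/// or storing it: _BitInt and boolean types are widened to the integer type
/// matching their full in-memory storage size, boolean vectors use their
/// memory representation, and all other types are returned unchanged.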
167 | llvm::Type *CodeGenTypes::convertTypeForLoadStore(QualType T, |
168 | llvm::Type *LLVMTy) { |
169 | if (!LLVMTy) |
170 | LLVMTy = ConvertType(T); |
171 | |
172 | if (T->isBitIntType()) |
    return llvm::Type::getIntNTy(
        getLLVMContext(), Context.getTypeSizeInChars(T).getQuantity() * 8);

  if (LLVMTy->isIntegerTy(1))
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));
179 | |
180 | if (T->isExtVectorBoolType()) |
181 | return ConvertTypeForMem(T); |
182 | |
183 | return LLVMTy; |
184 | } |
185 | |
186 | /// isRecordLayoutComplete - Return true if the specified type is already |
187 | /// completely laid out. |
188 | bool CodeGenTypes::isRecordLayoutComplete(const Type *Ty) const { |
189 | llvm::DenseMap<const Type*, llvm::StructType *>::const_iterator I = |
      RecordDeclTypes.find(Ty);
191 | return I != RecordDeclTypes.end() && !I->second->isOpaque(); |
192 | } |
193 | |
194 | /// isFuncParamTypeConvertible - Return true if the specified type in a |
195 | /// function parameter or result position can be converted to an IR type at this |
/// point. This boils down to whether it is complete.
197 | bool CodeGenTypes::isFuncParamTypeConvertible(QualType Ty) { |
198 | // Some ABIs cannot have their member pointers represented in IR unless |
199 | // certain circumstances have been reached. |
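  // (For example, the Microsoft C++ ABI cannot choose a member pointer
  // representation until the inheritance model of the class is known.)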
200 | if (const auto *MPT = Ty->getAs<MemberPointerType>()) |
201 | return getCXXABI().isMemberPointerConvertible(MPT); |
202 | |
203 | // If this isn't a tagged type, we can convert it! |
204 | const TagType *TT = Ty->getAs<TagType>(); |
205 | if (!TT) return true; |
206 | |
207 | // Incomplete types cannot be converted. |
208 | return !TT->isIncompleteType(); |
209 | } |
210 | |
211 | |
212 | /// Code to verify a given function type is complete, i.e. the return type |
213 | /// and all of the parameter types are complete. Also check to see if we are in |
214 | /// a RS_StructPointer context, and if so whether any struct types have been |
215 | /// pended. If so, we don't want to ask the ABI lowering code to handle a type |
216 | /// that cannot be converted to an IR type. |
217 | bool CodeGenTypes::isFuncTypeConvertible(const FunctionType *FT) { |
  if (!isFuncParamTypeConvertible(FT->getReturnType()))
    return false;

  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
    for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
      if (!isFuncParamTypeConvertible(FPT->getParamType(i)))
224 | return false; |
225 | |
226 | return true; |
227 | } |
228 | |
229 | /// UpdateCompletedType - When we find the full definition for a TagDecl, |
230 | /// replace the 'opaque' type we previously made for it if applicable. |
231 | void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) { |
232 | // If this is an enum being completed, then we flush all non-struct types from |
233 | // the cache. This allows function types and other things that may be derived |
234 | // from the enum to be recomputed. |
  if (const EnumDecl *ED = dyn_cast<EnumDecl>(TD)) {
    // Only flush the cache if we've actually already converted this type.
    if (TypeCache.count(ED->getTypeForDecl())) {
      // Okay, we formed some types based on this. We speculated that the enum
      // would be lowered to i32, so we only need to flush the cache if this
      // didn't happen.
      if (!ConvertType(ED->getIntegerType())->isIntegerTy(32))
242 | TypeCache.clear(); |
243 | } |
244 | // If necessary, provide the full definition of a type only used with a |
245 | // declaration so far. |
246 | if (CGDebugInfo *DI = CGM.getModuleDebugInfo()) |
247 | DI->completeType(ED); |
248 | return; |
249 | } |
250 | |
251 | // If we completed a RecordDecl that we previously used and converted to an |
252 | // anonymous type, then go ahead and complete it now. |
  const RecordDecl *RD = cast<RecordDecl>(TD);
254 | if (RD->isDependentType()) return; |
255 | |
256 | // Only complete it if we converted it already. If we haven't converted it |
257 | // yet, we'll just do it lazily. |
  if (RecordDeclTypes.count(Context.getTagDeclType(RD).getTypePtr()))
    ConvertRecordDeclType(RD);
260 | |
261 | // If necessary, provide the full definition of a type only used with a |
262 | // declaration so far. |
263 | if (CGDebugInfo *DI = CGM.getModuleDebugInfo()) |
264 | DI->completeType(RD); |
265 | } |
266 | |
267 | void CodeGenTypes::RefreshTypeCacheForClass(const CXXRecordDecl *RD) { |
  QualType T = Context.getRecordType(RD);
  T = Context.getCanonicalType(T);

  const Type *Ty = T.getTypePtr();
  if (RecordsWithOpaqueMemberPointers.count(Ty)) {
273 | TypeCache.clear(); |
274 | RecordsWithOpaqueMemberPointers.clear(); |
275 | } |
276 | } |
277 | |
278 | static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext, |
279 | const llvm::fltSemantics &format, |
280 | bool UseNativeHalf = false) { |
281 | if (&format == &llvm::APFloat::IEEEhalf()) { |
282 | if (UseNativeHalf) |
      return llvm::Type::getHalfTy(VMContext);
    else
      return llvm::Type::getInt16Ty(VMContext);
  }
  if (&format == &llvm::APFloat::BFloat())
    return llvm::Type::getBFloatTy(VMContext);
  if (&format == &llvm::APFloat::IEEEsingle())
    return llvm::Type::getFloatTy(VMContext);
  if (&format == &llvm::APFloat::IEEEdouble())
    return llvm::Type::getDoubleTy(VMContext);
  if (&format == &llvm::APFloat::IEEEquad())
    return llvm::Type::getFP128Ty(VMContext);
  if (&format == &llvm::APFloat::PPCDoubleDouble())
    return llvm::Type::getPPC_FP128Ty(VMContext);
  if (&format == &llvm::APFloat::x87DoubleExtended())
    return llvm::Type::getX86_FP80Ty(VMContext);
  llvm_unreachable("Unknown float format!");
300 | } |
301 | |
302 | llvm::Type *CodeGenTypes::ConvertFunctionTypeInternal(QualType QFT) { |
303 | assert(QFT.isCanonical()); |
  const FunctionType *FT = cast<FunctionType>(QFT.getTypePtr());
305 | // First, check whether we can build the full function type. If the |
306 | // function type depends on an incomplete type (e.g. a struct or enum), we |
307 | // cannot lower the function type. |
308 | if (!isFuncTypeConvertible(FT)) { |
309 | // This function's type depends on an incomplete tag type. |
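    // For example, given "struct S; S f(S);", the type of f cannot be
    // lowered until S is completed, so we hand back a placeholder below and
    // re-convert once the record type is converted.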
310 | |
311 | // Force conversion of all the relevant record types, to make sure |
312 | // we re-convert the FunctionType when appropriate. |
313 | if (const RecordType *RT = FT->getReturnType()->getAs<RecordType>()) |
      ConvertRecordDeclType(RT->getDecl());
    if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
      for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
        if (const RecordType *RT = FPT->getParamType(i)->getAs<RecordType>())
          ConvertRecordDeclType(RT->getDecl());
319 | |
320 | SkippedLayout = true; |
321 | |
322 | // Return a placeholder type. |
    return llvm::StructType::get(getLLVMContext());
324 | } |
325 | |
326 | // The function type can be built; call the appropriate routines to |
327 | // build it. |
328 | const CGFunctionInfo *FI; |
  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
    FI = &arrangeFreeFunctionType(
        CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)));
  } else {
    const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(FT);
    FI = &arrangeFreeFunctionType(
        CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)));
336 | } |
337 | |
338 | llvm::Type *ResultType = nullptr; |
339 | // If there is something higher level prodding our CGFunctionInfo, then |
340 | // don't recurse into it again. |
  if (FunctionsBeingProcessed.count(FI)) {
342 | |
    ResultType = llvm::StructType::get(getLLVMContext());
344 | SkippedLayout = true; |
345 | } else { |
346 | |
347 | // Otherwise, we're good to go, go ahead and convert it. |
    ResultType = GetFunctionType(*FI);
349 | } |
350 | |
351 | return ResultType; |
352 | } |
353 | |
354 | /// ConvertType - Convert the specified type to its LLVM form. |
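/// A few illustrative results: 'int' becomes an integer type of the target's
/// int width, '_Bool' becomes i1 (its scalar form), and all pointers lower to
/// an opaque 'ptr' in the appropriate address space.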
355 | llvm::Type *CodeGenTypes::ConvertType(QualType T) { |
356 | T = Context.getCanonicalType(T); |
357 | |
358 | const Type *Ty = T.getTypePtr(); |
359 | |
360 | // For the device-side compilation, CUDA device builtin surface/texture types |
361 | // may be represented in different types. |
362 | if (Context.getLangOpts().CUDAIsDevice) { |
363 | if (T->isCUDADeviceBuiltinSurfaceType()) { |
364 | if (auto *Ty = CGM.getTargetCodeGenInfo() |
365 | .getCUDADeviceBuiltinSurfaceDeviceType()) |
366 | return Ty; |
367 | } else if (T->isCUDADeviceBuiltinTextureType()) { |
368 | if (auto *Ty = CGM.getTargetCodeGenInfo() |
369 | .getCUDADeviceBuiltinTextureDeviceType()) |
370 | return Ty; |
371 | } |
372 | } |
373 | |
374 | // RecordTypes are cached and processed specially. |
  if (const RecordType *RT = dyn_cast<RecordType>(Ty))
    return ConvertRecordDeclType(RT->getDecl());
377 | |
378 | llvm::Type *CachedType = nullptr; |
  auto TCI = TypeCache.find(Ty);
380 | if (TCI != TypeCache.end()) |
381 | CachedType = TCI->second; |
382 | // With expensive checks, check that the type we compute matches the |
383 | // cached type. |
384 | #ifndef EXPENSIVE_CHECKS |
385 | if (CachedType) |
386 | return CachedType; |
387 | #endif |
388 | |
389 | // If we don't have it in the cache, convert it now. |
390 | llvm::Type *ResultType = nullptr; |
391 | switch (Ty->getTypeClass()) { |
392 | case Type::Record: // Handled above. |
393 | #define TYPE(Class, Base) |
394 | #define ABSTRACT_TYPE(Class, Base) |
395 | #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: |
396 | #define DEPENDENT_TYPE(Class, Base) case Type::Class: |
397 | #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: |
398 | #include "clang/AST/TypeNodes.inc" |
    llvm_unreachable("Non-canonical or dependent types aren't possible.");
400 | |
401 | case Type::Builtin: { |
    switch (cast<BuiltinType>(Ty)->getKind()) {
403 | case BuiltinType::Void: |
404 | case BuiltinType::ObjCId: |
405 | case BuiltinType::ObjCClass: |
406 | case BuiltinType::ObjCSel: |
407 | // LLVM void type can only be used as the result of a function call. Just |
408 | // map to the same as char. |
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
410 | break; |
411 | |
412 | case BuiltinType::Bool: |
413 | // Note that we always return bool as i1 for use as a scalar type. |
      ResultType = llvm::Type::getInt1Ty(getLLVMContext());
415 | break; |
416 | |
417 | case BuiltinType::Char_S: |
418 | case BuiltinType::Char_U: |
419 | case BuiltinType::SChar: |
420 | case BuiltinType::UChar: |
421 | case BuiltinType::Short: |
422 | case BuiltinType::UShort: |
423 | case BuiltinType::Int: |
424 | case BuiltinType::UInt: |
425 | case BuiltinType::Long: |
426 | case BuiltinType::ULong: |
427 | case BuiltinType::LongLong: |
428 | case BuiltinType::ULongLong: |
429 | case BuiltinType::WChar_S: |
430 | case BuiltinType::WChar_U: |
431 | case BuiltinType::Char8: |
432 | case BuiltinType::Char16: |
433 | case BuiltinType::Char32: |
434 | case BuiltinType::ShortAccum: |
435 | case BuiltinType::Accum: |
436 | case BuiltinType::LongAccum: |
437 | case BuiltinType::UShortAccum: |
438 | case BuiltinType::UAccum: |
439 | case BuiltinType::ULongAccum: |
440 | case BuiltinType::ShortFract: |
441 | case BuiltinType::Fract: |
442 | case BuiltinType::LongFract: |
443 | case BuiltinType::UShortFract: |
444 | case BuiltinType::UFract: |
445 | case BuiltinType::ULongFract: |
446 | case BuiltinType::SatShortAccum: |
447 | case BuiltinType::SatAccum: |
448 | case BuiltinType::SatLongAccum: |
449 | case BuiltinType::SatUShortAccum: |
450 | case BuiltinType::SatUAccum: |
451 | case BuiltinType::SatULongAccum: |
452 | case BuiltinType::SatShortFract: |
453 | case BuiltinType::SatFract: |
454 | case BuiltinType::SatLongFract: |
455 | case BuiltinType::SatUShortFract: |
456 | case BuiltinType::SatUFract: |
457 | case BuiltinType::SatULongFract: |
      ResultType = llvm::IntegerType::get(
          getLLVMContext(), static_cast<unsigned>(Context.getTypeSize(T)));
460 | break; |
461 | |
462 | case BuiltinType::Float16: |
463 | ResultType = |
          getTypeForFormat(getLLVMContext(), Context.getFloatTypeSemantics(T),
                           /* UseNativeHalf = */ true);
466 | break; |
467 | |
468 | case BuiltinType::Half: |
469 | // Half FP can either be storage-only (lowered to i16) or native. |
470 | ResultType = getTypeForFormat( |
          getLLVMContext(), Context.getFloatTypeSemantics(T),
          Context.getLangOpts().NativeHalfType ||
              !Context.getTargetInfo().useFP16ConversionIntrinsics());
474 | break; |
475 | case BuiltinType::LongDouble: |
476 | LongDoubleReferenced = true; |
477 | [[fallthrough]]; |
478 | case BuiltinType::BFloat16: |
479 | case BuiltinType::Float: |
480 | case BuiltinType::Double: |
481 | case BuiltinType::Float128: |
482 | case BuiltinType::Ibm128: |
      ResultType = getTypeForFormat(getLLVMContext(),
                                    Context.getFloatTypeSemantics(T),
                                    /* UseNativeHalf = */ false);
486 | break; |
487 | |
488 | case BuiltinType::NullPtr: |
489 | // Model std::nullptr_t as i8* |
      ResultType = llvm::PointerType::getUnqual(getLLVMContext());
491 | break; |
492 | |
493 | case BuiltinType::UInt128: |
494 | case BuiltinType::Int128: |
      ResultType = llvm::IntegerType::get(getLLVMContext(), 128);
496 | break; |
497 | |
498 | #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ |
499 | case BuiltinType::Id: |
500 | #include "clang/Basic/OpenCLImageTypes.def" |
501 | #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ |
502 | case BuiltinType::Id: |
503 | #include "clang/Basic/OpenCLExtensionTypes.def" |
504 | case BuiltinType::OCLSampler: |
505 | case BuiltinType::OCLEvent: |
506 | case BuiltinType::OCLClkEvent: |
507 | case BuiltinType::OCLQueue: |
508 | case BuiltinType::OCLReserveID: |
      ResultType = CGM.getOpenCLRuntime().convertOpenCLSpecificType(Ty);
510 | break; |
511 | #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId) \ |
512 | case BuiltinType::Id: |
513 | #define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId) \ |
514 | case BuiltinType::Id: |
515 | #include "clang/Basic/AArch64ACLETypes.def" |
516 | { |
517 | ASTContext::BuiltinVectorTypeInfo Info = |
          Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
519 | // The `__mfp8` type maps to `<1 x i8>` which can't be used to build |
520 | // a <N x i8> vector type, hence bypass the call to `ConvertType` for |
521 | // the element type and create the vector type directly. |
522 | auto *EltTy = Info.ElementType->isMFloat8Type() |
                        ? llvm::Type::getInt8Ty(getLLVMContext())
                        : ConvertType(Info.ElementType);
      auto *VTy = llvm::VectorType::get(EltTy, Info.EC);
526 | switch (Info.NumVectors) { |
527 | default: |
        llvm_unreachable("Expected 1, 2, 3 or 4 vectors!");
529 | case 1: |
530 | return VTy; |
531 | case 2: |
        return llvm::StructType::get(VTy, VTy);
      case 3:
        return llvm::StructType::get(VTy, VTy, VTy);
      case 4:
        return llvm::StructType::get(VTy, VTy, VTy, VTy);
537 | } |
538 | } |
539 | case BuiltinType::SveCount: |
      return llvm::TargetExtType::get(getLLVMContext(), "aarch64.svcount");
541 | case BuiltinType::MFloat8: |
      return llvm::VectorType::get(llvm::Type::getInt8Ty(getLLVMContext()), 1,
                                   false);
544 | #define PPC_VECTOR_TYPE(Name, Id, Size) \ |
545 | case BuiltinType::Id: \ |
546 | ResultType = \ |
547 | llvm::FixedVectorType::get(ConvertType(Context.BoolTy), Size); \ |
548 | break; |
549 | #include "clang/Basic/PPCTypes.def" |
550 | #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: |
551 | #include "clang/Basic/RISCVVTypes.def" |
552 | { |
553 | ASTContext::BuiltinVectorTypeInfo Info = |
          Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
555 | if (Info.NumVectors != 1) { |
556 | unsigned I8EltCount = |
557 | Info.EC.getKnownMinValue() * |
            ConvertType(Info.ElementType)->getScalarSizeInBits() / 8;
        return llvm::TargetExtType::get(
            getLLVMContext(), "riscv.vector.tuple",
            llvm::ScalableVectorType::get(
                llvm::Type::getInt8Ty(getLLVMContext()), I8EltCount),
            Info.NumVectors);
564 | } |
      return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
                                           Info.EC.getKnownMinValue());
567 | } |
568 | #define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS) \ |
569 | case BuiltinType::Id: { \ |
570 | if (BuiltinType::Id == BuiltinType::WasmExternRef) \ |
571 | ResultType = CGM.getTargetCodeGenInfo().getWasmExternrefReferenceType(); \ |
572 | else \ |
573 | llvm_unreachable("Unexpected wasm reference builtin type!"); \ |
574 | } break; |
575 | #include "clang/Basic/WebAssemblyReferenceTypes.def" |
576 | #define AMDGPU_OPAQUE_PTR_TYPE(Name, Id, SingletonId, Width, Align, AS) \ |
577 | case BuiltinType::Id: \ |
578 | return llvm::PointerType::get(getLLVMContext(), AS); |
579 | #define AMDGPU_NAMED_BARRIER_TYPE(Name, Id, SingletonId, Width, Align, Scope) \ |
580 | case BuiltinType::Id: \ |
581 | return llvm::TargetExtType::get(getLLVMContext(), "amdgcn.named.barrier", \ |
582 | {}, {Scope}); |
583 | #include "clang/Basic/AMDGPUTypes.def" |
584 | #define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) case BuiltinType::Id: |
585 | #include "clang/Basic/HLSLIntangibleTypes.def" |
    ResultType = CGM.getHLSLRuntime().convertHLSLSpecificType(Ty);
587 | break; |
588 | case BuiltinType::Dependent: |
589 | #define BUILTIN_TYPE(Id, SingletonId) |
590 | #define PLACEHOLDER_TYPE(Id, SingletonId) \ |
591 | case BuiltinType::Id: |
592 | #include "clang/AST/BuiltinTypes.def" |
    llvm_unreachable("Unexpected placeholder builtin type!");
594 | } |
595 | break; |
596 | } |
597 | case Type::Auto: |
598 | case Type::DeducedTemplateSpecialization: |
    llvm_unreachable("Unexpected undeduced type!");
600 | case Type::Complex: { |
    llvm::Type *EltTy = ConvertType(cast<ComplexType>(Ty)->getElementType());
    ResultType = llvm::StructType::get(EltTy, EltTy);
603 | break; |
604 | } |
605 | case Type::LValueReference: |
606 | case Type::RValueReference: { |
    const ReferenceType *RTy = cast<ReferenceType>(Ty);
    QualType ETy = RTy->getPointeeType();
    unsigned AS = getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
611 | break; |
612 | } |
613 | case Type::Pointer: { |
    const PointerType *PTy = cast<PointerType>(Ty);
    QualType ETy = PTy->getPointeeType();
    unsigned AS = getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
618 | break; |
619 | } |
620 | |
621 | case Type::VariableArray: { |
    const VariableArrayType *A = cast<VariableArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // VLAs resolve to the innermost element type; this matches
    // the return of alloca, and there isn't any obviously better choice.
    ResultType = ConvertTypeForMem(A->getElementType());
628 | break; |
629 | } |
630 | case Type::IncompleteArray: { |
    const IncompleteArrayType *A = cast<IncompleteArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // int X[] -> [0 x int], unless the element type is not sized. If it is
    // unsized (e.g. an incomplete struct) just use [0 x i8].
    ResultType = ConvertTypeForMem(A->getElementType());
    if (!ResultType->isSized()) {
      SkippedLayout = true;
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
    }
    ResultType = llvm::ArrayType::get(ResultType, 0);
642 | break; |
643 | } |
644 | case Type::ArrayParameter: |
645 | case Type::ConstantArray: { |
    const ConstantArrayType *A = cast<ConstantArrayType>(Ty);
    llvm::Type *EltTy = ConvertTypeForMem(A->getElementType());
648 | |
649 | // Lower arrays of undefined struct type to arrays of i8 just to have a |
650 | // concrete type. |
651 | if (!EltTy->isSized()) { |
652 | SkippedLayout = true; |
      EltTy = llvm::Type::getInt8Ty(getLLVMContext());
654 | } |
655 | |
    ResultType = llvm::ArrayType::get(EltTy, A->getZExtSize());
657 | break; |
658 | } |
659 | case Type::ExtVector: |
660 | case Type::Vector: { |
    const auto *VT = cast<VectorType>(Ty);
    // An ext_vector_type of Bool is really a vector of bits.
    llvm::Type *IRElemTy = VT->isPackedVectorBoolType(Context)
                               ? llvm::Type::getInt1Ty(getLLVMContext())
                           : VT->getElementType()->isMFloat8Type()
                               ? llvm::Type::getInt8Ty(getLLVMContext())
                               : ConvertType(VT->getElementType());
    ResultType = llvm::FixedVectorType::get(IRElemTy, VT->getNumElements());
669 | break; |
670 | } |
671 | case Type::ConstantMatrix: { |
    const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
    ResultType =
        llvm::FixedVectorType::get(ConvertType(MT->getElementType()),
                                   MT->getNumRows() * MT->getNumColumns());
676 | break; |
677 | } |
678 | case Type::FunctionNoProto: |
679 | case Type::FunctionProto: |
    ResultType = ConvertFunctionTypeInternal(T);
681 | break; |
682 | case Type::ObjCObject: |
    ResultType = ConvertType(cast<ObjCObjectType>(Ty)->getBaseType());
684 | break; |
685 | |
686 | case Type::ObjCInterface: { |
687 | // Objective-C interfaces are always opaque (outside of the |
688 | // runtime, which can do whatever it likes); we never refine |
689 | // these. |
    llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(Ty)];
    if (!T)
      T = llvm::StructType::create(getLLVMContext());
693 | ResultType = T; |
694 | break; |
695 | } |
696 | |
697 | case Type::ObjCObjectPointer: |
    ResultType = llvm::PointerType::getUnqual(getLLVMContext());
699 | break; |
700 | |
701 | case Type::Enum: { |
    const EnumDecl *ED = cast<EnumType>(Ty)->getDecl();
    if (ED->isCompleteDefinition() || ED->isFixed())
      return ConvertType(ED->getIntegerType());
    // Return a placeholder 'i32' type. This can be changed later when the
    // type is defined (see UpdateCompletedType), but is likely to be the
    // "right" answer.
    ResultType = llvm::Type::getInt32Ty(getLLVMContext());
709 | break; |
710 | } |
711 | |
712 | case Type::BlockPointer: { |
713 | // Block pointers lower to function type. For function type, |
714 | // getTargetAddressSpace() returns default address space for |
715 | // function pointer i.e. program address space. Therefore, for block |
716 | // pointers, it is important to pass the pointee AST address space when |
717 | // calling getTargetAddressSpace(), to ensure that we get the LLVM IR |
718 | // address space for data pointers and not function pointers. |
    const QualType FTy = cast<BlockPointerType>(Ty)->getPointeeType();
    unsigned AS = Context.getTargetAddressSpace(FTy.getAddressSpace());
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
722 | break; |
723 | } |
724 | |
725 | case Type::MemberPointer: { |
    auto *MPTy = cast<MemberPointerType>(Ty);
    if (!getCXXABI().isMemberPointerConvertible(MPTy)) {
      auto *C = MPTy->getMostRecentCXXRecordDecl()->getTypeForDecl();
      auto Insertion = RecordsWithOpaqueMemberPointers.try_emplace(C);
      if (Insertion.second)
        Insertion.first->second = llvm::StructType::create(getLLVMContext());
      ResultType = Insertion.first->second;
    } else {
      ResultType = getCXXABI().ConvertMemberPointerType(MPTy);
735 | } |
736 | break; |
737 | } |
738 | |
739 | case Type::Atomic: { |
    QualType valueType = cast<AtomicType>(Ty)->getValueType();
    ResultType = ConvertTypeForMem(valueType);
742 | |
743 | // Pad out to the inflated size if necessary. |
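    // For example (illustrative), an _Atomic whose value occupies 3 bytes but
    // whose atomic size is inflated to 4 bytes is lowered to
    // { <value's memory type>, [1 x i8] }.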
    uint64_t valueSize = Context.getTypeSize(valueType);
    uint64_t atomicSize = Context.getTypeSize(Ty);
746 | if (valueSize != atomicSize) { |
747 | assert(valueSize < atomicSize); |
748 | llvm::Type *elts[] = { |
749 | ResultType, |
        llvm::ArrayType::get(CGM.Int8Ty, (atomicSize - valueSize) / 8)
      };
      ResultType =
          llvm::StructType::get(getLLVMContext(), llvm::ArrayRef(elts));
754 | } |
755 | break; |
756 | } |
757 | case Type::Pipe: { |
    ResultType = CGM.getOpenCLRuntime().getPipeType(cast<PipeType>(Ty));
759 | break; |
760 | } |
761 | case Type::BitInt: { |
    const auto &EIT = cast<BitIntType>(Ty);
    ResultType = llvm::Type::getIntNTy(getLLVMContext(), EIT->getNumBits());
764 | break; |
765 | } |
766 | case Type::HLSLAttributedResource: |
767 | case Type::HLSLInlineSpirv: |
    ResultType = CGM.getHLSLRuntime().convertHLSLSpecificType(Ty);
769 | break; |
770 | } |
771 | |
  assert(ResultType && "Didn't convert a type?");
  assert((!CachedType || CachedType == ResultType) &&
         "Cached type doesn't match computed type");
775 | |
776 | TypeCache[Ty] = ResultType; |
777 | return ResultType; |
778 | } |
779 | |
780 | bool CodeGenModule::isPaddedAtomicType(QualType type) { |
  return isPaddedAtomicType(type->castAs<AtomicType>());
782 | } |
783 | |
784 | bool CodeGenModule::isPaddedAtomicType(const AtomicType *type) { |
  return Context.getTypeSize(type) != Context.getTypeSize(type->getValueType());
786 | } |
787 | |
788 | /// ConvertRecordDeclType - Lay out a tagged decl type like struct or union. |
789 | llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) { |
790 | // TagDecl's are not necessarily unique, instead use the (clang) |
791 | // type connected to the decl. |
  const Type *Key = Context.getTagDeclType(RD).getTypePtr();
793 | |
794 | llvm::StructType *&Entry = RecordDeclTypes[Key]; |
795 | |
796 | // If we don't have a StructType at all yet, create the forward declaration. |
797 | if (!Entry) { |
    Entry = llvm::StructType::create(getLLVMContext());
    addRecordTypeName(RD, Entry, "");
800 | } |
801 | llvm::StructType *Ty = Entry; |
802 | |
803 | // If this is still a forward declaration, or the LLVM type is already |
804 | // complete, there's nothing more to do. |
805 | RD = RD->getDefinition(); |
806 | if (!RD || !RD->isCompleteDefinition() || !Ty->isOpaque()) |
807 | return Ty; |
808 | |
809 | // Force conversion of non-virtual base classes recursively. |
  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
811 | for (const auto &I : CRD->bases()) { |
812 | if (I.isVirtual()) continue; |
      ConvertRecordDeclType(I.getType()->castAs<RecordType>()->getDecl());
814 | } |
815 | } |
816 | |
817 | // Layout fields. |
  std::unique_ptr<CGRecordLayout> Layout = ComputeRecordLayout(RD, Ty);
819 | CGRecordLayouts[Key] = std::move(Layout); |
820 | |
821 | // If this struct blocked a FunctionType conversion, then recompute whatever |
822 | // was derived from that. |
823 | // FIXME: This is hugely overconservative. |
824 | if (SkippedLayout) |
825 | TypeCache.clear(); |
826 | |
827 | return Ty; |
828 | } |
829 | |
830 | /// getCGRecordLayout - Return record layout info for the given record decl. |
831 | const CGRecordLayout & |
832 | CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) { |
  const Type *Key = Context.getTagDeclType(RD).getTypePtr();
834 | |
  auto I = CGRecordLayouts.find(Key);
836 | if (I != CGRecordLayouts.end()) |
837 | return *I->second; |
838 | // Compute the type information. |
839 | ConvertRecordDeclType(RD); |
840 | |
841 | // Now try again. |
  I = CGRecordLayouts.find(Key);
843 | |
  assert(I != CGRecordLayouts.end() &&
         "Unable to find record layout information for type");
846 | return *I->second; |
847 | } |
848 | |
849 | bool CodeGenTypes::isPointerZeroInitializable(QualType T) { |
  assert((T->isAnyPointerType() || T->isBlockPointerType() ||
          T->isNullPtrType()) &&
         "Invalid type");
853 | return isZeroInitializable(T); |
854 | } |
855 | |
856 | bool CodeGenTypes::isZeroInitializable(QualType T) { |
857 | if (T->getAs<PointerType>() || T->isNullPtrType()) |
    return Context.getTargetNullPointerValue(T) == 0;
859 | |
860 | if (const auto *AT = Context.getAsArrayType(T)) { |
    if (isa<IncompleteArrayType>(AT))
      return true;
    if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
      if (Context.getConstantArrayElementCount(CAT) == 0)
        return true;
    T = Context.getBaseElementType(T);
867 | } |
868 | |
869 | // Records are non-zero-initializable if they contain any |
870 | // non-zero-initializable subobjects. |
871 | if (const RecordType *RT = T->getAs<RecordType>()) { |
872 | const RecordDecl *RD = RT->getDecl(); |
873 | return isZeroInitializable(RD); |
874 | } |
875 | |
876 | // We have to ask the ABI about member pointers. |
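  // (For example, the Itanium C++ ABI encodes a null pointer to a data
  // member as -1, so such member pointers are not zero-initializable.)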
877 | if (const MemberPointerType *MPT = T->getAs<MemberPointerType>()) |
878 | return getCXXABI().isZeroInitializable(MPT); |
879 | |
880 | // HLSL Inline SPIR-V types are non-zero-initializable. |
881 | if (T->getAs<HLSLInlineSpirvType>()) |
882 | return false; |
883 | |
884 | // Everything else is okay. |
885 | return true; |
886 | } |
887 | |
888 | bool CodeGenTypes::isZeroInitializable(const RecordDecl *RD) { |
889 | return getCGRecordLayout(RD).isZeroInitializable(); |
890 | } |
891 | |
892 | unsigned CodeGenTypes::getTargetAddressSpace(QualType T) const { |
893 | // Return the address space for the type. If the type is a |
894 | // function type without an address space qualifier, the |
895 | // program address space is used. Otherwise, the target picks |
  // the best address space based on the type information.
897 | return T->isFunctionType() && !T.hasAddressSpace() |
898 | ? getDataLayout().getProgramAddressSpace() |
             : getContext().getTargetAddressSpace(T.getAddressSpace());
900 | } |
901 | |