//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the code that handles AST -> LLVM type lowering.
//
//===----------------------------------------------------------------------===//

#include "CodeGenTypes.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGOpenCLRuntime.h"
#include "CGRecordLayout.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"

using namespace clang;
using namespace CodeGen;

CodeGenTypes::CodeGenTypes(CodeGenModule &cgm)
    : CGM(cgm), Context(cgm.getContext()), TheModule(cgm.getModule()),
      Target(cgm.getTarget()), TheCXXABI(cgm.getCXXABI()),
      TheABIInfo(cgm.getTargetCodeGenInfo().getABIInfo()) {
  SkippedLayout = false;
  LongDoubleReferenced = false;
}

CodeGenTypes::~CodeGenTypes() {
  for (llvm::FoldingSet<CGFunctionInfo>::iterator
           I = FunctionInfos.begin(), E = FunctionInfos.end(); I != E; )
    delete &*I++;
}

const CodeGenOptions &CodeGenTypes::getCodeGenOpts() const {
  return CGM.getCodeGenOpts();
}

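/// Builds the IR name for a record type: the tag kind, a '.', the (qualified)
/// record or typedef name, and an optional caller-provided suffix. For
/// example (illustrative, not exhaustive), "struct Foo" is typically named
/// "struct.Foo" in IR, an unnamed struct becomes "struct.anon", and callers
/// may append suffixes such as ".base" for derived layouts.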
void CodeGenTypes::addRecordTypeName(const RecordDecl *RD,
                                     llvm::StructType *Ty,
                                     StringRef suffix) {
  SmallString<256> TypeName;
  llvm::raw_svector_ostream OS(TypeName);
  OS << RD->getKindName() << '.';

  // FIXME: We probably want to make more tweaks to the printing policy. For
  // example, we should probably enable PrintCanonicalTypes and
  // FullyQualifiedNames.
  PrintingPolicy Policy = RD->getASTContext().getPrintingPolicy();
  Policy.SuppressInlineNamespace = false;

  // Name the codegen type after the typedef name
  // if there is no tag type name available
  if (RD->getIdentifier()) {
    // FIXME: We should not have to check for a null decl context here.
    // Right now we do it because the implicit Obj-C decls don't have one.
    if (RD->getDeclContext())
      RD->printQualifiedName(OS, Policy);
    else
      RD->printName(OS, Policy);
  } else if (const TypedefNameDecl *TDD = RD->getTypedefNameForAnonDecl()) {
    // FIXME: We should not have to check for a null decl context here.
    // Right now we do it because the implicit Obj-C decls don't have one.
    if (TDD->getDeclContext())
      TDD->printQualifiedName(OS, Policy);
    else
      TDD->printName(OS);
  } else
    OS << "anon";

  if (!suffix.empty())
    OS << suffix;

  Ty->setName(OS.str());
}

/// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
/// ConvertType in that it is used to convert to the memory representation for
/// a type. For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
///
/// We generally assume that the alloc size of this type under the LLVM
/// data layout is the same as the size of the AST type. The alignment
/// does not have to match: Clang should always use explicit alignments
/// and packed structs as necessary to produce the layout it needs.
/// But the size does need to be exactly right or else things like struct
/// layout will break.
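///
/// For example (illustrative; widths are target-dependent), _Bool converts
/// to i1 as a scalar but usually to i8 in memory, and an ext_vector_type of
/// _Bool converts to a byte-padded integer type in memory.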
llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) {
  if (T->isConstantMatrixType()) {
    const Type *Ty = Context.getCanonicalType(T).getTypePtr();
    const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
    return llvm::ArrayType::get(ConvertType(MT->getElementType()),
                                MT->getNumRows() * MT->getNumColumns());
  }

  llvm::Type *R = ConvertType(T);

  // Check for the boolean vector case.
  if (T->isExtVectorBoolType()) {
    auto *FixedVT = cast<llvm::FixedVectorType>(R);
    // Pad to at least one byte.
    uint64_t BytePadded = std::max<uint64_t>(FixedVT->getNumElements(), 8);
    return llvm::IntegerType::get(FixedVT->getContext(), BytePadded);
  }

  // If T is _Bool or a _BitInt type, ConvertType will produce an IR type
  // with the exact semantic bit-width of the AST type; for example,
  // _BitInt(17) will turn into i17. In memory, however, we need to store
  // such values extended to their full storage size as decided by AST
  // layout; this is an ABI requirement. Ideally, we would always use an
  // integer type that's just the bit-size of the AST type; for example, if
  // sizeof(_BitInt(17)) == 4, _BitInt(17) would turn into i32. That is what's
  // returned by convertTypeForLoadStore. However, that type does not
  // always satisfy the size requirement on memory representation types
  // described above. For example, a 32-bit platform might reasonably set
  // sizeof(_BitInt(65)) == 12, but i96 is likely to have an alloc size
  // of 16 bytes in the LLVM data layout. In these cases, we simply return
  // a byte array of the appropriate size.
  if (T->isBitIntType()) {
    if (typeRequiresSplitIntoByteArray(T, R))
      return llvm::ArrayType::get(CGM.Int8Ty,
                                  Context.getTypeSizeInChars(T).getQuantity());
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));
  }

  if (R->isIntegerTy(1))
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));

  // Else, don't map it.
  return R;
}

bool CodeGenTypes::typeRequiresSplitIntoByteArray(QualType ASTTy,
                                                  llvm::Type *LLVMTy) {
  if (!LLVMTy)
    LLVMTy = ConvertType(ASTTy);

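  // For example, on the hypothetical 32-bit target described in
  // ConvertTypeForMem, sizeof(_BitInt(65)) == 12 while the corresponding LLVM
  // integer type is likely to have a 16-byte alloc size, so the sizes differ
  // and the type must be split into a byte array.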
  CharUnits ASTSize = Context.getTypeSizeInChars(ASTTy);
  CharUnits LLVMSize =
      CharUnits::fromQuantity(getDataLayout().getTypeAllocSize(LLVMTy));
  return ASTSize != LLVMSize;
}

llvm::Type *CodeGenTypes::convertTypeForLoadStore(QualType T,
                                                  llvm::Type *LLVMTy) {
  if (!LLVMTy)
    LLVMTy = ConvertType(T);

  if (T->isBitIntType())
    return llvm::Type::getIntNTy(
        getLLVMContext(), Context.getTypeSizeInChars(T).getQuantity() * 8);

  if (LLVMTy->isIntegerTy(1))
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));

  if (T->isExtVectorBoolType())
    return ConvertTypeForMem(T);

  return LLVMTy;
}
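
// Illustrative summary of the three conversions for a _BitInt(17) whose
// sizeof is 4 (exact widths are target-dependent; see ConvertTypeForMem):
//   ConvertType             -> i17   (exact semantic width)
//   convertTypeForLoadStore -> i32   (storage size in bits)
//   ConvertTypeForMem       -> i32, or a byte array when the LLVM alloc size
//                              would not match the AST size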

/// isRecordLayoutComplete - Return true if the specified type is already
/// completely laid out.
bool CodeGenTypes::isRecordLayoutComplete(const Type *Ty) const {
  llvm::DenseMap<const Type*, llvm::StructType *>::const_iterator I =
    RecordDeclTypes.find(Ty);
  return I != RecordDeclTypes.end() && !I->second->isOpaque();
}

/// isFuncParamTypeConvertible - Return true if the specified type in a
/// function parameter or result position can be converted to an IR type at
/// this point. This boils down to whether the type is complete.
bool CodeGenTypes::isFuncParamTypeConvertible(QualType Ty) {
  // Some ABIs cannot have their member pointers represented in IR unless
  // certain circumstances have been reached (for example, in the Microsoft
  // C++ ABI the representation depends on the class's inheritance model,
  // which may not be known yet).
  if (const auto *MPT = Ty->getAs<MemberPointerType>())
    return getCXXABI().isMemberPointerConvertible(MPT);

  // If this isn't a tagged type, we can convert it!
  const TagType *TT = Ty->getAs<TagType>();
  if (!TT) return true;

  // Incomplete types cannot be converted.
  return !TT->isIncompleteType();
}


/// Code to verify a given function type is complete, i.e. the return type
/// and all of the parameter types are complete. Also check to see if we are in
/// a RS_StructPointer context, and if so whether any struct types have been
/// pended. If so, we don't want to ask the ABI lowering code to handle a type
/// that cannot be converted to an IR type.
bool CodeGenTypes::isFuncTypeConvertible(const FunctionType *FT) {
  if (!isFuncParamTypeConvertible(FT->getReturnType()))
    return false;

  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
    for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
      if (!isFuncParamTypeConvertible(FPT->getParamType(i)))
        return false;

  return true;
}

/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
  // If this is an enum being completed, then we flush all non-struct types
  // from the cache. This allows function types and other things that may be
  // derived from the enum to be recomputed.
  if (const EnumDecl *ED = dyn_cast<EnumDecl>(TD)) {
    // Only flush the cache if we've actually already converted this type.
    if (TypeCache.count(ED->getTypeForDecl())) {
      // Okay, we formed some types based on this. We speculated that the enum
      // would be lowered to i32, so we only need to flush the cache if this
      // didn't happen.
      if (!ConvertType(ED->getIntegerType())->isIntegerTy(32))
        TypeCache.clear();
    }
    // If necessary, provide the full definition of a type only used with a
    // declaration so far.
    if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
      DI->completeType(ED);
    return;
  }

  // If we completed a RecordDecl that we previously used and converted to an
  // anonymous type, then go ahead and complete it now.
  const RecordDecl *RD = cast<RecordDecl>(TD);
  if (RD->isDependentType()) return;

  // Only complete it if we converted it already. If we haven't converted it
  // yet, we'll just do it lazily.
  if (RecordDeclTypes.count(Context.getTagDeclType(RD).getTypePtr()))
    ConvertRecordDeclType(RD);

  // If necessary, provide the full definition of a type only used with a
  // declaration so far.
  if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
    DI->completeType(RD);
}

void CodeGenTypes::RefreshTypeCacheForClass(const CXXRecordDecl *RD) {
  QualType T = Context.getRecordType(RD);
  T = Context.getCanonicalType(T);

  const Type *Ty = T.getTypePtr();
  if (RecordsWithOpaqueMemberPointers.count(Ty)) {
    TypeCache.clear();
    RecordsWithOpaqueMemberPointers.clear();
  }
}

static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext,
                                    const llvm::fltSemantics &format,
                                    bool UseNativeHalf = false) {
  if (&format == &llvm::APFloat::IEEEhalf()) {
    if (UseNativeHalf)
      return llvm::Type::getHalfTy(VMContext);
    else
      return llvm::Type::getInt16Ty(VMContext);
  }
  if (&format == &llvm::APFloat::BFloat())
    return llvm::Type::getBFloatTy(VMContext);
  if (&format == &llvm::APFloat::IEEEsingle())
    return llvm::Type::getFloatTy(VMContext);
  if (&format == &llvm::APFloat::IEEEdouble())
    return llvm::Type::getDoubleTy(VMContext);
  if (&format == &llvm::APFloat::IEEEquad())
    return llvm::Type::getFP128Ty(VMContext);
  if (&format == &llvm::APFloat::PPCDoubleDouble())
    return llvm::Type::getPPC_FP128Ty(VMContext);
  if (&format == &llvm::APFloat::x87DoubleExtended())
    return llvm::Type::getX86_FP80Ty(VMContext);
  llvm_unreachable("Unknown float format!");
}

llvm::Type *CodeGenTypes::ConvertFunctionTypeInternal(QualType QFT) {
  assert(QFT.isCanonical());
  const FunctionType *FT = cast<FunctionType>(QFT.getTypePtr());
  // First, check whether we can build the full function type. If the
  // function type depends on an incomplete type (e.g. a struct or enum), we
  // cannot lower the function type.
  if (!isFuncTypeConvertible(FT)) {
    // This function's type depends on an incomplete tag type.
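    // For example (illustrative), given only "struct S;" and the declaration
    // "S f(S);", neither the return type nor the parameter type is complete
    // here, so we record the dependency and return a placeholder below.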

    // Force conversion of all the relevant record types, to make sure
    // we re-convert the FunctionType when appropriate.
    if (const RecordType *RT = FT->getReturnType()->getAs<RecordType>())
      ConvertRecordDeclType(RT->getDecl());
    if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
      for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
        if (const RecordType *RT = FPT->getParamType(i)->getAs<RecordType>())
          ConvertRecordDeclType(RT->getDecl());

    SkippedLayout = true;

    // Return a placeholder type.
    return llvm::StructType::get(getLLVMContext());
  }

  // The function type can be built; call the appropriate routines to
  // build it.
  const CGFunctionInfo *FI;
  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
    FI = &arrangeFreeFunctionType(
        CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)));
  } else {
    const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(FT);
    FI = &arrangeFreeFunctionType(
        CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)));
  }

  llvm::Type *ResultType = nullptr;
  // If there is something higher level prodding our CGFunctionInfo, then
  // don't recurse into it again.
  if (FunctionsBeingProcessed.count(FI)) {

    ResultType = llvm::StructType::get(getLLVMContext());
    SkippedLayout = true;
  } else {

    // Otherwise, we're good to go, go ahead and convert it.
    ResultType = GetFunctionType(*FI);
  }

  return ResultType;
}

/// ConvertType - Convert the specified type to its LLVM form.
llvm::Type *CodeGenTypes::ConvertType(QualType T) {
  T = Context.getCanonicalType(T);

  const Type *Ty = T.getTypePtr();

  // For the device-side compilation, CUDA device builtin surface/texture types
  // may be represented in different types.
  if (Context.getLangOpts().CUDAIsDevice) {
    if (T->isCUDADeviceBuiltinSurfaceType()) {
      if (auto *Ty = CGM.getTargetCodeGenInfo()
                         .getCUDADeviceBuiltinSurfaceDeviceType())
        return Ty;
    } else if (T->isCUDADeviceBuiltinTextureType()) {
      if (auto *Ty = CGM.getTargetCodeGenInfo()
                         .getCUDADeviceBuiltinTextureDeviceType())
        return Ty;
    }
  }

  // RecordTypes are cached and processed specially.
  if (const RecordType *RT = dyn_cast<RecordType>(Ty))
    return ConvertRecordDeclType(RT->getDecl());

  llvm::Type *CachedType = nullptr;
  auto TCI = TypeCache.find(Ty);
  if (TCI != TypeCache.end())
    CachedType = TCI->second;
  // With expensive checks, check that the type we compute matches the
  // cached type.
#ifndef EXPENSIVE_CHECKS
  if (CachedType)
    return CachedType;
#endif

  // If we don't have it in the cache, convert it now.
  llvm::Type *ResultType = nullptr;
  switch (Ty->getTypeClass()) {
  case Type::Record: // Handled above.
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical or dependent types aren't possible.");

  case Type::Builtin: {
    switch (cast<BuiltinType>(Ty)->getKind()) {
    case BuiltinType::Void:
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      // LLVM void type can only be used as the result of a function call. Just
      // map to the same as char.
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
      break;

    case BuiltinType::Bool:
      // Note that we always return bool as i1 for use as a scalar type.
      ResultType = llvm::Type::getInt1Ty(getLLVMContext());
      break;

    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::Int:
    case BuiltinType::UInt:
    case BuiltinType::Long:
    case BuiltinType::ULong:
    case BuiltinType::LongLong:
    case BuiltinType::ULongLong:
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Char8:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
    case BuiltinType::ShortAccum:
    case BuiltinType::Accum:
    case BuiltinType::LongAccum:
    case BuiltinType::UShortAccum:
    case BuiltinType::UAccum:
    case BuiltinType::ULongAccum:
    case BuiltinType::ShortFract:
    case BuiltinType::Fract:
    case BuiltinType::LongFract:
    case BuiltinType::UShortFract:
    case BuiltinType::UFract:
    case BuiltinType::ULongFract:
    case BuiltinType::SatShortAccum:
    case BuiltinType::SatAccum:
    case BuiltinType::SatLongAccum:
    case BuiltinType::SatUShortAccum:
    case BuiltinType::SatUAccum:
    case BuiltinType::SatULongAccum:
    case BuiltinType::SatShortFract:
    case BuiltinType::SatFract:
    case BuiltinType::SatLongFract:
    case BuiltinType::SatUShortFract:
    case BuiltinType::SatUFract:
    case BuiltinType::SatULongFract:
      ResultType = llvm::IntegerType::get(getLLVMContext(),
                                 static_cast<unsigned>(Context.getTypeSize(T)));
      break;

    case BuiltinType::Float16:
      ResultType =
          getTypeForFormat(getLLVMContext(), Context.getFloatTypeSemantics(T),
                           /* UseNativeHalf = */ true);
      break;

    case BuiltinType::Half:
      // Half FP can either be storage-only (lowered to i16) or native.
      ResultType = getTypeForFormat(
          getLLVMContext(), Context.getFloatTypeSemantics(T),
          Context.getLangOpts().NativeHalfType ||
              !Context.getTargetInfo().useFP16ConversionIntrinsics());
      break;
    case BuiltinType::LongDouble:
      LongDoubleReferenced = true;
      [[fallthrough]];
    case BuiltinType::BFloat16:
    case BuiltinType::Float:
    case BuiltinType::Double:
    case BuiltinType::Float128:
    case BuiltinType::Ibm128:
      ResultType = getTypeForFormat(getLLVMContext(),
                                    Context.getFloatTypeSemantics(T),
                                    /* UseNativeHalf = */ false);
      break;

    case BuiltinType::NullPtr:
      // Model std::nullptr_t as i8*
      ResultType = llvm::PointerType::getUnqual(getLLVMContext());
      break;

    case BuiltinType::UInt128:
    case BuiltinType::Int128:
      ResultType = llvm::IntegerType::get(getLLVMContext(), 128);
      break;

#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
      ResultType = CGM.getOpenCLRuntime().convertOpenCLSpecificType(Ty);
      break;
    case BuiltinType::SveInt8:
    case BuiltinType::SveUint8:
    case BuiltinType::SveInt8x2:
    case BuiltinType::SveUint8x2:
    case BuiltinType::SveInt8x3:
    case BuiltinType::SveUint8x3:
    case BuiltinType::SveInt8x4:
    case BuiltinType::SveUint8x4:
    case BuiltinType::SveInt16:
    case BuiltinType::SveUint16:
    case BuiltinType::SveInt16x2:
    case BuiltinType::SveUint16x2:
    case BuiltinType::SveInt16x3:
    case BuiltinType::SveUint16x3:
    case BuiltinType::SveInt16x4:
    case BuiltinType::SveUint16x4:
    case BuiltinType::SveInt32:
    case BuiltinType::SveUint32:
    case BuiltinType::SveInt32x2:
    case BuiltinType::SveUint32x2:
    case BuiltinType::SveInt32x3:
    case BuiltinType::SveUint32x3:
    case BuiltinType::SveInt32x4:
    case BuiltinType::SveUint32x4:
    case BuiltinType::SveInt64:
    case BuiltinType::SveUint64:
    case BuiltinType::SveInt64x2:
    case BuiltinType::SveUint64x2:
    case BuiltinType::SveInt64x3:
    case BuiltinType::SveUint64x3:
    case BuiltinType::SveInt64x4:
    case BuiltinType::SveUint64x4:
    case BuiltinType::SveBool:
    case BuiltinType::SveBoolx2:
    case BuiltinType::SveBoolx4:
    case BuiltinType::SveFloat16:
    case BuiltinType::SveFloat16x2:
    case BuiltinType::SveFloat16x3:
    case BuiltinType::SveFloat16x4:
    case BuiltinType::SveFloat32:
    case BuiltinType::SveFloat32x2:
    case BuiltinType::SveFloat32x3:
    case BuiltinType::SveFloat32x4:
    case BuiltinType::SveFloat64:
    case BuiltinType::SveFloat64x2:
    case BuiltinType::SveFloat64x3:
    case BuiltinType::SveFloat64x4:
    case BuiltinType::SveBFloat16:
    case BuiltinType::SveBFloat16x2:
    case BuiltinType::SveBFloat16x3:
    case BuiltinType::SveBFloat16x4: {
      ASTContext::BuiltinVectorTypeInfo Info =
          Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
      return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
                                           Info.EC.getKnownMinValue() *
                                               Info.NumVectors);
    }
    case BuiltinType::SveCount:
      return llvm::TargetExtType::get(getLLVMContext(), "aarch64.svcount");
#define PPC_VECTOR_TYPE(Name, Id, Size) \
    case BuiltinType::Id: \
      ResultType = \
          llvm::FixedVectorType::get(ConvertType(Context.BoolTy), Size); \
      break;
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
    {
      ASTContext::BuiltinVectorTypeInfo Info =
          Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
      // Tuple types are expressed as aggregate types of the same scalable
      // vector type (e.g. vint32m1x2_t is two vint32m1_t, which is {<vscale x
      // 2 x i32>, <vscale x 2 x i32>}).
      if (Info.NumVectors != 1) {
        llvm::Type *EltTy = llvm::ScalableVectorType::get(
            ConvertType(Info.ElementType), Info.EC.getKnownMinValue());
        llvm::SmallVector<llvm::Type *, 4> EltTys(Info.NumVectors, EltTy);
        return llvm::StructType::get(getLLVMContext(), EltTys);
      }
      return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
                                           Info.EC.getKnownMinValue());
    }
#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS)                  \
  case BuiltinType::Id: {                                                      \
    if (BuiltinType::Id == BuiltinType::WasmExternRef)                         \
      ResultType = CGM.getTargetCodeGenInfo().getWasmExternrefReferenceType(); \
    else                                                                       \
      llvm_unreachable("Unexpected wasm reference builtin type!");             \
  } break;
#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define AMDGPU_OPAQUE_PTR_TYPE(Name, MangledName, AS, Width, Align, Id,       \
                               SingletonId)                                    \
  case BuiltinType::Id:                                                        \
    return llvm::PointerType::get(getLLVMContext(), AS);
#include "clang/Basic/AMDGPUTypes.def"
    case BuiltinType::Dependent:
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
    case BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
      llvm_unreachable("Unexpected placeholder builtin type!");
    }
    break;
  }
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Unexpected undeduced type!");
  case Type::Complex: {
    llvm::Type *EltTy = ConvertType(cast<ComplexType>(Ty)->getElementType());
    ResultType = llvm::StructType::get(EltTy, EltTy);
    break;
  }
  case Type::LValueReference:
  case Type::RValueReference: {
    const ReferenceType *RTy = cast<ReferenceType>(Ty);
    QualType ETy = RTy->getPointeeType();
    unsigned AS = getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
    break;
  }
  case Type::Pointer: {
    const PointerType *PTy = cast<PointerType>(Ty);
    QualType ETy = PTy->getPointeeType();
    unsigned AS = getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
    break;
  }

  case Type::VariableArray: {
    const VariableArrayType *A = cast<VariableArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // VLAs resolve to the innermost element type; this matches
    // the return of alloca, and there isn't any obviously better choice.
    ResultType = ConvertTypeForMem(A->getElementType());
    break;
  }
  case Type::IncompleteArray: {
    const IncompleteArrayType *A = cast<IncompleteArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // int X[] -> [0 x int], unless the element type is not sized. If it is
    // unsized (e.g. an incomplete struct) just use [0 x i8].
    ResultType = ConvertTypeForMem(A->getElementType());
    if (!ResultType->isSized()) {
      SkippedLayout = true;
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
    }
    ResultType = llvm::ArrayType::get(ResultType, 0);
    break;
  }
  case Type::ArrayParameter:
  case Type::ConstantArray: {
    const ConstantArrayType *A = cast<ConstantArrayType>(Ty);
    llvm::Type *EltTy = ConvertTypeForMem(A->getElementType());

    // Lower arrays of undefined struct type to arrays of i8 just to have a
    // concrete type.
    if (!EltTy->isSized()) {
      SkippedLayout = true;
      EltTy = llvm::Type::getInt8Ty(getLLVMContext());
    }

    ResultType = llvm::ArrayType::get(EltTy, A->getZExtSize());
    break;
  }
  case Type::ExtVector:
  case Type::Vector: {
    const auto *VT = cast<VectorType>(Ty);
    // An ext_vector_type of Bool is really a vector of bits.
    llvm::Type *IRElemTy = VT->isExtVectorBoolType()
                               ? llvm::Type::getInt1Ty(getLLVMContext())
                               : ConvertType(VT->getElementType());
    ResultType = llvm::FixedVectorType::get(IRElemTy, VT->getNumElements());
    break;
  }
  case Type::ConstantMatrix: {
    const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
    ResultType =
        llvm::FixedVectorType::get(ConvertType(MT->getElementType()),
                                   MT->getNumRows() * MT->getNumColumns());
    break;
  }
  case Type::FunctionNoProto:
  case Type::FunctionProto:
    ResultType = ConvertFunctionTypeInternal(T);
    break;
  case Type::ObjCObject:
    ResultType = ConvertType(cast<ObjCObjectType>(Ty)->getBaseType());
    break;

  case Type::ObjCInterface: {
    // Objective-C interfaces are always opaque (outside of the
    // runtime, which can do whatever it likes); we never refine
    // these.
    llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(Ty)];
    if (!T)
      T = llvm::StructType::create(getLLVMContext());
    ResultType = T;
    break;
  }

  case Type::ObjCObjectPointer:
    ResultType = llvm::PointerType::getUnqual(getLLVMContext());
    break;

  case Type::Enum: {
    const EnumDecl *ED = cast<EnumType>(Ty)->getDecl();
    if (ED->isCompleteDefinition() || ED->isFixed())
      return ConvertType(ED->getIntegerType());
    // Return a placeholder 'i32' type. This can be changed later when the
    // type is defined (see UpdateCompletedType), but is likely to be the
    // "right" answer.
    ResultType = llvm::Type::getInt32Ty(getLLVMContext());
    break;
  }

  case Type::BlockPointer: {
    // A block pointer's pointee is a function type. For a function type,
    // getTargetAddressSpace() returns the default address space for function
    // pointers, i.e. the program address space. Therefore, for block
    // pointers, it is important to pass the pointee AST address space when
    // calling getTargetAddressSpace(), to ensure that we get the LLVM IR
    // address space for data pointers and not function pointers.
    const QualType FTy = cast<BlockPointerType>(Ty)->getPointeeType();
    unsigned AS = Context.getTargetAddressSpace(FTy.getAddressSpace());
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
    break;
  }

  case Type::MemberPointer: {
    auto *MPTy = cast<MemberPointerType>(Ty);
    if (!getCXXABI().isMemberPointerConvertible(MPTy)) {
      auto *C = MPTy->getClass();
      auto Insertion = RecordsWithOpaqueMemberPointers.insert({C, nullptr});
      if (Insertion.second)
        Insertion.first->second = llvm::StructType::create(getLLVMContext());
      ResultType = Insertion.first->second;
    } else {
      ResultType = getCXXABI().ConvertMemberPointerType(MPTy);
    }
    break;
  }

  case Type::Atomic: {
    QualType valueType = cast<AtomicType>(Ty)->getValueType();
    ResultType = ConvertTypeForMem(valueType);

    // Pad out to the inflated size if necessary.
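    // For example (illustrative), if the value type occupies 3 bytes but the
    // target inflates the atomic to 4 bytes, the result below is
    // { <value type>, [1 x i8] }.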
    uint64_t valueSize = Context.getTypeSize(valueType);
    uint64_t atomicSize = Context.getTypeSize(Ty);
    if (valueSize != atomicSize) {
      assert(valueSize < atomicSize);
      llvm::Type *elts[] = {
        ResultType,
        llvm::ArrayType::get(CGM.Int8Ty, (atomicSize - valueSize) / 8)
      };
      ResultType =
          llvm::StructType::get(getLLVMContext(), llvm::ArrayRef(elts));
    }
    break;
  }
  case Type::Pipe: {
    ResultType = CGM.getOpenCLRuntime().getPipeType(cast<PipeType>(Ty));
    break;
  }
  case Type::BitInt: {
    const auto &EIT = cast<BitIntType>(Ty);
    ResultType = llvm::Type::getIntNTy(getLLVMContext(), EIT->getNumBits());
    break;
  }
  }

  assert(ResultType && "Didn't convert a type?");
  assert((!CachedType || CachedType == ResultType) &&
         "Cached type doesn't match computed type");

  TypeCache[Ty] = ResultType;
  return ResultType;
}

bool CodeGenModule::isPaddedAtomicType(QualType type) {
  return isPaddedAtomicType(type->castAs<AtomicType>());
}

bool CodeGenModule::isPaddedAtomicType(const AtomicType *type) {
  return Context.getTypeSize(type) !=
         Context.getTypeSize(type->getValueType());
}

/// ConvertRecordDeclType - Lay out a tagged decl type like struct or union.
llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
  // TagDecl's are not necessarily unique, instead use the (clang)
  // type connected to the decl.
  const Type *Key = Context.getTagDeclType(RD).getTypePtr();

  llvm::StructType *&Entry = RecordDeclTypes[Key];

  // If we don't have a StructType at all yet, create the forward declaration.
  if (!Entry) {
    Entry = llvm::StructType::create(getLLVMContext());
    addRecordTypeName(RD, Entry, "");
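    // Note that the type is created and named before any layout is attempted,
    // so a record that is only ever forward-declared still appears in the IR
    // as a named opaque struct.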
  }
  llvm::StructType *Ty = Entry;

  // If this is still a forward declaration, or the LLVM type is already
  // complete, there's nothing more to do.
  RD = RD->getDefinition();
  if (!RD || !RD->isCompleteDefinition() || !Ty->isOpaque())
    return Ty;

  // Force conversion of non-virtual base classes recursively.
  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CRD->bases()) {
      if (I.isVirtual()) continue;
      ConvertRecordDeclType(I.getType()->castAs<RecordType>()->getDecl());
    }
  }

  // Layout fields.
  std::unique_ptr<CGRecordLayout> Layout = ComputeRecordLayout(RD, Ty);
  CGRecordLayouts[Key] = std::move(Layout);

  // If this struct blocked a FunctionType conversion, then recompute whatever
  // was derived from that.
  // FIXME: This is hugely overconservative.
  if (SkippedLayout)
    TypeCache.clear();

  return Ty;
}

/// getCGRecordLayout - Return record layout info for the given record decl.
const CGRecordLayout &
CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
  const Type *Key = Context.getTagDeclType(RD).getTypePtr();

  auto I = CGRecordLayouts.find(Key);
  if (I != CGRecordLayouts.end())
    return *I->second;
  // Compute the type information.
  ConvertRecordDeclType(RD);

  // Now try again.
  I = CGRecordLayouts.find(Key);

  assert(I != CGRecordLayouts.end() &&
         "Unable to find record layout information for type");
  return *I->second;
}

bool CodeGenTypes::isPointerZeroInitializable(QualType T) {
  assert((T->isAnyPointerType() || T->isBlockPointerType()) && "Invalid type");
  return isZeroInitializable(T);
}

bool CodeGenTypes::isZeroInitializable(QualType T) {
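  // A pointer is only zero-initializable if its null value is actually the
  // all-zero bit pattern on this target; some address spaces (for example, on
  // targets such as AMDGPU) use a non-zero null representation.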
  if (T->getAs<PointerType>())
    return Context.getTargetNullPointerValue(T) == 0;

  if (const auto *AT = Context.getAsArrayType(T)) {
    if (isa<IncompleteArrayType>(AT))
      return true;
    if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
      if (Context.getConstantArrayElementCount(CAT) == 0)
        return true;
    T = Context.getBaseElementType(T);
  }

  // Records are non-zero-initializable if they contain any
  // non-zero-initializable subobjects.
  if (const RecordType *RT = T->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    return isZeroInitializable(RD);
  }

  // We have to ask the ABI about member pointers.
  if (const MemberPointerType *MPT = T->getAs<MemberPointerType>())
    return getCXXABI().isZeroInitializable(MPT);

  // Everything else is okay.
  return true;
}

bool CodeGenTypes::isZeroInitializable(const RecordDecl *RD) {
  return getCGRecordLayout(RD).isZeroInitializable();
}

unsigned CodeGenTypes::getTargetAddressSpace(QualType T) const {
  // Return the address space for the type. If the type is a
  // function type without an address space qualifier, the
  // program address space is used. Otherwise, the target picks
  // the best address space based on the type information.
  return T->isFunctionType() && !T.hasAddressSpace()
             ? getDataLayout().getProgramAddressSpace()
             : getContext().getTargetAddressSpace(T.getAddressSpace());
}
