1//===- ASTContext.cpp - Context to hold long-lived AST nodes --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the ASTContext interface.
10//
11//===----------------------------------------------------------------------===//
12
13#include "clang/AST/ASTContext.h"
14#include "ByteCode/Context.h"
15#include "CXXABI.h"
16#include "clang/AST/APValue.h"
17#include "clang/AST/ASTConcept.h"
18#include "clang/AST/ASTMutationListener.h"
19#include "clang/AST/ASTStructuralEquivalence.h"
20#include "clang/AST/ASTTypeTraits.h"
21#include "clang/AST/Attr.h"
22#include "clang/AST/AttrIterator.h"
23#include "clang/AST/CharUnits.h"
24#include "clang/AST/Comment.h"
25#include "clang/AST/Decl.h"
26#include "clang/AST/DeclBase.h"
27#include "clang/AST/DeclCXX.h"
28#include "clang/AST/DeclContextInternals.h"
29#include "clang/AST/DeclObjC.h"
30#include "clang/AST/DeclOpenMP.h"
31#include "clang/AST/DeclTemplate.h"
32#include "clang/AST/DeclarationName.h"
33#include "clang/AST/DependenceFlags.h"
34#include "clang/AST/Expr.h"
35#include "clang/AST/ExprCXX.h"
36#include "clang/AST/ExternalASTSource.h"
37#include "clang/AST/Mangle.h"
38#include "clang/AST/MangleNumberingContext.h"
39#include "clang/AST/NestedNameSpecifier.h"
40#include "clang/AST/ParentMapContext.h"
41#include "clang/AST/RawCommentList.h"
42#include "clang/AST/RecordLayout.h"
43#include "clang/AST/Stmt.h"
44#include "clang/AST/TemplateBase.h"
45#include "clang/AST/TemplateName.h"
46#include "clang/AST/Type.h"
47#include "clang/AST/TypeLoc.h"
48#include "clang/AST/UnresolvedSet.h"
49#include "clang/AST/VTableBuilder.h"
50#include "clang/Basic/AddressSpaces.h"
51#include "clang/Basic/Builtins.h"
52#include "clang/Basic/CommentOptions.h"
53#include "clang/Basic/ExceptionSpecificationType.h"
54#include "clang/Basic/IdentifierTable.h"
55#include "clang/Basic/LLVM.h"
56#include "clang/Basic/LangOptions.h"
57#include "clang/Basic/Linkage.h"
58#include "clang/Basic/Module.h"
59#include "clang/Basic/NoSanitizeList.h"
60#include "clang/Basic/ObjCRuntime.h"
61#include "clang/Basic/ProfileList.h"
62#include "clang/Basic/SourceLocation.h"
63#include "clang/Basic/SourceManager.h"
64#include "clang/Basic/Specifiers.h"
65#include "clang/Basic/TargetCXXABI.h"
66#include "clang/Basic/TargetInfo.h"
67#include "clang/Basic/XRayLists.h"
68#include "llvm/ADT/APFixedPoint.h"
69#include "llvm/ADT/APInt.h"
70#include "llvm/ADT/APSInt.h"
71#include "llvm/ADT/ArrayRef.h"
72#include "llvm/ADT/DenseMap.h"
73#include "llvm/ADT/DenseSet.h"
74#include "llvm/ADT/FoldingSet.h"
75#include "llvm/ADT/PointerUnion.h"
76#include "llvm/ADT/STLExtras.h"
77#include "llvm/ADT/SmallPtrSet.h"
78#include "llvm/ADT/SmallVector.h"
79#include "llvm/ADT/StringExtras.h"
80#include "llvm/ADT/StringRef.h"
81#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
82#include "llvm/Support/Capacity.h"
83#include "llvm/Support/Compiler.h"
84#include "llvm/Support/ErrorHandling.h"
85#include "llvm/Support/MD5.h"
86#include "llvm/Support/MathExtras.h"
87#include "llvm/Support/SipHash.h"
88#include "llvm/Support/raw_ostream.h"
89#include "llvm/TargetParser/AArch64TargetParser.h"
90#include "llvm/TargetParser/Triple.h"
91#include <algorithm>
92#include <cassert>
93#include <cstddef>
94#include <cstdint>
95#include <cstdlib>
96#include <map>
97#include <memory>
98#include <optional>
99#include <string>
100#include <tuple>
101#include <utility>
102
103using namespace clang;
104
/// Ranks of the builtin floating-point types, ordered from lowest to highest
/// rank. NOTE(review): the relative order (e.g. BFloat16 below Float16) is
/// presumably relied on by the floating-rank comparison/conversion logic in
/// this file -- confirm against getFloatingRank()'s users before reordering.
enum FloatingRank {
  BFloat16Rank,
  Float16Rank,
  HalfRank,
  FloatRank,
  DoubleRank,
  LongDoubleRank,
  Float128Rank,
  Ibm128Rank
};
115
/// DenseMapInfo specialization that lets llvm::FoldingSetNodeID be used
/// directly as a DenseMap key.
template <> struct llvm::DenseMapInfo<llvm::FoldingSetNodeID> {
  // The empty key is a default-constructed, zero-length ID.
  static FoldingSetNodeID getEmptyKey() { return FoldingSetNodeID{}; }

  // The tombstone key is an ID stuffed with max-unsigned values. The
  // iteration count is derived from sizeof(id), i.e. the size of the
  // FoldingSetNodeID object itself -- a heuristic that merely ensures the
  // tombstone differs from the empty key and from realistic profiles.
  static FoldingSetNodeID getTombstoneKey() {
    FoldingSetNodeID id;
    for (size_t i = 0; i < sizeof(id) / sizeof(unsigned); ++i) {
      id.AddInteger(I: std::numeric_limits<unsigned>::max());
    }
    return id;
  }

  // Hash with FoldingSetNodeID's own hash computation.
  static unsigned getHashValue(const FoldingSetNodeID &Val) {
    return Val.ComputeHash();
  }

  // Equality delegates to FoldingSetNodeID::operator==.
  static bool isEqual(const FoldingSetNodeID &LHS,
                      const FoldingSetNodeID &RHS) {
    return LHS == RHS;
  }
};
136
/// \returns The locations that are relevant when searching for Doc comments
/// related to \p D.
///
/// Returns an empty vector when \p D can never carry user documentation
/// (implicit decls, implicit instantiations, parameters, template
/// parameters, ...). Otherwise returns one candidate location -- or, for
/// declarations produced by a macro, up to two: the expansion site first,
/// then the spelling location inside the macro.
static SmallVector<SourceLocation, 2>
getDeclLocsForCommentSearch(const Decl *D, SourceManager &SourceMgr) {
  assert(D);

  // User can not attach documentation to implicit declarations.
  if (D->isImplicit())
    return {};

  // User can not attach documentation to implicit instantiations.
  if (const auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
    if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  // Same for implicitly-instantiated static data members.
  if (const auto *VD = dyn_cast<VarDecl>(Val: D)) {
    if (VD->isStaticDataMember() &&
        VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CRD = dyn_cast<CXXRecordDecl>(Val: D)) {
    if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  // Undeclared specializations are treated like implicit instantiations.
  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(Val: D)) {
    TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
    if (TSK == TSK_ImplicitInstantiation ||
        TSK == TSK_Undeclared)
      return {};
  }

  if (const auto *ED = dyn_cast<EnumDecl>(Val: D)) {
    if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }
  if (const auto *TD = dyn_cast<TagDecl>(Val: D)) {
    // When tag declaration (but not definition!) is part of the
    // decl-specifier-seq of some other declaration, it doesn't get comment
    if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
      return {};
  }
  // TODO: handle comments for function parameters properly.
  if (isa<ParmVarDecl>(Val: D))
    return {};

  // TODO: we could look up template parameter documentation in the template
  // documentation.
  if (isa<TemplateTypeParmDecl>(Val: D) ||
      isa<NonTypeTemplateParmDecl>(Val: D) ||
      isa<TemplateTemplateParmDecl>(Val: D))
    return {};

  SmallVector<SourceLocation, 2> Locations;
  // Find declaration location.
  // For Objective-C declarations we generally don't expect to have multiple
  // declarators, thus use declaration starting location as the "declaration
  // location".
  // For all other declarations multiple declarators are used quite frequently,
  // so we use the location of the identifier as the "declaration location".
  SourceLocation BaseLocation;
  if (isa<ObjCMethodDecl>(Val: D) || isa<ObjCContainerDecl>(Val: D) ||
      isa<ObjCPropertyDecl>(Val: D) || isa<RedeclarableTemplateDecl>(Val: D) ||
      isa<ClassTemplateSpecializationDecl>(Val: D) ||
      // Allow association with Y across {} in `typedef struct X {} Y`.
      isa<TypedefDecl>(Val: D))
    BaseLocation = D->getBeginLoc();
  else
    BaseLocation = D->getLocation();

  if (!D->getLocation().isMacroID()) {
    // Ordinary case: a single location to search around.
    Locations.emplace_back(Args&: BaseLocation);
  } else {
    const auto *DeclCtx = D->getDeclContext();

    // When encountering definitions generated from a macro (that are not
    // contained by another declaration in the macro) we need to try and find
    // the comment at the location of the expansion but if there is no comment
    // there we should retry to see if there is a comment inside the macro as
    // well. To this end we return first BaseLocation to first look at the
    // expansion site, the second value is the spelling location of the
    // beginning of the declaration defined inside the macro.
    if (!(DeclCtx &&
          Decl::castFromDeclContext(DeclCtx)->getLocation().isMacroID())) {
      Locations.emplace_back(Args: SourceMgr.getExpansionLoc(Loc: BaseLocation));
    }

    // We use Decl::getBeginLoc() and not just BaseLocation here to ensure that
    // we don't refer to the macro argument location at the expansion site (this
    // can happen if the name's spelling is provided via macro argument), and
    // always to the declaration itself.
    Locations.emplace_back(Args: SourceMgr.getSpellingLoc(Loc: D->getBeginLoc()));
  }

  return Locations;
}
235
/// Search \p CommentsInTheFile (keyed by file offset) for a doc comment
/// belonging to \p D, whose representative location is
/// \p RepresentativeLocForDecl. Two cases are tried, in order:
///   1. a trailing comment behind the declaration, on the same line (only
///      for decl kinds that support trailing documentation);
///   2. the nearest doc comment before the declaration, provided nothing but
///      whitespace separates it from the declaration.
/// Returns null when no suitable comment exists.
RawComment *ASTContext::getRawCommentForDeclNoCacheImpl(
    const Decl *D, const SourceLocation RepresentativeLocForDecl,
    const std::map<unsigned, RawComment *> &CommentsInTheFile) const {
  // If the declaration doesn't map directly to a location in a file, we
  // can't find the comment.
  if (RepresentativeLocForDecl.isInvalid() ||
      !RepresentativeLocForDecl.isFileID())
    return nullptr;

  // If there are no comments anywhere, we won't find anything.
  if (CommentsInTheFile.empty())
    return nullptr;

  // Decompose the location for the declaration and find the beginning of the
  // file buffer.
  const FileIDAndOffset DeclLocDecomp =
      SourceMgr.getDecomposedLoc(Loc: RepresentativeLocForDecl);

  // Slow path.
  // First comment whose offset is at or after the declaration's offset.
  auto OffsetCommentBehindDecl =
      CommentsInTheFile.lower_bound(x: DeclLocDecomp.second);

  // First check whether we have a trailing comment.
  if (OffsetCommentBehindDecl != CommentsInTheFile.end()) {
    RawComment *CommentBehindDecl = OffsetCommentBehindDecl->second;
    if ((CommentBehindDecl->isDocumentation() ||
         LangOpts.CommentOpts.ParseAllComments) &&
        CommentBehindDecl->isTrailingComment() &&
        (isa<FieldDecl>(Val: D) || isa<EnumConstantDecl>(Val: D) || isa<VarDecl>(Val: D) ||
         isa<ObjCMethodDecl>(Val: D) || isa<ObjCPropertyDecl>(Val: D))) {

      // Check that Doxygen trailing comment comes after the declaration, starts
      // on the same line and in the same file as the declaration.
      if (SourceMgr.getLineNumber(FID: DeclLocDecomp.first, FilePos: DeclLocDecomp.second) ==
          Comments.getCommentBeginLine(C: CommentBehindDecl, File: DeclLocDecomp.first,
                                       Offset: OffsetCommentBehindDecl->first)) {
        return CommentBehindDecl;
      }
    }
  }

  // The comment just after the declaration was not a trailing comment.
  // Let's look at the previous comment.
  if (OffsetCommentBehindDecl == CommentsInTheFile.begin())
    return nullptr;

  auto OffsetCommentBeforeDecl = --OffsetCommentBehindDecl;
  RawComment *CommentBeforeDecl = OffsetCommentBeforeDecl->second;

  // Check that we actually have a non-member Doxygen comment.
  if (!(CommentBeforeDecl->isDocumentation() ||
        LangOpts.CommentOpts.ParseAllComments) ||
      CommentBeforeDecl->isTrailingComment())
    return nullptr;

  // Decompose the end of the comment.
  const unsigned CommentEndOffset =
      Comments.getCommentEndOffset(C: CommentBeforeDecl);

  // Get the corresponding buffer.
  bool Invalid = false;
  const char *Buffer = SourceMgr.getBufferData(FID: DeclLocDecomp.first,
                                               Invalid: &Invalid).data();
  if (Invalid)
    return nullptr;

  // Extract text between the comment and declaration.
  StringRef Text(Buffer + CommentEndOffset,
                 DeclLocDecomp.second - CommentEndOffset);

  // There should be no other declarations or preprocessor directives between
  // comment and declaration.
  if (Text.find_last_of(Chars: ";{}#@") != StringRef::npos)
    return nullptr;

  return CommentBeforeDecl;
}
313
314RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
315 const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr);
316
317 for (const auto DeclLoc : DeclLocs) {
318 // If the declaration doesn't map directly to a location in a file, we
319 // can't find the comment.
320 if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
321 continue;
322
323 if (ExternalSource && !CommentsLoaded) {
324 ExternalSource->ReadComments();
325 CommentsLoaded = true;
326 }
327
328 if (Comments.empty())
329 continue;
330
331 const FileID File = SourceMgr.getDecomposedLoc(Loc: DeclLoc).first;
332 if (!File.isValid())
333 continue;
334
335 const auto CommentsInThisFile = Comments.getCommentsInFile(File);
336 if (!CommentsInThisFile || CommentsInThisFile->empty())
337 continue;
338
339 if (RawComment *Comment =
340 getRawCommentForDeclNoCacheImpl(D, RepresentativeLocForDecl: DeclLoc, CommentsInTheFile: *CommentsInThisFile))
341 return Comment;
342 }
343
344 return nullptr;
345}
346
/// Record a single raw comment in this context's comment list.
void ASTContext::addComment(const RawComment &RC) {
  // Comments in system headers are only retained when explicitly requested.
  assert(LangOpts.RetainCommentsFromSystemHeaders ||
         !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin()));
  Comments.addComment(RC, CommentOpts: LangOpts.CommentOpts, Allocator&: BumpAlloc);
}
352
/// If we have a 'templated' declaration for a template, adjust 'D' to
/// refer to the actual template.
/// If we have an implicit instantiation, adjust 'D' to refer to template.
///
/// Used by the comment machinery so documentation written on a template (or
/// on the entity a member was instantiated from) is found when querying an
/// instantiation. Returns \p D unchanged when no adjustment applies.
static const Decl &adjustDeclToTemplate(const Decl &D) {
  if (const auto *FD = dyn_cast<FunctionDecl>(Val: &D)) {
    // Is this function declaration part of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
      return *FTD;

    // Nothing to do if function is not an implicit instantiation.
    if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation)
      return D;

    // Function is an implicit instantiation of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
      return *FTD;

    // Function is instantiated from a member definition of a class template?
    if (const FunctionDecl *MemberDecl =
            FD->getInstantiatedFromMemberFunction())
      return *MemberDecl;

    return D;
  }
  if (const auto *VD = dyn_cast<VarDecl>(Val: &D)) {
    // Static data member is instantiated from a member definition of a class
    // template?
    if (VD->isStaticDataMember())
      if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember())
        return *MemberDecl;

    return D;
  }
  if (const auto *CRD = dyn_cast<CXXRecordDecl>(Val: &D)) {
    // Is this class declaration part of a class template?
    if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate())
      return *CTD;

    // Class is an implicit instantiation of a class template or partial
    // specialization?
    if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(Val: CRD)) {
      if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation)
        return D;
      // Prefer whichever was actually instantiated from: the primary
      // template or a partial specialization.
      llvm::PointerUnion<ClassTemplateDecl *,
                         ClassTemplatePartialSpecializationDecl *>
          PU = CTSD->getSpecializedTemplateOrPartial();
      return isa<ClassTemplateDecl *>(Val: PU)
                 ? *static_cast<const Decl *>(cast<ClassTemplateDecl *>(Val&: PU))
                 : *static_cast<const Decl *>(
                       cast<ClassTemplatePartialSpecializationDecl *>(Val&: PU));
    }

    // Class is instantiated from a member definition of a class template?
    if (const MemberSpecializationInfo *Info =
            CRD->getMemberSpecializationInfo())
      return *Info->getInstantiatedFrom();

    return D;
  }
  if (const auto *ED = dyn_cast<EnumDecl>(Val: &D)) {
    // Enum is instantiated from a member definition of a class template?
    if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum())
      return *MemberDecl;

    return D;
  }
  // FIXME: Adjust alias templates?
  return D;
}
422
423const RawComment *ASTContext::getRawCommentForAnyRedecl(
424 const Decl *D,
425 const Decl **OriginalDecl) const {
426 if (!D) {
427 if (OriginalDecl)
428 OriginalDecl = nullptr;
429 return nullptr;
430 }
431
432 D = &adjustDeclToTemplate(D: *D);
433
434 // Any comment directly attached to D?
435 {
436 auto DeclComment = DeclRawComments.find(Val: D);
437 if (DeclComment != DeclRawComments.end()) {
438 if (OriginalDecl)
439 *OriginalDecl = D;
440 return DeclComment->second;
441 }
442 }
443
444 // Any comment attached to any redeclaration of D?
445 const Decl *CanonicalD = D->getCanonicalDecl();
446 if (!CanonicalD)
447 return nullptr;
448
449 {
450 auto RedeclComment = RedeclChainComments.find(Val: CanonicalD);
451 if (RedeclComment != RedeclChainComments.end()) {
452 if (OriginalDecl)
453 *OriginalDecl = RedeclComment->second;
454 auto CommentAtRedecl = DeclRawComments.find(Val: RedeclComment->second);
455 assert(CommentAtRedecl != DeclRawComments.end() &&
456 "This decl is supposed to have comment attached.");
457 return CommentAtRedecl->second;
458 }
459 }
460
461 // Any redeclarations of D that we haven't checked for comments yet?
462 const Decl *LastCheckedRedecl = [&]() {
463 const Decl *LastChecked = CommentlessRedeclChains.lookup(Val: CanonicalD);
464 bool CanUseCommentlessCache = false;
465 if (LastChecked) {
466 for (auto *Redecl : CanonicalD->redecls()) {
467 if (Redecl == D) {
468 CanUseCommentlessCache = true;
469 break;
470 }
471 if (Redecl == LastChecked)
472 break;
473 }
474 }
475 // FIXME: This could be improved so that even if CanUseCommentlessCache
476 // is false, once we've traversed past CanonicalD we still skip ahead
477 // LastChecked.
478 return CanUseCommentlessCache ? LastChecked : nullptr;
479 }();
480
481 for (const Decl *Redecl : D->redecls()) {
482 assert(Redecl);
483 // Skip all redeclarations that have been checked previously.
484 if (LastCheckedRedecl) {
485 if (LastCheckedRedecl == Redecl) {
486 LastCheckedRedecl = nullptr;
487 }
488 continue;
489 }
490 const RawComment *RedeclComment = getRawCommentForDeclNoCache(D: Redecl);
491 if (RedeclComment) {
492 cacheRawCommentForDecl(OriginalD: *Redecl, Comment: *RedeclComment);
493 if (OriginalDecl)
494 *OriginalDecl = Redecl;
495 return RedeclComment;
496 }
497 CommentlessRedeclChains[CanonicalD] = Redecl;
498 }
499
500 if (OriginalDecl)
501 *OriginalDecl = nullptr;
502 return nullptr;
503}
504
/// Cache \p Comment as the raw comment attached to \p OriginalD, and record
/// \p OriginalD as the commented redeclaration of its redecl chain.
void ASTContext::cacheRawCommentForDecl(const Decl &OriginalD,
                                        const RawComment &Comment) const {
  assert(Comment.isDocumentation() || LangOpts.CommentOpts.ParseAllComments);
  // Map the decl itself to its comment.
  DeclRawComments.try_emplace(Key: &OriginalD, Args: &Comment);
  const Decl *const CanonicalDecl = OriginalD.getCanonicalDecl();
  // Remember which redeclaration in the chain carries the comment.
  RedeclChainComments.try_emplace(Key: CanonicalDecl, Args: &OriginalD);
  // The chain is no longer known to be comment-free.
  CommentlessRedeclChains.erase(Val: CanonicalDecl);
}
513
514static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
515 SmallVectorImpl<const NamedDecl *> &Redeclared) {
516 const DeclContext *DC = ObjCMethod->getDeclContext();
517 if (const auto *IMD = dyn_cast<ObjCImplDecl>(Val: DC)) {
518 const ObjCInterfaceDecl *ID = IMD->getClassInterface();
519 if (!ID)
520 return;
521 // Add redeclared method here.
522 for (const auto *Ext : ID->known_extensions()) {
523 if (ObjCMethodDecl *RedeclaredMethod =
524 Ext->getMethod(Sel: ObjCMethod->getSelector(),
525 isInstance: ObjCMethod->isInstanceMethod()))
526 Redeclared.push_back(Elt: RedeclaredMethod);
527 }
528 }
529}
530
/// Try to attach not-yet-attached comments in the just-parsed file to the
/// given just-parsed \p Decls, caching and eagerly parsing each match with
/// the help of \p PP.
void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls,
                                                 const Preprocessor *PP) {
  if (Comments.empty() || Decls.empty())
    return;

  // Determine the file the decls were parsed in from the first valid decl
  // location; all of Decls are expected to come from the same file.
  FileID File;
  for (const Decl *D : Decls) {
    if (D->isInvalidDecl())
      continue;

    D = &adjustDeclToTemplate(D: *D);
    SourceLocation Loc = D->getLocation();
    if (Loc.isValid()) {
      // See if there are any new comments that are not attached to a decl.
      // The location doesn't have to be precise - we care only about the file.
      File = SourceMgr.getDecomposedLoc(Loc).first;
      break;
    }
  }

  if (File.isInvalid())
    return;

  // If the last comment in the file is already attached, there is nothing
  // new to hand out.
  auto CommentsInThisFile = Comments.getCommentsInFile(File);
  if (!CommentsInThisFile || CommentsInThisFile->empty() ||
      CommentsInThisFile->rbegin()->second->isAttached())
    return;

  // There is at least one comment not attached to a decl.
  // Maybe it should be attached to one of Decls?
  //
  // Note that this way we pick up not only comments that precede the
  // declaration, but also comments that *follow* the declaration -- thanks to
  // the lookahead in the lexer: we've consumed the semicolon and looked
  // ahead through comments.
  for (const Decl *D : Decls) {
    assert(D);
    if (D->isInvalidDecl())
      continue;

    D = &adjustDeclToTemplate(D: *D);

    // Skip decls that already have a comment attached.
    if (DeclRawComments.count(Val: D) > 0)
      continue;

    const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr);

    for (const auto DeclLoc : DeclLocs) {
      if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
        continue;

      // First candidate location that produces a comment wins; cache the
      // raw comment and its eagerly-parsed form.
      if (RawComment *const DocComment = getRawCommentForDeclNoCacheImpl(
              D, RepresentativeLocForDecl: DeclLoc, CommentsInTheFile: *CommentsInThisFile)) {
        cacheRawCommentForDecl(OriginalD: *D, Comment: *DocComment);
        comments::FullComment *FC = DocComment->parse(Context: *this, PP, D);
        ParsedComments[D->getCanonicalDecl()] = FC;
        break;
      }
    }
  }
}
592
593comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
594 const Decl *D) const {
595 auto *ThisDeclInfo = new (*this) comments::DeclInfo;
596 ThisDeclInfo->CommentDecl = D;
597 ThisDeclInfo->IsFilled = false;
598 ThisDeclInfo->fill();
599 ThisDeclInfo->CommentDecl = FC->getDecl();
600 if (!ThisDeclInfo->TemplateParameters)
601 ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
602 comments::FullComment *CFC =
603 new (*this) comments::FullComment(FC->getBlocks(),
604 ThisDeclInfo);
605 return CFC;
606}
607
608comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const {
609 const RawComment *RC = getRawCommentForDeclNoCache(D);
610 return RC ? RC->parse(Context: *this, PP: nullptr, D) : nullptr;
611}
612
/// Return the parsed documentation comment for \p D, consulting and filling
/// the ParsedComments cache. When \p D itself has no comment, documentation
/// is inherited, in order of preference, from: the property an ObjC accessor
/// implements, overridden/redeclared methods, a typedef's underlying tag
/// type, ObjC super classes, an ObjC category's interface, or public C++
/// base classes (non-virtual first, then virtual).
comments::FullComment *ASTContext::getCommentForDecl(
    const Decl *D,
    const Preprocessor *PP) const {
  if (!D || D->isInvalidDecl())
    return nullptr;
  D = &adjustDeclToTemplate(D: *D);

  const Decl *Canonical = D->getCanonicalDecl();
  llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
      ParsedComments.find(Val: Canonical);

  if (Pos != ParsedComments.end()) {
    // Cache hit. When the query is for a non-canonical redeclaration,
    // re-target the cached comment's DeclInfo at D by cloning.
    if (Canonical != D) {
      comments::FullComment *FC = Pos->second;
      comments::FullComment *CFC = cloneFullComment(FC, D);
      return CFC;
    }
    return Pos->second;
  }

  const Decl *OriginalDecl = nullptr;

  const RawComment *RC = getRawCommentForAnyRedecl(D, OriginalDecl: &OriginalDecl);
  if (!RC) {
    // No comment anywhere on D's redecl chain: try to inherit one.
    if (isa<ObjCMethodDecl>(Val: D) || isa<FunctionDecl>(Val: D)) {
      SmallVector<const NamedDecl*, 8> Overridden;
      const auto *OMD = dyn_cast<ObjCMethodDecl>(Val: D);
      // A property accessor inherits the property's documentation.
      if (OMD && OMD->isPropertyAccessor())
        if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
          if (comments::FullComment *FC = getCommentForDecl(D: PDecl, PP))
            return cloneFullComment(FC, D);
      // Otherwise fall back to redeclared (class extension) and overridden
      // methods, in that order.
      if (OMD)
        addRedeclaredMethods(ObjCMethod: OMD, Redeclared&: Overridden);
      getOverriddenMethods(Method: dyn_cast<NamedDecl>(Val: D), Overridden);
      for (unsigned i = 0, e = Overridden.size(); i < e; i++)
        if (comments::FullComment *FC = getCommentForDecl(D: Overridden[i], PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *TD = dyn_cast<TypedefNameDecl>(Val: D)) {
      // Attach any tag type's documentation to its typedef if latter
      // does not have one of its own.
      QualType QT = TD->getUnderlyingType();
      if (const auto *TT = QT->getAs<TagType>())
        if (const Decl *TD = TT->getDecl())
          if (comments::FullComment *FC = getCommentForDecl(D: TD, PP))
            return cloneFullComment(FC, D);
    }
    else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(Val: D)) {
      // Walk up the superclass chain until a documented class is found.
      while (IC->getSuperClass()) {
        IC = IC->getSuperClass();
        if (comments::FullComment *FC = getCommentForDecl(D: IC, PP))
          return cloneFullComment(FC, D);
      }
    }
    else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(Val: D)) {
      // A category inherits its class interface's documentation.
      if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
        if (comments::FullComment *FC = getCommentForDecl(D: IC, PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *RD = dyn_cast<CXXRecordDecl>(Val: D)) {
      if (!(RD = RD->getDefinition()))
        return nullptr;
      // Check non-virtual bases.
      for (const auto &I : RD->bases()) {
        // Only public, non-virtual bases are considered here.
        if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(NonVirtualBase= NonVirtualBase->getDefinition()))
            continue;

          if (comments::FullComment *FC = getCommentForDecl(D: (NonVirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
      // Check virtual bases.
      for (const auto &I : RD->vbases()) {
        if (I.getAccessSpecifier() != AS_public)
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(VirtualBase= VirtualBase->getDefinition()))
            continue;
          if (comments::FullComment *FC = getCommentForDecl(D: (VirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
    }
    return nullptr;
  }

  // If the RawComment was attached to other redeclaration of this Decl, we
  // should parse the comment in context of that other Decl. This is important
  // because comments can contain references to parameter names which can be
  // different across redeclarations.
  if (D != OriginalDecl && OriginalDecl)
    return getCommentForDecl(D: OriginalDecl, PP);

  comments::FullComment *FC = RC->parse(Context: *this, PP, D);
  ParsedComments[Canonical] = FC;
  return FC;
}
719
/// Profile \p Parm for canonicalization: its depth, position, pack-ness, and
/// a structural encoding of its template parameter list. Each inner
/// parameter is tagged with a kind discriminator (0 = type, 1 = non-type,
/// 2 = template template) before its own data. Constraints are not profiled
/// -- for non-type parameters the unconstrained canonical type is used
/// (cf. C++20 [temp.over.link]/6, noted in the canonicalization code below).
void
ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
                                                   const ASTContext &C,
                                                   TemplateTemplateParmDecl *Parm) {
  ID.AddInteger(I: Parm->getDepth());
  ID.AddInteger(I: Parm->getPosition());
  ID.AddBoolean(B: Parm->isParameterPack());

  TemplateParameterList *Params = Parm->getTemplateParameters();
  ID.AddInteger(I: Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                          PEnd = Params->end();
       P != PEnd; ++P) {
    // Type parameter: pack-ness and expansion count.
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Val: *P)) {
      ID.AddInteger(I: 0);
      ID.AddBoolean(B: TTP->isParameterPack());
      ID.AddInteger(
          I: TTP->getNumExpansionParameters().toInternalRepresentation());
      continue;
    }

    // Non-type parameter: pack-ness, unconstrained canonical type, and (for
    // expanded packs) each expansion's canonical type.
    if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Val: *P)) {
      ID.AddInteger(I: 1);
      ID.AddBoolean(B: NTTP->isParameterPack());
      ID.AddPointer(Ptr: C.getUnconstrainedType(T: C.getCanonicalType(T: NTTP->getType()))
                        .getAsOpaquePtr());
      if (NTTP->isExpandedParameterPack()) {
        ID.AddBoolean(B: true);
        ID.AddInteger(I: NTTP->getNumExpansionTypes());
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          QualType T = NTTP->getExpansionType(I);
          ID.AddPointer(Ptr: T.getCanonicalType().getAsOpaquePtr());
        }
      } else
        ID.AddBoolean(B: false);
      continue;
    }

    // Template template parameter: recurse.
    auto *TTP = cast<TemplateTemplateParmDecl>(Val: *P);
    ID.AddInteger(I: 2);
    Profile(ID, C, Parm: TTP);
  }
}
763
/// Return the canonical template template parameter equivalent to \p TTP,
/// creating (and registering) it on first request. The canonical form lives
/// at translation-unit scope, has no name, and rebuilds every inner
/// parameter in canonical form.
TemplateTemplateParmDecl *
ASTContext::getCanonicalTemplateTemplateParmDecl(
                                          TemplateTemplateParmDecl *TTP) const {
  // Check if we already have a canonical template template parameter.
  llvm::FoldingSetNodeID ID;
  CanonicalTemplateTemplateParm::Profile(ID, C: *this, Parm: TTP);
  void *InsertPos = nullptr;
  CanonicalTemplateTemplateParm *Canonical
    = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  if (Canonical)
    return Canonical->getParam();

  // Build a canonical template parameter list.
  TemplateParameterList *Params = TTP->getTemplateParameters();
  SmallVector<NamedDecl *, 4> CanonParams;
  CanonParams.reserve(N: Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                          PEnd = Params->end();
       P != PEnd; ++P) {
    // Note that, per C++20 [temp.over.link]/6, when determining whether
    // template-parameters are equivalent, constraints are ignored.
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Val: *P)) {
      // Canonical type parameter: unnamed, no type-constraint.
      TemplateTypeParmDecl *NewTTP = TemplateTypeParmDecl::Create(
          C: *this, DC: getTranslationUnitDecl(), KeyLoc: SourceLocation(), NameLoc: SourceLocation(),
          D: TTP->getDepth(), P: TTP->getIndex(), Id: nullptr, Typename: false,
          ParameterPack: TTP->isParameterPack(), /*HasTypeConstraint=*/false,
          NumExpanded: TTP->getNumExpansionParameters());
      CanonParams.push_back(Elt: NewTTP);
    } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Val: *P)) {
      // Canonical non-type parameter: unconstrained canonical type, with
      // expansion types canonicalized as well for expanded packs.
      QualType T = getUnconstrainedType(T: getCanonicalType(T: NTTP->getType()));
      TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
      NonTypeTemplateParmDecl *Param;
      if (NTTP->isExpandedParameterPack()) {
        SmallVector<QualType, 2> ExpandedTypes;
        SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          ExpandedTypes.push_back(Elt: getCanonicalType(T: NTTP->getExpansionType(I)));
          ExpandedTInfos.push_back(
              Elt: getTrivialTypeSourceInfo(T: ExpandedTypes.back()));
        }

        Param = NonTypeTemplateParmDecl::Create(C: *this, DC: getTranslationUnitDecl(),
                                                StartLoc: SourceLocation(),
                                                IdLoc: SourceLocation(),
                                                D: NTTP->getDepth(),
                                                P: NTTP->getPosition(), Id: nullptr,
                                                T,
                                                TInfo,
                                                ExpandedTypes,
                                                ExpandedTInfos);
      } else {
        Param = NonTypeTemplateParmDecl::Create(C: *this, DC: getTranslationUnitDecl(),
                                                StartLoc: SourceLocation(),
                                                IdLoc: SourceLocation(),
                                                D: NTTP->getDepth(),
                                                P: NTTP->getPosition(), Id: nullptr,
                                                T,
                                                ParameterPack: NTTP->isParameterPack(),
                                                TInfo);
      }
      CanonParams.push_back(Elt: Param);
    } else
      // Inner template template parameter: canonicalize recursively.
      CanonParams.push_back(Elt: getCanonicalTemplateTemplateParmDecl(
          TTP: cast<TemplateTemplateParmDecl>(Val: *P)));
  }

  TemplateTemplateParmDecl *CanonTTP = TemplateTemplateParmDecl::Create(
      C: *this, DC: getTranslationUnitDecl(), L: SourceLocation(), D: TTP->getDepth(),
      P: TTP->getPosition(), ParameterPack: TTP->isParameterPack(), Id: nullptr, /*Typename=*/false,
      Params: TemplateParameterList::Create(C: *this, TemplateLoc: SourceLocation(), LAngleLoc: SourceLocation(),
                                    Params: CanonParams, RAngleLoc: SourceLocation(),
                                    /*RequiresClause=*/nullptr));

  // Get the new insert position for the node we care about.
  // (Creating the canonical parameters above may have grown the folding set
  // and invalidated the previous InsertPos.)
  Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  assert(!Canonical && "Shouldn't be in the map!");
  (void)Canonical;

  // Create the canonical template template parameter entry.
  Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
  CanonTemplateTemplateParms.InsertNode(N: Canonical, InsertPos);
  return CanonTTP;
}
847
848TemplateTemplateParmDecl *
849ASTContext::findCanonicalTemplateTemplateParmDeclInternal(
850 TemplateTemplateParmDecl *TTP) const {
851 llvm::FoldingSetNodeID ID;
852 CanonicalTemplateTemplateParm::Profile(ID, C: *this, Parm: TTP);
853 void *InsertPos = nullptr;
854 CanonicalTemplateTemplateParm *Canonical =
855 CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
856 return Canonical ? Canonical->getParam() : nullptr;
857}
858
859TemplateTemplateParmDecl *
860ASTContext::insertCanonicalTemplateTemplateParmDeclInternal(
861 TemplateTemplateParmDecl *CanonTTP) const {
862 llvm::FoldingSetNodeID ID;
863 CanonicalTemplateTemplateParm::Profile(ID, C: *this, Parm: CanonTTP);
864 void *InsertPos = nullptr;
865 if (auto *Existing =
866 CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos))
867 return Existing->getParam();
868 CanonTemplateTemplateParms.InsertNode(
869 N: new (*this) CanonicalTemplateTemplateParm(CanonTTP), InsertPos);
870 return CanonTTP;
871}
872
873/// Check if a type can have its sanitizer instrumentation elided based on its
874/// presence within an ignorelist.
875bool ASTContext::isTypeIgnoredBySanitizer(const SanitizerMask &Mask,
876 const QualType &Ty) const {
877 std::string TyName = Ty.getUnqualifiedType().getAsString(Policy: getPrintingPolicy());
878 return NoSanitizeL->containsType(Mask, MangledTypeName: TyName);
879}
880
881TargetCXXABI::Kind ASTContext::getCXXABIKind() const {
882 auto Kind = getTargetInfo().getCXXABI().getKind();
883 return getLangOpts().CXXABI.value_or(u&: Kind);
884}
885
// Instantiate the C++ ABI object matching the selected ABI kind, or null
// when not compiling C++ at all. All Itanium-family variants share one
// implementation at this level; only Microsoft is materially different.
CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
  if (!LangOpts.CPlusPlus) return nullptr;

  switch (getCXXABIKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericARM: // Same as Itanium at this level
  case TargetCXXABI::iOS:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::XL:
    return CreateItaniumCXXABI(Ctx&: *this);
  case TargetCXXABI::Microsoft:
    return CreateMicrosoftCXXABI(Ctx&: *this);
  }
  llvm_unreachable("Invalid CXXABI type!");
}
906
907interp::Context &ASTContext::getInterpContext() {
908 if (!InterpContext) {
909 InterpContext.reset(p: new interp::Context(*this));
910 }
911 return *InterpContext;
912}
913
914ParentMapContext &ASTContext::getParentMapContext() {
915 if (!ParentMapCtx)
916 ParentMapCtx.reset(p: new ParentMapContext(*this));
917 return *ParentMapCtx;
918}
919
// Decide whether mangled names should use the language address-space map:
// either forced on/off by the language options, or deferred to the target.
static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
                                          const LangOptions &LangOpts) {
  switch (LangOpts.getAddressSpaceMapMangling()) {
  case LangOptions::ASMM_Target:
    return TI.useAddressSpaceMapMangling();
  case LangOptions::ASMM_On:
    return true;
  case LangOptions::ASMM_Off:
    return false;
  }
  llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
}
932
// ASTContext constructor. Most members are folding sets / maps seeded with
// `this_()` so they can allocate out of this context; the ignore/filter
// lists (NoSanitizeL, XRayFilter, ProfList) are built from the file lists
// carried in the language options. Initialization order follows member
// declaration order — do not reorder without checking the header.
ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
                       IdentifierTable &idents, SelectorTable &sels,
                       Builtin::Context &builtins, TranslationUnitKind TUKind)
    : ConstantArrayTypes(this_(), ConstantArrayTypesLog2InitSize),
      DependentSizedArrayTypes(this_()), DependentSizedExtVectorTypes(this_()),
      DependentAddressSpaceTypes(this_()), DependentVectorTypes(this_()),
      DependentSizedMatrixTypes(this_()),
      FunctionProtoTypes(this_(), FunctionProtoTypesLog2InitSize),
      DependentTypeOfExprTypes(this_()), DependentDecltypeTypes(this_()),
      DependentPackIndexingTypes(this_()), TemplateSpecializationTypes(this_()),
      DependentTemplateSpecializationTypes(this_()),
      DependentBitIntTypes(this_()), SubstTemplateTemplateParmPacks(this_()),
      DeducedTemplates(this_()), ArrayParameterTypes(this_()),
      CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts),
      NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)),
      XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
                                        LangOpts.XRayNeverInstrumentFiles,
                                        LangOpts.XRayAttrListFiles, SM)),
      ProfList(new ProfileList(LangOpts.ProfileListFiles, SM)),
      PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
      BuiltinInfo(builtins), TUKind(TUKind), DeclarationNames(*this),
      Comments(SM), CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
      CompCategories(this_()), LastSDM(nullptr, 0) {
  // Every context starts out with an (implicit) translation unit decl.
  addTranslationUnitDecl();
}
958
// Tear down everything the context owns that needs explicit destruction.
// Most AST nodes live in the bump allocator and are never destroyed, but a
// few side structures (record layouts, attribute vectors, module
// initializers) contain heap-allocating members and must have their
// destructors run by hand.
void ASTContext::cleanup() {
  // Release the DenseMaps associated with DeclContext objects.
  // FIXME: Is this the ideal solution?
  ReleaseDeclContextMaps();

  // Call all of the deallocation functions on all of their targets.
  for (auto &Pair : Deallocations)
    (Pair.first)(Pair.second);
  Deallocations.clear();

  // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
  // because they can contain DenseMaps.
  for (llvm::DenseMap<const ObjCInterfaceDecl *,
                      const ASTRecordLayout *>::iterator
           I = ObjCLayouts.begin(),
           E = ObjCLayouts.end();
       I != E;)
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(Ctx&: *this);
  ObjCLayouts.clear();

  for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
       I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(Ctx&: *this);
  }
  ASTRecordLayouts.clear();

  // AttrVecs are placement-allocated in the arena (see getDeclAttrs), so
  // only their destructors run here; the memory itself stays in BumpAlloc.
  for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
                                                       AEnd = DeclAttrs.end();
       A != AEnd; ++A)
    A->second->~AttrVec();
  DeclAttrs.clear();

  for (const auto &Value : ModuleInitializers)
    Value.second->~PerModuleInitializers();
  ModuleInitializers.clear();
}
999
// The destructor just delegates to cleanup(), which runs all pending
// deallocation callbacks and destroys the side tables.
ASTContext::~ASTContext() { cleanup(); }
1001
// Restrict AST traversals to the given top-level declarations. Any cached
// parent map is invalidated since it may reference the old scope.
void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
  TraversalScope = TopLevelDecls;
  getParentMapContext().clear();
}
1006
1007void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const {
1008 Deallocations.push_back(Elt: {Callback, Data});
1009}
1010
// Attach the external AST source (e.g. an ASTReader for a PCH/module file)
// that lazily supplies additional declarations to this context.
void
ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) {
  ExternalSource = std::move(Source);
}
1015
// Dump statistics about this context to stderr: per-type-class node counts
// and sizes (driven by TypeNodes.inc), implicit special-member counts, any
// external-source stats, and bump-allocator usage.
void ASTContext::PrintStats() const {
  llvm::errs() << "\n*** AST Context Stats:\n";
  llvm::errs() << "  " << Types.size() << " types total.\n";

  // One counter slot per concrete type class, plus a trailing sentinel.
  unsigned counts[] = {
#define TYPE(Name, Parent) 0,
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"
    0 // Extra
  };

  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
    Type *T = Types[i];
    counts[(unsigned)T->getTypeClass()]++;
  }

  unsigned Idx = 0;
  unsigned TotalBytes = 0;
#define TYPE(Name, Parent)                                              \
  if (counts[Idx])                                                      \
    llvm::errs() << "    " << counts[Idx] << " " << #Name               \
                 << " types, " << sizeof(Name##Type) << " each "        \
                 << "(" << counts[Idx] * sizeof(Name##Type)             \
                 << " bytes)\n";                                        \
  TotalBytes += counts[Idx] * sizeof(Name##Type);                       \
  ++Idx;
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"

  llvm::errs() << "Total bytes = " << TotalBytes << "\n";

  // Implicit special member functions.
  llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
               << NumImplicitDefaultConstructors
               << " implicit default constructors created\n";
  llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
               << NumImplicitCopyConstructors
               << " implicit copy constructors created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
                 << NumImplicitMoveConstructors
                 << " implicit move constructors created\n";
  llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
               << NumImplicitCopyAssignmentOperators
               << " implicit copy assignment operators created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
                 << NumImplicitMoveAssignmentOperators
                 << " implicit move assignment operators created\n";
  llvm::errs() << NumImplicitDestructorsDeclared << "/"
               << NumImplicitDestructors
               << " implicit destructors created\n";

  if (ExternalSource) {
    llvm::errs() << "\n";
    ExternalSource->PrintStats();
  }

  BumpAlloc.PrintStats();
}
1076
1077void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M,
1078 bool NotifyListeners) {
1079 if (NotifyListeners)
1080 if (auto *Listener = getASTMutationListener();
1081 Listener && !ND->isUnconditionallyVisible())
1082 Listener->RedefinedHiddenDefinition(D: ND, M);
1083
1084 MergedDefModules[cast<NamedDecl>(Val: ND->getCanonicalDecl())].push_back(NewVal: M);
1085}
1086
1087void ASTContext::deduplicateMergedDefinitionsFor(NamedDecl *ND) {
1088 auto It = MergedDefModules.find(Val: cast<NamedDecl>(Val: ND->getCanonicalDecl()));
1089 if (It == MergedDefModules.end())
1090 return;
1091
1092 auto &Merged = It->second;
1093 llvm::DenseSet<Module*> Found;
1094 for (Module *&M : Merged)
1095 if (!Found.insert(V: M).second)
1096 M = nullptr;
1097 llvm::erase(C&: Merged, V: nullptr);
1098}
1099
1100ArrayRef<Module *>
1101ASTContext::getModulesWithMergedDefinition(const NamedDecl *Def) {
1102 auto MergedIt =
1103 MergedDefModules.find(Val: cast<NamedDecl>(Val: Def->getCanonicalDecl()));
1104 if (MergedIt == MergedDefModules.end())
1105 return {};
1106 return MergedIt->second;
1107}
1108
// Deserialize all lazily-recorded module initializers into concrete Decls.
// The lazy list is moved out before iteration so that any re-entrant call
// during GetExternalDecl sees an empty list; the trailing assert verifies
// that deserialization did not queue new lazy initializers.
void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
  if (LazyInitializers.empty())
    return;

  auto *Source = Ctx.getExternalSource();
  assert(Source && "lazy initializers but no external source");

  auto LazyInits = std::move(LazyInitializers);
  LazyInitializers.clear();

  for (auto ID : LazyInits)
    Initializers.push_back(Elt: Source->GetExternalDecl(ID));

  assert(LazyInitializers.empty() &&
         "GetExternalDecl for lazy module initializer added more inits");
}
1125
// Record D as an initializer of module M. When D is an ImportDecl whose
// imported module boils down to a single ImportDecl initializer, that inner
// decl is recorded instead, collapsing trivial import chains.
void ASTContext::addModuleInitializer(Module *M, Decl *D) {
  // One special case: if we add a module initializer that imports another
  // module, and that module's only initializer is an ImportDecl, simplify.
  if (const auto *ID = dyn_cast<ImportDecl>(Val: D)) {
    auto It = ModuleInitializers.find(Val: ID->getImportedModule());

    // Maybe the ImportDecl does nothing at all. (Common case.)
    if (It == ModuleInitializers.end())
      return;

    // Maybe the ImportDecl only imports another ImportDecl.
    auto &Imported = *It->second;
    if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) {
      // Force lazy initializers to be materialized before inspecting them.
      Imported.resolve(Ctx&: *this);
      auto *OnlyDecl = Imported.Initializers.front();
      if (isa<ImportDecl>(Val: OnlyDecl))
        D = OnlyDecl;
    }
  }

  // Allocate the per-module record on first use; it lives in the arena and
  // is destroyed in cleanup().
  auto *&Inits = ModuleInitializers[M];
  if (!Inits)
    Inits = new (*this) PerModuleInitializers;
  Inits->Initializers.push_back(Elt: D);
}
1151
1152void ASTContext::addLazyModuleInitializers(Module *M,
1153 ArrayRef<GlobalDeclID> IDs) {
1154 auto *&Inits = ModuleInitializers[M];
1155 if (!Inits)
1156 Inits = new (*this) PerModuleInitializers;
1157 Inits->LazyInitializers.insert(I: Inits->LazyInitializers.end(),
1158 From: IDs.begin(), To: IDs.end());
1159}
1160
1161ArrayRef<Decl *> ASTContext::getModuleInitializers(Module *M) {
1162 auto It = ModuleInitializers.find(Val: M);
1163 if (It == ModuleInitializers.end())
1164 return {};
1165
1166 auto *Inits = It->second;
1167 Inits->resolve(Ctx&: *this);
1168 return Inits->Initializers;
1169}
1170
// Record the C++20 named module being built by this compilation. May only
// be called once per context, and only with a named module.
void ASTContext::setCurrentNamedModule(Module *M) {
  assert(M->isNamedModule());
  assert(!CurrentCXXNamedModule &&
         "We should set named module for ASTContext for only once");
  CurrentCXXNamedModule = M;
}
1177
// Determine whether two modules belong to the same (primary) C++ module.
// `!M1 != !M2` rejects the case where exactly one of them is null; both
// null is delegated to GetRepresentativeModule (M1 must be non-null then,
// per the assert below).
bool ASTContext::isInSameModule(const Module *M1, const Module *M2) const {
  if (!M1 != !M2)
    return false;

  /// Get the representative module for M. The representative module is the
  /// first module unit for a specific primary module name. So that the module
  /// units have the same representative module belongs to the same module.
  ///
  /// The process is helpful to reduce the expensive string operations.
  auto GetRepresentativeModule = [this](const Module *M) {
    auto Iter = SameModuleLookupSet.find(Val: M);
    if (Iter != SameModuleLookupSet.end())
      return Iter->second;

    // First lookup for this module: map its primary-module-interface name to
    // the first module unit seen with that name, and cache the result.
    const Module *RepresentativeModule =
        PrimaryModuleNameMap.try_emplace(Key: M->getPrimaryModuleInterfaceName(), Args&: M)
            .first->second;
    SameModuleLookupSet[M] = RepresentativeModule;
    return RepresentativeModule;
  };

  assert(M1 && "Shouldn't call `isInSameModule` if both M1 and M2 are none.");
  return GetRepresentativeModule(M1) == GetRepresentativeModule(M2);
}
1202
1203ExternCContextDecl *ASTContext::getExternCContextDecl() const {
1204 if (!ExternCContext)
1205 ExternCContext = ExternCContextDecl::Create(C: *this, TU: getTranslationUnitDecl());
1206
1207 return ExternCContext;
1208}
1209
1210BuiltinTemplateDecl *
1211ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK,
1212 const IdentifierInfo *II) const {
1213 auto *BuiltinTemplate =
1214 BuiltinTemplateDecl::Create(C: *this, DC: getTranslationUnitDecl(), Name: II, BTK);
1215 BuiltinTemplate->setImplicit();
1216 getTranslationUnitDecl()->addDecl(D: BuiltinTemplate);
1217
1218 return BuiltinTemplate;
1219}
1220
// Generate one lazy accessor (ASTContext::get<Name>Decl) per builtin
// template listed in BuiltinTemplates.inc; each creates its implicit
// declaration on first use and caches it in the corresponding Decl<Name>
// member.
#define BuiltinTemplate(BTName)                                                \
  BuiltinTemplateDecl *ASTContext::get##BTName##Decl() const {                 \
    if (!Decl##BTName)                                                         \
      Decl##BTName =                                                           \
          buildBuiltinTemplateDecl(BTK##BTName, get##BTName##Name());          \
    return Decl##BTName;                                                       \
  }
#include "clang/Basic/BuiltinTemplates.inc"
1229
// Build a compiler-synthesized record type named Name (a CXXRecordDecl in
// C++ mode, a plain RecordDecl otherwise) with default type visibility and
// no source location. The caller is responsible for adding it to a
// declaration context if desired.
RecordDecl *ASTContext::buildImplicitRecord(StringRef Name,
                                            RecordDecl::TagKind TK) const {
  SourceLocation Loc;
  RecordDecl *NewDecl;
  if (getLangOpts().CPlusPlus)
    NewDecl = CXXRecordDecl::Create(C: *this, TK, DC: getTranslationUnitDecl(), StartLoc: Loc,
                                    IdLoc: Loc, Id: &Idents.get(Name));
  else
    NewDecl = RecordDecl::Create(C: *this, TK, DC: getTranslationUnitDecl(), StartLoc: Loc, IdLoc: Loc,
                                 Id: &Idents.get(Name));
  NewDecl->setImplicit();
  // Implicit records always get default type visibility, regardless of any
  // prevailing -fvisibility setting.
  NewDecl->addAttr(A: TypeVisibilityAttr::CreateImplicit(
      Ctx&: const_cast<ASTContext &>(*this), Visibility: TypeVisibilityAttr::Default));
  return NewDecl;
}
1245
1246TypedefDecl *ASTContext::buildImplicitTypedef(QualType T,
1247 StringRef Name) const {
1248 TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
1249 TypedefDecl *NewDecl = TypedefDecl::Create(
1250 C&: const_cast<ASTContext &>(*this), DC: getTranslationUnitDecl(),
1251 StartLoc: SourceLocation(), IdLoc: SourceLocation(), Id: &Idents.get(Name), TInfo);
1252 NewDecl->setImplicit();
1253 return NewDecl;
1254}
1255
1256TypedefDecl *ASTContext::getInt128Decl() const {
1257 if (!Int128Decl)
1258 Int128Decl = buildImplicitTypedef(T: Int128Ty, Name: "__int128_t");
1259 return Int128Decl;
1260}
1261
1262TypedefDecl *ASTContext::getUInt128Decl() const {
1263 if (!UInt128Decl)
1264 UInt128Decl = buildImplicitTypedef(T: UnsignedInt128Ty, Name: "__uint128_t");
1265 return UInt128Decl;
1266}
1267
// Allocate the singleton BuiltinType node for kind K in the context's
// arena, store its canonical QualType in R, and register it in the global
// type list used by PrintStats().
void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
  auto *Ty = new (*this, alignof(BuiltinType)) BuiltinType(K);
  R = CanQualType::CreateUnsafe(Other: QualType(Ty, 0));
  Types.push_back(Elt: Ty);
}
1273
// One-time initialization of all builtin type singletons for the given
// target (and optional auxiliary target, e.g. for offloading compilations —
// TODO confirm the aux-target role against callers). The order and the
// language-option guards below are load-bearing: several types are either
// builtin singletons or target-derived typedef-like types depending on the
// language mode.
void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
                                  const TargetInfo *AuxTarget) {
  assert((!this->Target || this->Target == &Target) &&
         "Incorrect target reinitialization");
  assert(VoidTy.isNull() && "Context reinitialized?");

  this->Target = &Target;
  this->AuxTarget = AuxTarget;

  // The C++ ABI object and mangling mode depend on the target, so they are
  // created here rather than in the constructor.
  ABI.reset(p: createCXXABI(T: Target));
  AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(TI: Target, LangOpts);

  // C99 6.2.5p19.
  InitBuiltinType(R&: VoidTy, K: BuiltinType::Void);

  // C99 6.2.5p2.
  InitBuiltinType(R&: BoolTy, K: BuiltinType::Bool);
  // C99 6.2.5p3.
  if (LangOpts.CharIsSigned)
    InitBuiltinType(R&: CharTy, K: BuiltinType::Char_S);
  else
    InitBuiltinType(R&: CharTy, K: BuiltinType::Char_U);
  // C99 6.2.5p4.
  InitBuiltinType(R&: SignedCharTy, K: BuiltinType::SChar);
  InitBuiltinType(R&: ShortTy, K: BuiltinType::Short);
  InitBuiltinType(R&: IntTy, K: BuiltinType::Int);
  InitBuiltinType(R&: LongTy, K: BuiltinType::Long);
  InitBuiltinType(R&: LongLongTy, K: BuiltinType::LongLong);

  // C99 6.2.5p6.
  InitBuiltinType(R&: UnsignedCharTy, K: BuiltinType::UChar);
  InitBuiltinType(R&: UnsignedShortTy, K: BuiltinType::UShort);
  InitBuiltinType(R&: UnsignedIntTy, K: BuiltinType::UInt);
  InitBuiltinType(R&: UnsignedLongTy, K: BuiltinType::ULong);
  InitBuiltinType(R&: UnsignedLongLongTy, K: BuiltinType::ULongLong);

  // C99 6.2.5p10.
  InitBuiltinType(R&: FloatTy, K: BuiltinType::Float);
  InitBuiltinType(R&: DoubleTy, K: BuiltinType::Double);
  InitBuiltinType(R&: LongDoubleTy, K: BuiltinType::LongDouble);

  // GNU extension, __float128 for IEEE quadruple precision
  InitBuiltinType(R&: Float128Ty, K: BuiltinType::Float128);

  // __ibm128 for IBM extended precision
  InitBuiltinType(R&: Ibm128Ty, K: BuiltinType::Ibm128);

  // C11 extension ISO/IEC TS 18661-3
  InitBuiltinType(R&: Float16Ty, K: BuiltinType::Float16);

  // ISO/IEC JTC1 SC22 WG14 N1169 Extension (fixed-point types)
  InitBuiltinType(R&: ShortAccumTy, K: BuiltinType::ShortAccum);
  InitBuiltinType(R&: AccumTy, K: BuiltinType::Accum);
  InitBuiltinType(R&: LongAccumTy, K: BuiltinType::LongAccum);
  InitBuiltinType(R&: UnsignedShortAccumTy, K: BuiltinType::UShortAccum);
  InitBuiltinType(R&: UnsignedAccumTy, K: BuiltinType::UAccum);
  InitBuiltinType(R&: UnsignedLongAccumTy, K: BuiltinType::ULongAccum);
  InitBuiltinType(R&: ShortFractTy, K: BuiltinType::ShortFract);
  InitBuiltinType(R&: FractTy, K: BuiltinType::Fract);
  InitBuiltinType(R&: LongFractTy, K: BuiltinType::LongFract);
  InitBuiltinType(R&: UnsignedShortFractTy, K: BuiltinType::UShortFract);
  InitBuiltinType(R&: UnsignedFractTy, K: BuiltinType::UFract);
  InitBuiltinType(R&: UnsignedLongFractTy, K: BuiltinType::ULongFract);
  InitBuiltinType(R&: SatShortAccumTy, K: BuiltinType::SatShortAccum);
  InitBuiltinType(R&: SatAccumTy, K: BuiltinType::SatAccum);
  InitBuiltinType(R&: SatLongAccumTy, K: BuiltinType::SatLongAccum);
  InitBuiltinType(R&: SatUnsignedShortAccumTy, K: BuiltinType::SatUShortAccum);
  InitBuiltinType(R&: SatUnsignedAccumTy, K: BuiltinType::SatUAccum);
  InitBuiltinType(R&: SatUnsignedLongAccumTy, K: BuiltinType::SatULongAccum);
  InitBuiltinType(R&: SatShortFractTy, K: BuiltinType::SatShortFract);
  InitBuiltinType(R&: SatFractTy, K: BuiltinType::SatFract);
  InitBuiltinType(R&: SatLongFractTy, K: BuiltinType::SatLongFract);
  InitBuiltinType(R&: SatUnsignedShortFractTy, K: BuiltinType::SatUShortFract);
  InitBuiltinType(R&: SatUnsignedFractTy, K: BuiltinType::SatUFract);
  InitBuiltinType(R&: SatUnsignedLongFractTy, K: BuiltinType::SatULongFract);

  // GNU extension, 128-bit integers.
  InitBuiltinType(R&: Int128Ty, K: BuiltinType::Int128);
  InitBuiltinType(R&: UnsignedInt128Ty, K: BuiltinType::UInt128);

  // C++ 3.9.1p5
  if (TargetInfo::isTypeSigned(T: Target.getWCharType()))
    InitBuiltinType(R&: WCharTy, K: BuiltinType::WChar_S);
  else  // -fshort-wchar makes wchar_t be unsigned.
    InitBuiltinType(R&: WCharTy, K: BuiltinType::WChar_U);
  if (LangOpts.CPlusPlus && LangOpts.WChar)
    WideCharTy = WCharTy;
  else {
    // C99 (or C++ using -fno-wchar).
    WideCharTy = getFromTargetType(Type: Target.getWCharType());
  }

  WIntTy = getFromTargetType(Type: Target.getWIntType());

  // C++20 (proposed)
  InitBuiltinType(R&: Char8Ty, K: BuiltinType::Char8);

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(R&: Char16Ty, K: BuiltinType::Char16);
  else // C99
    Char16Ty = getFromTargetType(Type: Target.getChar16Type());

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(R&: Char32Ty, K: BuiltinType::Char32);
  else // C99
    Char32Ty = getFromTargetType(Type: Target.getChar32Type());

  // Placeholder type for type-dependent expressions whose type is
  // completely unknown. No code should ever check a type against
  // DependentTy and users should never see it; however, it is here to
  // help diagnose failures to properly check for type-dependent
  // expressions.
  InitBuiltinType(R&: DependentTy, K: BuiltinType::Dependent);

  // Placeholder type for functions.
  InitBuiltinType(R&: OverloadTy, K: BuiltinType::Overload);

  // Placeholder type for bound members.
  InitBuiltinType(R&: BoundMemberTy, K: BuiltinType::BoundMember);

  // Placeholder type for unresolved templates.
  InitBuiltinType(R&: UnresolvedTemplateTy, K: BuiltinType::UnresolvedTemplate);

  // Placeholder type for pseudo-objects.
  InitBuiltinType(R&: PseudoObjectTy, K: BuiltinType::PseudoObject);

  // "any" type; useful for debugger-like clients.
  InitBuiltinType(R&: UnknownAnyTy, K: BuiltinType::UnknownAny);

  // Placeholder type for unbridged ARC casts.
  InitBuiltinType(R&: ARCUnbridgedCastTy, K: BuiltinType::ARCUnbridgedCast);

  // Placeholder type for builtin functions.
  InitBuiltinType(R&: BuiltinFnTy, K: BuiltinType::BuiltinFn);

  // Placeholder type for OMP array sections.
  if (LangOpts.OpenMP) {
    InitBuiltinType(R&: ArraySectionTy, K: BuiltinType::ArraySection);
    InitBuiltinType(R&: OMPArrayShapingTy, K: BuiltinType::OMPArrayShaping);
    InitBuiltinType(R&: OMPIteratorTy, K: BuiltinType::OMPIterator);
  }
  // Placeholder type for OpenACC array sections, if we are ALSO in OMP mode,
  // don't bother, as we're just using the same type as OMP.
  if (LangOpts.OpenACC && !LangOpts.OpenMP) {
    InitBuiltinType(R&: ArraySectionTy, K: BuiltinType::ArraySection);
  }
  if (LangOpts.MatrixTypes)
    InitBuiltinType(R&: IncompleteMatrixIdxTy, K: BuiltinType::IncompleteMatrixIdx);

  // Builtin types for 'id', 'Class', and 'SEL'.
  InitBuiltinType(R&: ObjCBuiltinIdTy, K: BuiltinType::ObjCId);
  InitBuiltinType(R&: ObjCBuiltinClassTy, K: BuiltinType::ObjCClass);
  InitBuiltinType(R&: ObjCBuiltinSelTy, K: BuiltinType::ObjCSel);

  // Target-/language-specific builtin type families follow, each driven by
  // its own .def file of singletons.
  if (LangOpts.OpenCL) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/OpenCLImageTypes.def"

    InitBuiltinType(R&: OCLSamplerTy, K: BuiltinType::OCLSampler);
    InitBuiltinType(R&: OCLEventTy, K: BuiltinType::OCLEvent);
    InitBuiltinType(R&: OCLClkEventTy, K: BuiltinType::OCLClkEvent);
    InitBuiltinType(R&: OCLQueueTy, K: BuiltinType::OCLQueue);
    InitBuiltinType(R&: OCLReserveIDTy, K: BuiltinType::OCLReserveID);

#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/OpenCLExtensionTypes.def"
  }

  if (LangOpts.HLSL) {
#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/HLSLIntangibleTypes.def"
  }

  if (Target.hasAArch64ACLETypes() ||
      (AuxTarget && AuxTarget->hasAArch64ACLETypes())) {
#define SVE_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/AArch64ACLETypes.def"
  }

  if (Target.getTriple().isPPC64()) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
      InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
  }

  if (Target.hasRISCVVTypes()) {
#define RVV_TYPE(Name, Id, SingletonId)                                        \
  InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/RISCVVTypes.def"
  }

  if (Target.getTriple().isWasm() && Target.hasFeature(Feature: "reference-types")) {
#define WASM_TYPE(Name, Id, SingletonId)                                       \
  InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/WebAssemblyReferenceTypes.def"
  }

  if (Target.getTriple().isAMDGPU() ||
      (AuxTarget && AuxTarget->getTriple().isAMDGPU())) {
#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align)                       \
  InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/AMDGPUTypes.def"
  }

  // Builtin type for __objc_yes and __objc_no
  ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
                       SignedCharTy : BoolTy);

  ObjCConstantStringType = QualType();

  ObjCSuperType = QualType();

  // void * type
  if (LangOpts.OpenCLGenericAddressSpace) {
    auto Q = VoidTy.getQualifiers();
    Q.setAddressSpace(LangAS::opencl_generic);
    VoidPtrTy = getPointerType(T: getCanonicalType(
        T: getQualifiedType(T: VoidTy.getUnqualifiedType(), Qs: Q)));
  } else {
    VoidPtrTy = getPointerType(T: VoidTy);
  }

  // nullptr type (C++0x 2.14.7)
  InitBuiltinType(R&: NullPtrTy, K: BuiltinType::NullPtr);

  // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
  InitBuiltinType(R&: HalfTy, K: BuiltinType::Half);

  InitBuiltinType(R&: BFloat16Ty, K: BuiltinType::BFloat16);

  // Builtin type used to help define __builtin_va_list.
  VaListTagDecl = nullptr;

  // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls.
  if (LangOpts.MicrosoftExt || LangOpts.Borland) {
    MSGuidTagDecl = buildImplicitRecord(Name: "_GUID");
    getTranslationUnitDecl()->addDecl(D: MSGuidTagDecl);
  }
}
1520
// The context does not own a diagnostics engine; it forwards to the one
// attached to the source manager.
DiagnosticsEngine &ASTContext::getDiagnostics() const {
  return SourceMgr.getDiagnostics();
}
1524
1525AttrVec& ASTContext::getDeclAttrs(const Decl *D) {
1526 AttrVec *&Result = DeclAttrs[D];
1527 if (!Result) {
1528 void *Mem = Allocate(Size: sizeof(AttrVec));
1529 Result = new (Mem) AttrVec;
1530 }
1531
1532 return *Result;
1533}
1534
1535/// Erase the attributes corresponding to the given declaration.
1536void ASTContext::eraseDeclAttrs(const Decl *D) {
1537 llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(Val: D);
1538 if (Pos != DeclAttrs.end()) {
1539 Pos->second->~AttrVec();
1540 DeclAttrs.erase(I: Pos);
1541 }
1542}
1543
// FIXME: Remove ?
// Return the member-specialization info recorded for a static data member
// instantiation, or null when the recorded info is of another kind (or
// absent entirely).
MemberSpecializationInfo *
ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
  assert(Var->isStaticDataMember() && "Not a static data member");
  return getTemplateOrSpecializationInfo(Var)
      .dyn_cast<MemberSpecializationInfo *>();
}
1551
1552ASTContext::TemplateOrSpecializationInfo
1553ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) {
1554 llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
1555 TemplateOrInstantiation.find(Val: Var);
1556 if (Pos == TemplateOrInstantiation.end())
1557 return {};
1558
1559 return Pos->second;
1560}
1561
// Record that the static data member Inst was instantiated from Tmpl with
// the given specialization kind and point of instantiation.
void
ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
                                                TemplateSpecializationKind TSK,
                                          SourceLocation PointOfInstantiation) {
  assert(Inst->isStaticDataMember() && "Not a static data member");
  assert(Tmpl->isStaticDataMember() && "Not a static data member");
  setTemplateOrSpecializationInfo(Inst, TSI: new (*this) MemberSpecializationInfo(
                                            Tmpl, TSK, PointOfInstantiation));
}
1571
// Record the template/specialization info for Inst. Each variable may be
// noted at most once.
void
ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst,
                                            TemplateOrSpecializationInfo TSI) {
  assert(!TemplateOrInstantiation[Inst] &&
         "Already noted what the variable was instantiated from");
  TemplateOrInstantiation[Inst] = TSI;
}
1579
// Return the using declaration that UUD was instantiated from, or null if
// no instantiation was recorded.
NamedDecl *
ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) {
  return InstantiatedFromUsingDecl.lookup(Val: UUD);
}
1584
// Record that the using declaration Inst was instantiated from Pattern.
// Both must be one of the three using-declaration flavors, and the mapping
// may only be established once per instantiation.
void
ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) {
  assert((isa<UsingDecl>(Pattern) ||
          isa<UnresolvedUsingValueDecl>(Pattern) ||
          isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
         "pattern decl is not a using decl");
  assert((isa<UsingDecl>(Inst) ||
          isa<UnresolvedUsingValueDecl>(Inst) ||
          isa<UnresolvedUsingTypenameDecl>(Inst)) &&
         "instantiation did not produce a using decl");
  assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingDecl[Inst] = Pattern;
}
1598
// Return the using-enum declaration that UUD was instantiated from, or
// null if no instantiation was recorded.
UsingEnumDecl *
ASTContext::getInstantiatedFromUsingEnumDecl(UsingEnumDecl *UUD) {
  return InstantiatedFromUsingEnumDecl.lookup(Val: UUD);
}
1603
// Record that the using-enum declaration Inst was instantiated from
// Pattern; the mapping may only be established once.
void ASTContext::setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst,
                                                  UsingEnumDecl *Pattern) {
  assert(!InstantiatedFromUsingEnumDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingEnumDecl[Inst] = Pattern;
}
1609
// Return the using-shadow declaration that Inst was instantiated from, or
// null if no instantiation was recorded.
UsingShadowDecl *
ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) {
  return InstantiatedFromUsingShadowDecl.lookup(Val: Inst);
}
1614
// Record that the using-shadow declaration Inst was instantiated from
// Pattern; the mapping may only be established once.
void
ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
                                               UsingShadowDecl *Pattern) {
  assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingShadowDecl[Inst] = Pattern;
}
1621
// Return the unnamed template field that Field was instantiated from, or
// null if no instantiation was recorded.
FieldDecl *
ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) const {
  return InstantiatedFromUnnamedFieldDecl.lookup(Val: Field);
}
1626
1627void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst,
1628 FieldDecl *Tmpl) {
1629 assert((!Inst->getDeclName() || Inst->isPlaceholderVar(getLangOpts())) &&
1630 "Instantiated field decl is not unnamed");
1631 assert((!Inst->getDeclName() || Inst->isPlaceholderVar(getLangOpts())) &&
1632 "Template field decl is not unnamed");
1633 assert(!InstantiatedFromUnnamedFieldDecl[Inst] &&
1634 "Already noted what unnamed field was instantiated from");
1635
1636 InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl;
1637}
1638
// Iterator to the first method that Method overrides (see
// overridden_methods for the underlying lookup).
ASTContext::overridden_cxx_method_iterator
ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const {
  return overridden_methods(Method).begin();
}
1643
// Past-the-end iterator for the methods that Method overrides.
ASTContext::overridden_cxx_method_iterator
ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const {
  return overridden_methods(Method).end();
}
1648
1649unsigned
1650ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const {
1651 auto Range = overridden_methods(Method);
1652 return Range.end() - Range.begin();
1653}
1654
1655ASTContext::overridden_method_range
1656ASTContext::overridden_methods(const CXXMethodDecl *Method) const {
1657 llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos =
1658 OverriddenMethods.find(Val: Method->getCanonicalDecl());
1659 if (Pos == OverriddenMethods.end())
1660 return overridden_method_range(nullptr, nullptr);
1661 return overridden_method_range(Pos->second.begin(), Pos->second.end());
1662}
1663
// Record that Method overrides Overridden. Both must already be canonical
// declarations, since lookups are keyed on canonical decls.
void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method,
                                     const CXXMethodDecl *Overridden) {
  assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl());
  OverriddenMethods[Method].push_back(NewVal: Overridden);
}
1669
1670void ASTContext::getOverriddenMethods(
1671 const NamedDecl *D,
1672 SmallVectorImpl<const NamedDecl *> &Overridden) const {
1673 assert(D);
1674
1675 if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(Val: D)) {
1676 Overridden.append(in_start: overridden_methods_begin(Method: CXXMethod),
1677 in_end: overridden_methods_end(Method: CXXMethod));
1678 return;
1679 }
1680
1681 const auto *Method = dyn_cast<ObjCMethodDecl>(Val: D);
1682 if (!Method)
1683 return;
1684
1685 SmallVector<const ObjCMethodDecl *, 8> OverDecls;
1686 Method->getOverriddenMethods(Overridden&: OverDecls);
1687 Overridden.append(in_start: OverDecls.begin(), in_end: OverDecls.end());
1688}
1689
1690std::optional<ASTContext::CXXRecordDeclRelocationInfo>
1691ASTContext::getRelocationInfoForCXXRecord(const CXXRecordDecl *RD) const {
1692 assert(RD);
1693 CXXRecordDecl *D = RD->getDefinition();
1694 auto it = RelocatableClasses.find(Val: D);
1695 if (it != RelocatableClasses.end())
1696 return it->getSecond();
1697 return std::nullopt;
1698}
1699
1700void ASTContext::setRelocationInfoForCXXRecord(
1701 const CXXRecordDecl *RD, CXXRecordDeclRelocationInfo Info) {
1702 assert(RD);
1703 CXXRecordDecl *D = RD->getDefinition();
1704 assert(RelocatableClasses.find(D) == RelocatableClasses.end());
1705 RelocatableClasses.insert(KV: {D, Info});
1706}
1707
1708static bool primaryBaseHaseAddressDiscriminatedVTableAuthentication(
1709 ASTContext &Context, const CXXRecordDecl *Class) {
1710 if (!Class->isPolymorphic())
1711 return false;
1712 const CXXRecordDecl *BaseType = Context.baseForVTableAuthentication(ThisClass: Class);
1713 using AuthAttr = VTablePointerAuthenticationAttr;
1714 const AuthAttr *ExplicitAuth = BaseType->getAttr<AuthAttr>();
1715 if (!ExplicitAuth)
1716 return Context.getLangOpts().PointerAuthVTPtrAddressDiscrimination;
1717 AuthAttr::AddressDiscriminationMode AddressDiscrimination =
1718 ExplicitAuth->getAddressDiscrimination();
1719 if (AddressDiscrimination == AuthAttr::DefaultAddressDiscrimination)
1720 return Context.getLangOpts().PointerAuthVTPtrAddressDiscrimination;
1721 return AddressDiscrimination == AuthAttr::AddressDiscrimination;
1722}
1723
/// Classify whether values of type \p T (transitively, through bases and
/// fields) contain address-discriminated pointer-authenticated data, caching
/// the answer per RecordDecl.
ASTContext::PointerAuthContent ASTContext::findPointerAuthContent(QualType T) {
  assert(isPointerAuthenticationAvailable());

  T = T.getCanonicalType();
  if (T.hasAddressDiscriminatedPointerAuth())
    return PointerAuthContent::AddressDiscriminatedData;
  const RecordDecl *RD = T->getAsRecordDecl();
  if (!RD)
    return PointerAuthContent::None;

  // Return the memoized classification if this record was already analyzed.
  if (auto Existing = RecordContainsAddressDiscriminatedPointerAuth.find(Val: RD);
      Existing != RecordContainsAddressDiscriminatedPointerAuth.end())
    return Existing->second;

  PointerAuthContent Result = PointerAuthContent::None;

  // Cache the final result for RD and return it. The try_emplace must
  // insert: the early cache hit above guarantees no entry for RD existed,
  // and nothing below adds one for this RD.
  auto SaveResultAndReturn = [&]() -> PointerAuthContent {
    auto [ResultIter, DidAdd] =
        RecordContainsAddressDiscriminatedPointerAuth.try_emplace(Key: RD, Args&: Result);
    (void)ResultIter;
    (void)DidAdd;
    assert(DidAdd);
    return Result;
  };
  // Merge NewResult into Result and report whether scanning should continue.
  // The enum is ordered by increasing strength (checked by the
  // static_asserts), so a simple max-merge suffices; once Result reaches
  // AddressDiscriminatedData nothing can raise it further, so stop early.
  auto ShouldContinueAfterUpdate = [&](PointerAuthContent NewResult) {
    static_assert(PointerAuthContent::None <
                  PointerAuthContent::AddressDiscriminatedVTable);
    static_assert(PointerAuthContent::AddressDiscriminatedVTable <
                  PointerAuthContent::AddressDiscriminatedData);
    if (NewResult > Result)
      Result = NewResult;
    return Result != PointerAuthContent::AddressDiscriminatedData;
  };
  // C++ classes may contribute an address-discriminated vtable pointer and
  // inherit content from their base classes.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD)) {
    if (primaryBaseHaseAddressDiscriminatedVTableAuthentication(Context&: *this, Class: CXXRD) &&
        !ShouldContinueAfterUpdate(
            PointerAuthContent::AddressDiscriminatedVTable))
      return SaveResultAndReturn();
    for (auto Base : CXXRD->bases()) {
      if (!ShouldContinueAfterUpdate(findPointerAuthContent(T: Base.getType())))
        return SaveResultAndReturn();
    }
  }
  // Fields contribute whatever their own types contain (recursively).
  for (auto *FieldDecl : RD->fields()) {
    if (!ShouldContinueAfterUpdate(
            findPointerAuthContent(T: FieldDecl->getType())))
      return SaveResultAndReturn();
  }
  return SaveResultAndReturn();
}
1774
1775void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
1776 assert(!Import->getNextLocalImport() &&
1777 "Import declaration already in the chain");
1778 assert(!Import->isFromASTFile() && "Non-local import declaration");
1779 if (!FirstLocalImport) {
1780 FirstLocalImport = Import;
1781 LastLocalImport = Import;
1782 return;
1783 }
1784
1785 LastLocalImport->setNextLocalImport(Import);
1786 LastLocalImport = Import;
1787}
1788
1789//===----------------------------------------------------------------------===//
1790// Type Sizing and Analysis
1791//===----------------------------------------------------------------------===//
1792
1793/// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
1794/// scalar floating point type.
1795const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
1796 switch (T->castAs<BuiltinType>()->getKind()) {
1797 default:
1798 llvm_unreachable("Not a floating point type!");
1799 case BuiltinType::BFloat16:
1800 return Target->getBFloat16Format();
1801 case BuiltinType::Float16:
1802 return Target->getHalfFormat();
1803 case BuiltinType::Half:
1804 return Target->getHalfFormat();
1805 case BuiltinType::Float: return Target->getFloatFormat();
1806 case BuiltinType::Double: return Target->getDoubleFormat();
1807 case BuiltinType::Ibm128:
1808 return Target->getIbm128Format();
1809 case BuiltinType::LongDouble:
1810 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
1811 return AuxTarget->getLongDoubleFormat();
1812 return Target->getLongDoubleFormat();
1813 case BuiltinType::Float128:
1814 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
1815 return AuxTarget->getFloat128Format();
1816 return Target->getFloat128Format();
1817 }
1818}
1819
/// Return the alignment, in characters, of the declaration \p D.
/// \p ForAlignof selects alignof-style semantics (e.g. references align like
/// their referent, large-array bumping is skipped) rather than the alignment
/// actually used when laying out storage for the declaration.
CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
  // Baseline: everything is at least character-aligned.
  unsigned Align = Target->getCharWidth();

  const unsigned AlignFromAttr = D->getMaxAlignment();
  if (AlignFromAttr)
    Align = AlignFromAttr;

  // __attribute__((aligned)) can increase or decrease alignment
  // *except* on a struct or struct member, where it only increases
  // alignment unless 'packed' is also specified.
  //
  // It is an error for alignas to decrease alignment, so we can
  // ignore that possibility; Sema should diagnose it.
  bool UseAlignAttrOnly;
  if (const FieldDecl *FD = dyn_cast<FieldDecl>(Val: D))
    UseAlignAttrOnly =
        FD->hasAttr<PackedAttr>() || FD->getParent()->hasAttr<PackedAttr>();
  else
    UseAlignAttrOnly = AlignFromAttr != 0;
  // If we're using the align attribute only, just ignore everything
  // else about the declaration and its type.
  if (UseAlignAttrOnly) {
    // do nothing
  } else if (const auto *VD = dyn_cast<ValueDecl>(Val: D)) {
    QualType T = VD->getType();
    // For alignof, a reference aligns like its referent; for storage
    // purposes it is represented as a pointer.
    if (const auto *RT = T->getAs<ReferenceType>()) {
      if (ForAlignof)
        T = RT->getPointeeType();
      else
        T = getPointerType(T: RT->getPointeeType());
    }
    QualType BaseT = getBaseElementType(QT: T);
    if (T->isFunctionType())
      Align = getTypeInfoImpl(T: T.getTypePtr()).Align;
    else if (!BaseT->isIncompleteType()) {
      // Adjust alignments of declarations with array type by the
      // large-array alignment on the target.
      if (const ArrayType *arrayType = getAsArrayType(T)) {
        unsigned MinWidth = Target->getLargeArrayMinWidth();
        if (!ForAlignof && MinWidth) {
          if (isa<VariableArrayType>(Val: arrayType))
            Align = std::max(a: Align, b: Target->getLargeArrayAlign());
          else if (isa<ConstantArrayType>(Val: arrayType) &&
                   MinWidth <= getTypeSize(T: cast<ConstantArrayType>(Val: arrayType)))
            Align = std::max(a: Align, b: Target->getLargeArrayAlign());
        }
      }
      Align = std::max(a: Align, b: getPreferredTypeAlign(T: T.getTypePtr()));
      // The __unaligned qualifier drops the alignment back to one character.
      if (BaseT.getQualifiers().hasUnaligned())
        Align = Target->getCharWidth();
    }

    // Ensure minimum alignment for global variables.
    if (const auto *VD = dyn_cast<VarDecl>(Val: D))
      if (VD->hasGlobalStorage() && !ForAlignof) {
        uint64_t TypeSize =
            !BaseT->isIncompleteType() ? getTypeSize(T: T.getTypePtr()) : 0;
        Align = std::max(a: Align, b: getMinGlobalAlignOfVar(Size: TypeSize, VD));
      }

    // Fields can be subject to extra alignment constraints, like if
    // the field is packed, the struct is packed, or the struct has a
    // a max-field-alignment constraint (#pragma pack). So calculate
    // the actual alignment of the field within the struct, and then
    // (as we're expected to) constrain that by the alignment of the type.
    if (const auto *Field = dyn_cast<FieldDecl>(Val: VD)) {
      const RecordDecl *Parent = Field->getParent();
      // We can only produce a sensible answer if the record is valid.
      if (!Parent->isInvalidDecl()) {
        const ASTRecordLayout &Layout = getASTRecordLayout(D: Parent);

        // Start with the record's overall alignment.
        unsigned FieldAlign = toBits(CharSize: Layout.getAlignment());

        // Use the GCD of that and the offset within the record.
        uint64_t Offset = Layout.getFieldOffset(FieldNo: Field->getFieldIndex());
        if (Offset > 0) {
          // Alignment is always a power of 2, so the GCD will be a power of 2,
          // which means we get to do this crazy thing instead of Euclid's.
          uint64_t LowBitOfOffset = Offset & (~Offset + 1);
          if (LowBitOfOffset < FieldAlign)
            FieldAlign = static_cast<unsigned>(LowBitOfOffset);
        }

        Align = std::min(a: Align, b: FieldAlign);
      }
    }
  }

  // Some targets have hard limitation on the maximum requestable alignment in
  // aligned attribute for static variables.
  const unsigned MaxAlignedAttr = getTargetInfo().getMaxAlignedAttribute();
  const auto *VD = dyn_cast<VarDecl>(Val: D);
  if (MaxAlignedAttr && VD && VD->getStorageClass() == SC_Static)
    Align = std::min(a: Align, b: MaxAlignedAttr);

  return toCharUnitsFromBits(BitSize: Align);
}
1918
1919CharUnits ASTContext::getExnObjectAlignment() const {
1920 return toCharUnitsFromBits(BitSize: Target->getExnObjectAlignment());
1921}
1922
1923// getTypeInfoDataSizeInChars - Return the size of a type, in
1924// chars. If the type is a record, its data size is returned. This is
1925// the size of the memcpy that's performed when assigning this type
1926// using a trivial copy/move assignment operator.
1927TypeInfoChars ASTContext::getTypeInfoDataSizeInChars(QualType T) const {
1928 TypeInfoChars Info = getTypeInfoInChars(T);
1929
1930 // In C++, objects can sometimes be allocated into the tail padding
1931 // of a base-class subobject. We decide whether that's possible
1932 // during class layout, so here we can just trust the layout results.
1933 if (getLangOpts().CPlusPlus) {
1934 if (const auto *RT = T->getAs<RecordType>();
1935 RT && !RT->getDecl()->isInvalidDecl()) {
1936 const ASTRecordLayout &layout = getASTRecordLayout(D: RT->getDecl());
1937 Info.Width = layout.getDataSize();
1938 }
1939 }
1940
1941 return Info;
1942}
1943
1944/// getConstantArrayInfoInChars - Performing the computation in CharUnits
1945/// instead of in bits prevents overflowing the uint64_t for some large arrays.
1946TypeInfoChars
1947static getConstantArrayInfoInChars(const ASTContext &Context,
1948 const ConstantArrayType *CAT) {
1949 TypeInfoChars EltInfo = Context.getTypeInfoInChars(T: CAT->getElementType());
1950 uint64_t Size = CAT->getZExtSize();
1951 assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <=
1952 (uint64_t)(-1)/Size) &&
1953 "Overflow in array type char size evaluation");
1954 uint64_t Width = EltInfo.Width.getQuantity() * Size;
1955 unsigned Align = EltInfo.Align.getQuantity();
1956 if (!Context.getTargetInfo().getCXXABI().isMicrosoft() ||
1957 Context.getTargetInfo().getPointerWidth(AddrSpace: LangAS::Default) == 64)
1958 Width = llvm::alignTo(Value: Width, Align);
1959 return TypeInfoChars(CharUnits::fromQuantity(Quantity: Width),
1960 CharUnits::fromQuantity(Quantity: Align),
1961 EltInfo.AlignRequirement);
1962}
1963
1964TypeInfoChars ASTContext::getTypeInfoInChars(const Type *T) const {
1965 if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: T))
1966 return getConstantArrayInfoInChars(Context: *this, CAT);
1967 TypeInfo Info = getTypeInfo(T);
1968 return TypeInfoChars(toCharUnitsFromBits(BitSize: Info.Width),
1969 toCharUnitsFromBits(BitSize: Info.Align), Info.AlignRequirement);
1970}
1971
1972TypeInfoChars ASTContext::getTypeInfoInChars(QualType T) const {
1973 return getTypeInfoInChars(T: T.getTypePtr());
1974}
1975
1976bool ASTContext::isPromotableIntegerType(QualType T) const {
1977 // HLSL doesn't promote all small integer types to int, it
1978 // just uses the rank-based promotion rules for all types.
1979 if (getLangOpts().HLSL)
1980 return false;
1981
1982 if (const auto *BT = T->getAs<BuiltinType>())
1983 switch (BT->getKind()) {
1984 case BuiltinType::Bool:
1985 case BuiltinType::Char_S:
1986 case BuiltinType::Char_U:
1987 case BuiltinType::SChar:
1988 case BuiltinType::UChar:
1989 case BuiltinType::Short:
1990 case BuiltinType::UShort:
1991 case BuiltinType::WChar_S:
1992 case BuiltinType::WChar_U:
1993 case BuiltinType::Char8:
1994 case BuiltinType::Char16:
1995 case BuiltinType::Char32:
1996 return true;
1997 default:
1998 return false;
1999 }
2000
2001 // Enumerated types are promotable to their compatible integer types
2002 // (C99 6.3.1.1) a.k.a. its underlying type (C++ [conv.prom]p2).
2003 if (const auto *ET = T->getAs<EnumType>()) {
2004 if (T->isDependentType() || ET->getDecl()->getPromotionType().isNull() ||
2005 ET->getDecl()->isScoped())
2006 return false;
2007
2008 return true;
2009 }
2010
2011 return false;
2012}
2013
2014bool ASTContext::isAlignmentRequired(const Type *T) const {
2015 return getTypeInfo(T).AlignRequirement != AlignRequirementKind::None;
2016}
2017
2018bool ASTContext::isAlignmentRequired(QualType T) const {
2019 return isAlignmentRequired(T: T.getTypePtr());
2020}
2021
2022unsigned ASTContext::getTypeAlignIfKnown(QualType T,
2023 bool NeedsPreferredAlignment) const {
2024 // An alignment on a typedef overrides anything else.
2025 if (const auto *TT = T->getAs<TypedefType>())
2026 if (unsigned Align = TT->getDecl()->getMaxAlignment())
2027 return Align;
2028
2029 // If we have an (array of) complete type, we're done.
2030 T = getBaseElementType(QT: T);
2031 if (!T->isIncompleteType())
2032 return NeedsPreferredAlignment ? getPreferredTypeAlign(T) : getTypeAlign(T);
2033
2034 // If we had an array type, its element type might be a typedef
2035 // type with an alignment attribute.
2036 if (const auto *TT = T->getAs<TypedefType>())
2037 if (unsigned Align = TT->getDecl()->getMaxAlignment())
2038 return Align;
2039
2040 // Otherwise, see if the declaration of the type had an attribute.
2041 if (const auto *TT = T->getAs<TagType>())
2042 return TT->getDecl()->getMaxAlignment();
2043
2044 return 0;
2045}
2046
2047TypeInfo ASTContext::getTypeInfo(const Type *T) const {
2048 TypeInfoMap::iterator I = MemoizedTypeInfo.find(Val: T);
2049 if (I != MemoizedTypeInfo.end())
2050 return I->second;
2051
2052 // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup.
2053 TypeInfo TI = getTypeInfoImpl(T);
2054 MemoizedTypeInfo[T] = TI;
2055 return TI;
2056}
2057
2058/// getTypeInfoImpl - Return the size of the specified type, in bits. This
2059/// method does not work on incomplete types.
2060///
2061/// FIXME: Pointers into different addr spaces could have different sizes and
2062/// alignment requirements: getPointerInfo should take an AddrSpace, this
2063/// should take a QualType, &c.
2064TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
2065 uint64_t Width = 0;
2066 unsigned Align = 8;
2067 AlignRequirementKind AlignRequirement = AlignRequirementKind::None;
2068 LangAS AS = LangAS::Default;
2069 switch (T->getTypeClass()) {
2070#define TYPE(Class, Base)
2071#define ABSTRACT_TYPE(Class, Base)
2072#define NON_CANONICAL_TYPE(Class, Base)
2073#define DEPENDENT_TYPE(Class, Base) case Type::Class:
2074#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \
2075 case Type::Class: \
2076 assert(!T->isDependentType() && "should not see dependent types here"); \
2077 return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr());
2078#include "clang/AST/TypeNodes.inc"
2079 llvm_unreachable("Should not see dependent types");
2080
2081 case Type::FunctionNoProto:
2082 case Type::FunctionProto:
2083 // GCC extension: alignof(function) = 32 bits
2084 Width = 0;
2085 Align = 32;
2086 break;
2087
2088 case Type::IncompleteArray:
2089 case Type::VariableArray:
2090 case Type::ConstantArray:
2091 case Type::ArrayParameter: {
2092 // Model non-constant sized arrays as size zero, but track the alignment.
2093 uint64_t Size = 0;
2094 if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: T))
2095 Size = CAT->getZExtSize();
2096
2097 TypeInfo EltInfo = getTypeInfo(T: cast<ArrayType>(Val: T)->getElementType());
2098 assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) &&
2099 "Overflow in array type bit size evaluation");
2100 Width = EltInfo.Width * Size;
2101 Align = EltInfo.Align;
2102 AlignRequirement = EltInfo.AlignRequirement;
2103 if (!getTargetInfo().getCXXABI().isMicrosoft() ||
2104 getTargetInfo().getPointerWidth(AddrSpace: LangAS::Default) == 64)
2105 Width = llvm::alignTo(Value: Width, Align);
2106 break;
2107 }
2108
2109 case Type::ExtVector:
2110 case Type::Vector: {
2111 const auto *VT = cast<VectorType>(Val: T);
2112 TypeInfo EltInfo = getTypeInfo(T: VT->getElementType());
2113 Width = VT->isPackedVectorBoolType(ctx: *this)
2114 ? VT->getNumElements()
2115 : EltInfo.Width * VT->getNumElements();
2116 // Enforce at least byte size and alignment.
2117 Width = std::max<unsigned>(a: 8, b: Width);
2118 Align = std::max<unsigned>(a: 8, b: Width);
2119
2120 // If the alignment is not a power of 2, round up to the next power of 2.
2121 // This happens for non-power-of-2 length vectors.
2122 if (Align & (Align-1)) {
2123 Align = llvm::bit_ceil(Value: Align);
2124 Width = llvm::alignTo(Value: Width, Align);
2125 }
2126 // Adjust the alignment based on the target max.
2127 uint64_t TargetVectorAlign = Target->getMaxVectorAlign();
2128 if (TargetVectorAlign && TargetVectorAlign < Align)
2129 Align = TargetVectorAlign;
2130 if (VT->getVectorKind() == VectorKind::SveFixedLengthData)
2131 // Adjust the alignment for fixed-length SVE vectors. This is important
2132 // for non-power-of-2 vector lengths.
2133 Align = 128;
2134 else if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
2135 // Adjust the alignment for fixed-length SVE predicates.
2136 Align = 16;
2137 else if (VT->getVectorKind() == VectorKind::RVVFixedLengthData ||
2138 VT->getVectorKind() == VectorKind::RVVFixedLengthMask ||
2139 VT->getVectorKind() == VectorKind::RVVFixedLengthMask_1 ||
2140 VT->getVectorKind() == VectorKind::RVVFixedLengthMask_2 ||
2141 VT->getVectorKind() == VectorKind::RVVFixedLengthMask_4)
2142 // Adjust the alignment for fixed-length RVV vectors.
2143 Align = std::min<unsigned>(a: 64, b: Width);
2144 break;
2145 }
2146
2147 case Type::ConstantMatrix: {
2148 const auto *MT = cast<ConstantMatrixType>(Val: T);
2149 TypeInfo ElementInfo = getTypeInfo(T: MT->getElementType());
2150 // The internal layout of a matrix value is implementation defined.
2151 // Initially be ABI compatible with arrays with respect to alignment and
2152 // size.
2153 Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns();
2154 Align = ElementInfo.Align;
2155 break;
2156 }
2157
2158 case Type::Builtin:
2159 switch (cast<BuiltinType>(Val: T)->getKind()) {
2160 default: llvm_unreachable("Unknown builtin type!");
2161 case BuiltinType::Void:
2162 // GCC extension: alignof(void) = 8 bits.
2163 Width = 0;
2164 Align = 8;
2165 break;
2166 case BuiltinType::Bool:
2167 Width = Target->getBoolWidth();
2168 Align = Target->getBoolAlign();
2169 break;
2170 case BuiltinType::Char_S:
2171 case BuiltinType::Char_U:
2172 case BuiltinType::UChar:
2173 case BuiltinType::SChar:
2174 case BuiltinType::Char8:
2175 Width = Target->getCharWidth();
2176 Align = Target->getCharAlign();
2177 break;
2178 case BuiltinType::WChar_S:
2179 case BuiltinType::WChar_U:
2180 Width = Target->getWCharWidth();
2181 Align = Target->getWCharAlign();
2182 break;
2183 case BuiltinType::Char16:
2184 Width = Target->getChar16Width();
2185 Align = Target->getChar16Align();
2186 break;
2187 case BuiltinType::Char32:
2188 Width = Target->getChar32Width();
2189 Align = Target->getChar32Align();
2190 break;
2191 case BuiltinType::UShort:
2192 case BuiltinType::Short:
2193 Width = Target->getShortWidth();
2194 Align = Target->getShortAlign();
2195 break;
2196 case BuiltinType::UInt:
2197 case BuiltinType::Int:
2198 Width = Target->getIntWidth();
2199 Align = Target->getIntAlign();
2200 break;
2201 case BuiltinType::ULong:
2202 case BuiltinType::Long:
2203 Width = Target->getLongWidth();
2204 Align = Target->getLongAlign();
2205 break;
2206 case BuiltinType::ULongLong:
2207 case BuiltinType::LongLong:
2208 Width = Target->getLongLongWidth();
2209 Align = Target->getLongLongAlign();
2210 break;
2211 case BuiltinType::Int128:
2212 case BuiltinType::UInt128:
2213 Width = 128;
2214 Align = Target->getInt128Align();
2215 break;
2216 case BuiltinType::ShortAccum:
2217 case BuiltinType::UShortAccum:
2218 case BuiltinType::SatShortAccum:
2219 case BuiltinType::SatUShortAccum:
2220 Width = Target->getShortAccumWidth();
2221 Align = Target->getShortAccumAlign();
2222 break;
2223 case BuiltinType::Accum:
2224 case BuiltinType::UAccum:
2225 case BuiltinType::SatAccum:
2226 case BuiltinType::SatUAccum:
2227 Width = Target->getAccumWidth();
2228 Align = Target->getAccumAlign();
2229 break;
2230 case BuiltinType::LongAccum:
2231 case BuiltinType::ULongAccum:
2232 case BuiltinType::SatLongAccum:
2233 case BuiltinType::SatULongAccum:
2234 Width = Target->getLongAccumWidth();
2235 Align = Target->getLongAccumAlign();
2236 break;
2237 case BuiltinType::ShortFract:
2238 case BuiltinType::UShortFract:
2239 case BuiltinType::SatShortFract:
2240 case BuiltinType::SatUShortFract:
2241 Width = Target->getShortFractWidth();
2242 Align = Target->getShortFractAlign();
2243 break;
2244 case BuiltinType::Fract:
2245 case BuiltinType::UFract:
2246 case BuiltinType::SatFract:
2247 case BuiltinType::SatUFract:
2248 Width = Target->getFractWidth();
2249 Align = Target->getFractAlign();
2250 break;
2251 case BuiltinType::LongFract:
2252 case BuiltinType::ULongFract:
2253 case BuiltinType::SatLongFract:
2254 case BuiltinType::SatULongFract:
2255 Width = Target->getLongFractWidth();
2256 Align = Target->getLongFractAlign();
2257 break;
2258 case BuiltinType::BFloat16:
2259 if (Target->hasBFloat16Type()) {
2260 Width = Target->getBFloat16Width();
2261 Align = Target->getBFloat16Align();
2262 } else if ((getLangOpts().SYCLIsDevice ||
2263 (getLangOpts().OpenMP &&
2264 getLangOpts().OpenMPIsTargetDevice)) &&
2265 AuxTarget->hasBFloat16Type()) {
2266 Width = AuxTarget->getBFloat16Width();
2267 Align = AuxTarget->getBFloat16Align();
2268 }
2269 break;
2270 case BuiltinType::Float16:
2271 case BuiltinType::Half:
2272 if (Target->hasFloat16Type() || !getLangOpts().OpenMP ||
2273 !getLangOpts().OpenMPIsTargetDevice) {
2274 Width = Target->getHalfWidth();
2275 Align = Target->getHalfAlign();
2276 } else {
2277 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
2278 "Expected OpenMP device compilation.");
2279 Width = AuxTarget->getHalfWidth();
2280 Align = AuxTarget->getHalfAlign();
2281 }
2282 break;
2283 case BuiltinType::Float:
2284 Width = Target->getFloatWidth();
2285 Align = Target->getFloatAlign();
2286 break;
2287 case BuiltinType::Double:
2288 Width = Target->getDoubleWidth();
2289 Align = Target->getDoubleAlign();
2290 break;
2291 case BuiltinType::Ibm128:
2292 Width = Target->getIbm128Width();
2293 Align = Target->getIbm128Align();
2294 break;
2295 case BuiltinType::LongDouble:
2296 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
2297 (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() ||
2298 Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) {
2299 Width = AuxTarget->getLongDoubleWidth();
2300 Align = AuxTarget->getLongDoubleAlign();
2301 } else {
2302 Width = Target->getLongDoubleWidth();
2303 Align = Target->getLongDoubleAlign();
2304 }
2305 break;
2306 case BuiltinType::Float128:
2307 if (Target->hasFloat128Type() || !getLangOpts().OpenMP ||
2308 !getLangOpts().OpenMPIsTargetDevice) {
2309 Width = Target->getFloat128Width();
2310 Align = Target->getFloat128Align();
2311 } else {
2312 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
2313 "Expected OpenMP device compilation.");
2314 Width = AuxTarget->getFloat128Width();
2315 Align = AuxTarget->getFloat128Align();
2316 }
2317 break;
2318 case BuiltinType::NullPtr:
2319 // C++ 3.9.1p11: sizeof(nullptr_t) == sizeof(void*)
2320 Width = Target->getPointerWidth(AddrSpace: LangAS::Default);
2321 Align = Target->getPointerAlign(AddrSpace: LangAS::Default);
2322 break;
2323 case BuiltinType::ObjCId:
2324 case BuiltinType::ObjCClass:
2325 case BuiltinType::ObjCSel:
2326 Width = Target->getPointerWidth(AddrSpace: LangAS::Default);
2327 Align = Target->getPointerAlign(AddrSpace: LangAS::Default);
2328 break;
2329 case BuiltinType::OCLSampler:
2330 case BuiltinType::OCLEvent:
2331 case BuiltinType::OCLClkEvent:
2332 case BuiltinType::OCLQueue:
2333 case BuiltinType::OCLReserveID:
2334#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
2335 case BuiltinType::Id:
2336#include "clang/Basic/OpenCLImageTypes.def"
2337#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
2338 case BuiltinType::Id:
2339#include "clang/Basic/OpenCLExtensionTypes.def"
2340 AS = Target->getOpenCLTypeAddrSpace(TK: getOpenCLTypeKind(T));
2341 Width = Target->getPointerWidth(AddrSpace: AS);
2342 Align = Target->getPointerAlign(AddrSpace: AS);
2343 break;
2344 // The SVE types are effectively target-specific. The length of an
2345 // SVE_VECTOR_TYPE is only known at runtime, but it is always a multiple
2346 // of 128 bits. There is one predicate bit for each vector byte, so the
2347 // length of an SVE_PREDICATE_TYPE is always a multiple of 16 bits.
2348 //
2349 // Because the length is only known at runtime, we use a dummy value
2350 // of 0 for the static length. The alignment values are those defined
2351 // by the Procedure Call Standard for the Arm Architecture.
2352#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId) \
2353 case BuiltinType::Id: \
2354 Width = 0; \
2355 Align = 128; \
2356 break;
2357#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId) \
2358 case BuiltinType::Id: \
2359 Width = 0; \
2360 Align = 16; \
2361 break;
2362#define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingletonId) \
2363 case BuiltinType::Id: \
2364 Width = 0; \
2365 Align = 16; \
2366 break;
2367#define SVE_SCALAR_TYPE(Name, MangledName, Id, SingletonId, Bits) \
2368 case BuiltinType::Id: \
2369 Width = Bits; \
2370 Align = Bits; \
2371 break;
2372#include "clang/Basic/AArch64ACLETypes.def"
2373#define PPC_VECTOR_TYPE(Name, Id, Size) \
2374 case BuiltinType::Id: \
2375 Width = Size; \
2376 Align = Size; \
2377 break;
2378#include "clang/Basic/PPCTypes.def"
2379#define RVV_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, NF, IsSigned, \
2380 IsFP, IsBF) \
2381 case BuiltinType::Id: \
2382 Width = 0; \
2383 Align = ElBits; \
2384 break;
2385#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, ElKind) \
2386 case BuiltinType::Id: \
2387 Width = 0; \
2388 Align = 8; \
2389 break;
2390#include "clang/Basic/RISCVVTypes.def"
2391#define WASM_TYPE(Name, Id, SingletonId) \
2392 case BuiltinType::Id: \
2393 Width = 0; \
2394 Align = 8; \
2395 break;
2396#include "clang/Basic/WebAssemblyReferenceTypes.def"
2397#define AMDGPU_TYPE(NAME, ID, SINGLETONID, WIDTH, ALIGN) \
2398 case BuiltinType::ID: \
2399 Width = WIDTH; \
2400 Align = ALIGN; \
2401 break;
2402#include "clang/Basic/AMDGPUTypes.def"
2403#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
2404#include "clang/Basic/HLSLIntangibleTypes.def"
2405 Width = Target->getPointerWidth(AddrSpace: LangAS::Default);
2406 Align = Target->getPointerAlign(AddrSpace: LangAS::Default);
2407 break;
2408 }
2409 break;
2410 case Type::ObjCObjectPointer:
2411 Width = Target->getPointerWidth(AddrSpace: LangAS::Default);
2412 Align = Target->getPointerAlign(AddrSpace: LangAS::Default);
2413 break;
2414 case Type::BlockPointer:
2415 AS = cast<BlockPointerType>(Val: T)->getPointeeType().getAddressSpace();
2416 Width = Target->getPointerWidth(AddrSpace: AS);
2417 Align = Target->getPointerAlign(AddrSpace: AS);
2418 break;
2419 case Type::LValueReference:
2420 case Type::RValueReference:
2421 // alignof and sizeof should never enter this code path here, so we go
2422 // the pointer route.
2423 AS = cast<ReferenceType>(Val: T)->getPointeeType().getAddressSpace();
2424 Width = Target->getPointerWidth(AddrSpace: AS);
2425 Align = Target->getPointerAlign(AddrSpace: AS);
2426 break;
2427 case Type::Pointer:
2428 AS = cast<PointerType>(Val: T)->getPointeeType().getAddressSpace();
2429 Width = Target->getPointerWidth(AddrSpace: AS);
2430 Align = Target->getPointerAlign(AddrSpace: AS);
2431 break;
2432 case Type::MemberPointer: {
2433 const auto *MPT = cast<MemberPointerType>(Val: T);
2434 CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT);
2435 Width = MPI.Width;
2436 Align = MPI.Align;
2437 break;
2438 }
2439 case Type::Complex: {
2440 // Complex types have the same alignment as their elements, but twice the
2441 // size.
2442 TypeInfo EltInfo = getTypeInfo(T: cast<ComplexType>(Val: T)->getElementType());
2443 Width = EltInfo.Width * 2;
2444 Align = EltInfo.Align;
2445 break;
2446 }
2447 case Type::ObjCObject:
2448 return getTypeInfo(T: cast<ObjCObjectType>(Val: T)->getBaseType().getTypePtr());
2449 case Type::Adjusted:
2450 case Type::Decayed:
2451 return getTypeInfo(T: cast<AdjustedType>(Val: T)->getAdjustedType().getTypePtr());
2452 case Type::ObjCInterface: {
2453 const auto *ObjCI = cast<ObjCInterfaceType>(Val: T);
2454 if (ObjCI->getDecl()->isInvalidDecl()) {
2455 Width = 8;
2456 Align = 8;
2457 break;
2458 }
2459 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(D: ObjCI->getDecl());
2460 Width = toBits(CharSize: Layout.getSize());
2461 Align = toBits(CharSize: Layout.getAlignment());
2462 break;
2463 }
2464 case Type::BitInt: {
2465 const auto *EIT = cast<BitIntType>(Val: T);
2466 Align = Target->getBitIntAlign(NumBits: EIT->getNumBits());
2467 Width = Target->getBitIntWidth(NumBits: EIT->getNumBits());
2468 break;
2469 }
2470 case Type::Record:
2471 case Type::Enum: {
2472 const auto *TT = cast<TagType>(Val: T);
2473
2474 if (TT->getDecl()->isInvalidDecl()) {
2475 Width = 8;
2476 Align = 8;
2477 break;
2478 }
2479
2480 if (const auto *ET = dyn_cast<EnumType>(Val: TT)) {
2481 const EnumDecl *ED = ET->getDecl();
2482 TypeInfo Info =
2483 getTypeInfo(T: ED->getIntegerType()->getUnqualifiedDesugaredType());
2484 if (unsigned AttrAlign = ED->getMaxAlignment()) {
2485 Info.Align = AttrAlign;
2486 Info.AlignRequirement = AlignRequirementKind::RequiredByEnum;
2487 }
2488 return Info;
2489 }
2490
2491 const auto *RT = cast<RecordType>(Val: TT);
2492 const RecordDecl *RD = RT->getDecl();
2493 const ASTRecordLayout &Layout = getASTRecordLayout(D: RD);
2494 Width = toBits(CharSize: Layout.getSize());
2495 Align = toBits(CharSize: Layout.getAlignment());
2496 AlignRequirement = RD->hasAttr<AlignedAttr>()
2497 ? AlignRequirementKind::RequiredByRecord
2498 : AlignRequirementKind::None;
2499 break;
2500 }
2501
2502 case Type::SubstTemplateTypeParm:
2503 return getTypeInfo(T: cast<SubstTemplateTypeParmType>(Val: T)->
2504 getReplacementType().getTypePtr());
2505
2506 case Type::Auto:
2507 case Type::DeducedTemplateSpecialization: {
2508 const auto *A = cast<DeducedType>(Val: T);
2509 assert(!A->getDeducedType().isNull() &&
2510 "cannot request the size of an undeduced or dependent auto type");
2511 return getTypeInfo(T: A->getDeducedType().getTypePtr());
2512 }
2513
2514 case Type::Paren:
2515 return getTypeInfo(T: cast<ParenType>(Val: T)->getInnerType().getTypePtr());
2516
2517 case Type::MacroQualified:
2518 return getTypeInfo(
2519 T: cast<MacroQualifiedType>(Val: T)->getUnderlyingType().getTypePtr());
2520
2521 case Type::ObjCTypeParam:
2522 return getTypeInfo(T: cast<ObjCTypeParamType>(Val: T)->desugar().getTypePtr());
2523
2524 case Type::Using:
2525 return getTypeInfo(T: cast<UsingType>(Val: T)->desugar().getTypePtr());
2526
2527 case Type::Typedef: {
2528 const auto *TT = cast<TypedefType>(Val: T);
2529 TypeInfo Info = getTypeInfo(T: TT->desugar().getTypePtr());
2530 // If the typedef has an aligned attribute on it, it overrides any computed
2531 // alignment we have. This violates the GCC documentation (which says that
2532 // attribute(aligned) can only round up) but matches its implementation.
2533 if (unsigned AttrAlign = TT->getDecl()->getMaxAlignment()) {
2534 Align = AttrAlign;
2535 AlignRequirement = AlignRequirementKind::RequiredByTypedef;
2536 } else {
2537 Align = Info.Align;
2538 AlignRequirement = Info.AlignRequirement;
2539 }
2540 Width = Info.Width;
2541 break;
2542 }
2543
2544 case Type::Elaborated:
2545 return getTypeInfo(T: cast<ElaboratedType>(Val: T)->getNamedType().getTypePtr());
2546
2547 case Type::Attributed:
2548 return getTypeInfo(
2549 T: cast<AttributedType>(Val: T)->getEquivalentType().getTypePtr());
2550
2551 case Type::CountAttributed:
2552 return getTypeInfo(T: cast<CountAttributedType>(Val: T)->desugar().getTypePtr());
2553
2554 case Type::BTFTagAttributed:
2555 return getTypeInfo(
2556 T: cast<BTFTagAttributedType>(Val: T)->getWrappedType().getTypePtr());
2557
2558 case Type::HLSLAttributedResource:
2559 return getTypeInfo(
2560 T: cast<HLSLAttributedResourceType>(Val: T)->getWrappedType().getTypePtr());
2561
2562 case Type::HLSLInlineSpirv: {
2563 const auto *ST = cast<HLSLInlineSpirvType>(Val: T);
2564 // Size is specified in bytes, convert to bits
2565 Width = ST->getSize() * 8;
2566 Align = ST->getAlignment();
2567 if (Width == 0 && Align == 0) {
2568 // We are defaulting to laying out opaque SPIR-V types as 32-bit ints.
2569 Width = 32;
2570 Align = 32;
2571 }
2572 break;
2573 }
2574
2575 case Type::Atomic: {
2576 // Start with the base type information.
2577 TypeInfo Info = getTypeInfo(T: cast<AtomicType>(Val: T)->getValueType());
2578 Width = Info.Width;
2579 Align = Info.Align;
2580
2581 if (!Width) {
2582 // An otherwise zero-sized type should still generate an
2583 // atomic operation.
2584 Width = Target->getCharWidth();
2585 assert(Align);
2586 } else if (Width <= Target->getMaxAtomicPromoteWidth()) {
2587 // If the size of the type doesn't exceed the platform's max
2588 // atomic promotion width, make the size and alignment more
2589 // favorable to atomic operations:
2590
2591 // Round the size up to a power of 2.
2592 Width = llvm::bit_ceil(Value: Width);
2593
2594 // Set the alignment equal to the size.
2595 Align = static_cast<unsigned>(Width);
2596 }
2597 }
2598 break;
2599
2600 case Type::Pipe:
2601 Width = Target->getPointerWidth(AddrSpace: LangAS::opencl_global);
2602 Align = Target->getPointerAlign(AddrSpace: LangAS::opencl_global);
2603 break;
2604 }
2605
2606 assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
2607 return TypeInfo(Width, Align, AlignRequirement);
2608}
2609
2610unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
2611 UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(Val: T);
2612 if (I != MemoizedUnadjustedAlign.end())
2613 return I->second;
2614
2615 unsigned UnadjustedAlign;
2616 if (const auto *RT = T->getAs<RecordType>()) {
2617 const RecordDecl *RD = RT->getDecl();
2618 const ASTRecordLayout &Layout = getASTRecordLayout(D: RD);
2619 UnadjustedAlign = toBits(CharSize: Layout.getUnadjustedAlignment());
2620 } else if (const auto *ObjCI = T->getAs<ObjCInterfaceType>()) {
2621 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(D: ObjCI->getDecl());
2622 UnadjustedAlign = toBits(CharSize: Layout.getUnadjustedAlignment());
2623 } else {
2624 UnadjustedAlign = getTypeAlign(T: T->getUnqualifiedDesugaredType());
2625 }
2626
2627 MemoizedUnadjustedAlign[T] = UnadjustedAlign;
2628 return UnadjustedAlign;
2629}
2630
2631unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const {
2632 unsigned SimdAlign = llvm::OpenMPIRBuilder::getOpenMPDefaultSimdAlign(
2633 TargetTriple: getTargetInfo().getTriple(), Features: Target->getTargetOpts().FeatureMap);
2634 return SimdAlign;
2635}
2636
/// toCharUnitsFromBits - Convert a size in bits to a size in characters.
/// Note that the division truncates: callers are expected to pass a bit
/// count that is a multiple of the target's char width.
CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
  return CharUnits::fromQuantity(Quantity: BitSize / getCharWidth());
}
2641
/// toBits - Convert a size in characters to a size in bits.
int64_t ASTContext::toBits(CharUnits CharSize) const {
  return CharSize.getQuantity() * getCharWidth();
}
2646
2647/// getTypeSizeInChars - Return the size of the specified type, in characters.
2648/// This method does not work on incomplete types.
2649CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
2650 return getTypeInfoInChars(T).Width;
2651}
2652CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
2653 return getTypeInfoInChars(T).Width;
2654}
2655
2656/// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
2657/// characters. This method does not work on incomplete types.
2658CharUnits ASTContext::getTypeAlignInChars(QualType T) const {
2659 return toCharUnitsFromBits(BitSize: getTypeAlign(T));
2660}
2661CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
2662 return toCharUnitsFromBits(BitSize: getTypeAlign(T));
2663}
2664
2665/// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a
2666/// type, in characters, before alignment adjustments. This method does
2667/// not work on incomplete types.
2668CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const {
2669 return toCharUnitsFromBits(BitSize: getTypeUnadjustedAlign(T));
2670}
2671CharUnits ASTContext::getTypeUnadjustedAlignInChars(const Type *T) const {
2672 return toCharUnitsFromBits(BitSize: getTypeUnadjustedAlign(T));
2673}
2674
/// getPreferredTypeAlign - Return the "preferred" alignment of the specified
/// type for the current target in bits. This can be different than the ABI
/// alignment in cases where it is beneficial for performance or backwards
/// compatibility preserving to overalign a data type. (Note: despite the name,
/// the preferred alignment is ABI-impacting, and not an optimization.)
unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
  TypeInfo TI = getTypeInfo(T);
  unsigned ABIAlign = TI.Align;

  // Arrays align as their base element type does.
  T = T->getBaseElementTypeUnsafe();

  // The preferred alignment of member pointers is that of a pointer.
  if (T->isMemberPointerType())
    return getPreferredTypeAlign(T: getPointerDiffType().getTypePtr());

  if (!Target->allowsLargerPreferedTypeAlignment())
    return ABIAlign;

  if (const auto *RT = T->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();

    // When used as part of a typedef, or together with a 'packed' attribute,
    // the 'aligned' attribute can be used to decrease alignment. Note that the
    // 'packed' case is already taken into consideration when computing the
    // alignment, we only need to handle the typedef case here.
    if (TI.AlignRequirement == AlignRequirementKind::RequiredByTypedef ||
        RD->isInvalidDecl())
      return ABIAlign;

    unsigned PreferredAlign = static_cast<unsigned>(
        toBits(CharSize: getASTRecordLayout(D: RD).PreferredAlignment));
    assert(PreferredAlign >= ABIAlign &&
           "PreferredAlign should be at least as large as ABIAlign.");
    return PreferredAlign;
  }

  // Double (and, for targets supporting AIX `power` alignment, long double) and
  // long long should be naturally aligned (despite requiring less alignment) if
  // possible.
  // Look through complex and enum sugar to the underlying arithmetic type.
  if (const auto *CT = T->getAs<ComplexType>())
    T = CT->getElementType().getTypePtr();
  if (const auto *ET = T->getAs<EnumType>())
    T = ET->getDecl()->getIntegerType().getTypePtr();
  if (T->isSpecificBuiltinType(K: BuiltinType::Double) ||
      T->isSpecificBuiltinType(K: BuiltinType::LongLong) ||
      T->isSpecificBuiltinType(K: BuiltinType::ULongLong) ||
      (T->isSpecificBuiltinType(K: BuiltinType::LongDouble) &&
       Target->defaultsToAIXPowerAlignment()))
    // Don't increase the alignment if an alignment attribute was specified on a
    // typedef declaration.
    if (!TI.isAlignRequired())
      return std::max(a: ABIAlign, b: (unsigned)getTypeSize(T));

  return ABIAlign;
}
2730
2731/// getTargetDefaultAlignForAttributeAligned - Return the default alignment
2732/// for __attribute__((aligned)) on this target, to be used if no alignment
2733/// value is specified.
2734unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const {
2735 return getTargetInfo().getDefaultAlignForAttributeAligned();
2736}
2737
2738/// getAlignOfGlobalVar - Return the alignment in bits that should be given
2739/// to a global variable of the specified type.
2740unsigned ASTContext::getAlignOfGlobalVar(QualType T, const VarDecl *VD) const {
2741 uint64_t TypeSize = getTypeSize(T: T.getTypePtr());
2742 return std::max(a: getPreferredTypeAlign(T),
2743 b: getMinGlobalAlignOfVar(Size: TypeSize, VD));
2744}
2745
2746/// getAlignOfGlobalVarInChars - Return the alignment in characters that
2747/// should be given to a global variable of the specified type.
2748CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T,
2749 const VarDecl *VD) const {
2750 return toCharUnitsFromBits(BitSize: getAlignOfGlobalVar(T, VD));
2751}
2752
2753unsigned ASTContext::getMinGlobalAlignOfVar(uint64_t Size,
2754 const VarDecl *VD) const {
2755 // Make the default handling as that of a non-weak definition in the
2756 // current translation unit.
2757 bool HasNonWeakDef = !VD || (VD->hasDefinition() && !VD->isWeak());
2758 return getTargetInfo().getMinGlobalAlign(Size, HasNonWeakDef);
2759}
2760
2761CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const {
2762 CharUnits Offset = CharUnits::Zero();
2763 const ASTRecordLayout *Layout = &getASTRecordLayout(D: RD);
2764 while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) {
2765 Offset += Layout->getBaseClassOffset(Base);
2766 Layout = &getASTRecordLayout(D: Base);
2767 }
2768 return Offset;
2769}
2770
/// Compute the this-pointer adjustment implied by a member pointer's
/// derived-to-base (or, for pointers to members of a derived class,
/// base-to-derived) path, as a byte offset. The result is negated for
/// member-of-derived pointers so the adjustment applies in the right
/// direction.
CharUnits ASTContext::getMemberPointerPathAdjustment(const APValue &MP) const {
  const ValueDecl *MPD = MP.getMemberPointerDecl();
  CharUnits ThisAdjustment = CharUnits::Zero();
  ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath();
  bool DerivedMember = MP.isMemberPointerToDerivedMember();
  // Start from the class that immediately declares the member.
  const CXXRecordDecl *RD = cast<CXXRecordDecl>(Val: MPD->getDeclContext());
  for (unsigned I = 0, N = Path.size(); I != N; ++I) {
    const CXXRecordDecl *Base = RD;
    const CXXRecordDecl *Derived = Path[I];
    // For a pointer to a member of a derived class, the path is walked in
    // the opposite direction, so swap the roles at each step.
    if (DerivedMember)
      std::swap(a&: Base, b&: Derived);
    ThisAdjustment += getASTRecordLayout(D: Derived).getBaseClassOffset(Base);
    RD = Path[I];
  }
  if (DerivedMember)
    ThisAdjustment = -ThisAdjustment;
  return ThisAdjustment;
}
2789
2790/// DeepCollectObjCIvars -
2791/// This routine first collects all declared, but not synthesized, ivars in
2792/// super class and then collects all ivars, including those synthesized for
2793/// current class. This routine is used for implementation of current class
2794/// when all ivars, declared and synthesized are known.
2795void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI,
2796 bool leafClass,
2797 SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const {
2798 if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
2799 DeepCollectObjCIvars(OI: SuperClass, leafClass: false, Ivars);
2800 if (!leafClass) {
2801 llvm::append_range(C&: Ivars, R: OI->ivars());
2802 } else {
2803 auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
2804 for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
2805 Iv= Iv->getNextIvar())
2806 Ivars.push_back(Elt: Iv);
2807 }
2808}
2809
2810/// CollectInheritedProtocols - Collect all protocols in current class and
2811/// those inherited by it.
2812void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
2813 llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) {
2814 if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(Val: CDecl)) {
2815 // We can use protocol_iterator here instead of
2816 // all_referenced_protocol_iterator since we are walking all categories.
2817 for (auto *Proto : OI->all_referenced_protocols()) {
2818 CollectInheritedProtocols(CDecl: Proto, Protocols);
2819 }
2820
2821 // Categories of this Interface.
2822 for (const auto *Cat : OI->visible_categories())
2823 CollectInheritedProtocols(CDecl: Cat, Protocols);
2824
2825 if (ObjCInterfaceDecl *SD = OI->getSuperClass())
2826 while (SD) {
2827 CollectInheritedProtocols(CDecl: SD, Protocols);
2828 SD = SD->getSuperClass();
2829 }
2830 } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(Val: CDecl)) {
2831 for (auto *Proto : OC->protocols()) {
2832 CollectInheritedProtocols(CDecl: Proto, Protocols);
2833 }
2834 } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(Val: CDecl)) {
2835 // Insert the protocol.
2836 if (!Protocols.insert(
2837 Ptr: const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second)
2838 return;
2839
2840 for (auto *Proto : OP->protocols())
2841 CollectInheritedProtocols(CDecl: Proto, Protocols);
2842 }
2843}
2844
2845static bool unionHasUniqueObjectRepresentations(const ASTContext &Context,
2846 const RecordDecl *RD,
2847 bool CheckIfTriviallyCopyable) {
2848 assert(RD->isUnion() && "Must be union type");
2849 CharUnits UnionSize = Context.getTypeSizeInChars(T: RD->getTypeForDecl());
2850
2851 for (const auto *Field : RD->fields()) {
2852 if (!Context.hasUniqueObjectRepresentations(Ty: Field->getType(),
2853 CheckIfTriviallyCopyable))
2854 return false;
2855 CharUnits FieldSize = Context.getTypeSizeInChars(T: Field->getType());
2856 if (FieldSize != UnionSize)
2857 return false;
2858 }
2859 return !RD->field_empty();
2860}
2861
2862static int64_t getSubobjectOffset(const FieldDecl *Field,
2863 const ASTContext &Context,
2864 const clang::ASTRecordLayout & /*Layout*/) {
2865 return Context.getFieldOffset(FD: Field);
2866}
2867
2868static int64_t getSubobjectOffset(const CXXRecordDecl *RD,
2869 const ASTContext &Context,
2870 const clang::ASTRecordLayout &Layout) {
2871 return Context.toBits(CharSize: Layout.getBaseClassOffset(Base: RD));
2872}
2873
2874static std::optional<int64_t>
2875structHasUniqueObjectRepresentations(const ASTContext &Context,
2876 const RecordDecl *RD,
2877 bool CheckIfTriviallyCopyable);
2878
/// Compute the number of bits that \p Field contributes to its containing
/// object's representation, or std::nullopt if the field's representation
/// is not unique.
static std::optional<int64_t>
getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context,
                       bool CheckIfTriviallyCopyable) {
  // Non-union record fields delegate to the struct-level walk.
  if (Field->getType()->isRecordType()) {
    const RecordDecl *RD = Field->getType()->getAsRecordDecl();
    if (!RD->isUnion())
      return structHasUniqueObjectRepresentations(Context, RD,
                                                  CheckIfTriviallyCopyable);
  }

  // A _BitInt type may not be unique if it has padding bits
  // but if it is a bitfield the padding bits are not used.
  bool IsBitIntType = Field->getType()->isBitIntType();
  if (!Field->getType()->isReferenceType() && !IsBitIntType &&
      !Context.hasUniqueObjectRepresentations(Ty: Field->getType(),
                                              CheckIfTriviallyCopyable))
    return std::nullopt;

  int64_t FieldSizeInBits =
      Context.toBits(CharSize: Context.getTypeSizeInChars(T: Field->getType()));
  if (Field->isBitField()) {
    // If we have explicit padding bits, they don't contribute bits
    // to the actual object representation, so return 0.
    if (Field->isUnnamedBitField())
      return 0;

    int64_t BitfieldSize = Field->getBitWidthValue();
    if (IsBitIntType) {
      // A _BitInt bit-field wider than the _BitInt's own width would
      // rely on padding bits.
      if ((unsigned)BitfieldSize >
          cast<BitIntType>(Val: Field->getType())->getNumBits())
        return std::nullopt;
    } else if (BitfieldSize > FieldSizeInBits) {
      // A bit-field wider than its type's storage has padding bits.
      return std::nullopt;
    }
    FieldSizeInBits = BitfieldSize;
  } else if (IsBitIntType && !Context.hasUniqueObjectRepresentations(
                                 Ty: Field->getType(), CheckIfTriviallyCopyable)) {
    // Non-bit-field _BitInt members must themselves be unique (the check
    // above was skipped for _BitInt).
    return std::nullopt;
  }
  return FieldSizeInBits;
}
2920
/// Base-class overload: a base subobject's occupied size (and uniqueness)
/// is that of the base struct itself.
static std::optional<int64_t>
getSubobjectSizeInBits(const CXXRecordDecl *RD, const ASTContext &Context,
                       bool CheckIfTriviallyCopyable) {
  return structHasUniqueObjectRepresentations(Context, RD,
                                              CheckIfTriviallyCopyable);
}
2927
2928template <typename RangeT>
2929static std::optional<int64_t> structSubobjectsHaveUniqueObjectRepresentations(
2930 const RangeT &Subobjects, int64_t CurOffsetInBits,
2931 const ASTContext &Context, const clang::ASTRecordLayout &Layout,
2932 bool CheckIfTriviallyCopyable) {
2933 for (const auto *Subobject : Subobjects) {
2934 std::optional<int64_t> SizeInBits =
2935 getSubobjectSizeInBits(Subobject, Context, CheckIfTriviallyCopyable);
2936 if (!SizeInBits)
2937 return std::nullopt;
2938 if (*SizeInBits != 0) {
2939 int64_t Offset = getSubobjectOffset(Subobject, Context, Layout);
2940 if (Offset != CurOffsetInBits)
2941 return std::nullopt;
2942 CurOffsetInBits += *SizeInBits;
2943 }
2944 }
2945 return CurOffsetInBits;
2946}
2947
/// Determine whether a non-union record has unique object representations
/// by walking its bases and then its fields in layout order. Returns the
/// total occupied size in bits, or std::nullopt if uniqueness fails.
static std::optional<int64_t>
structHasUniqueObjectRepresentations(const ASTContext &Context,
                                     const RecordDecl *RD,
                                     bool CheckIfTriviallyCopyable) {
  assert(!RD->isUnion() && "Must be struct/class type");
  const auto &Layout = Context.getASTRecordLayout(D: RD);

  int64_t CurOffsetInBits = 0;
  if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(Val: RD)) {
    // Dynamic classes are rejected outright.
    if (ClassDecl->isDynamicClass())
      return std::nullopt;

    SmallVector<CXXRecordDecl *, 4> Bases;
    for (const auto &Base : ClassDecl->bases()) {
      // Empty types can be inherited from, and non-empty types can potentially
      // have tail padding, so just make sure there isn't an error.
      Bases.emplace_back(Args: Base.getType()->getAsCXXRecordDecl());
    }

    // Check bases in layout order rather than declaration order.
    llvm::sort(C&: Bases, Comp: [&](const CXXRecordDecl *L, const CXXRecordDecl *R) {
      return Layout.getBaseClassOffset(Base: L) < Layout.getBaseClassOffset(Base: R);
    });

    std::optional<int64_t> OffsetAfterBases =
        structSubobjectsHaveUniqueObjectRepresentations(
            Subobjects: Bases, CurOffsetInBits, Context, Layout, CheckIfTriviallyCopyable);
    if (!OffsetAfterBases)
      return std::nullopt;
    CurOffsetInBits = *OffsetAfterBases;
  }

  // Fields follow the bases, continuing from the offset reached so far.
  std::optional<int64_t> OffsetAfterFields =
      structSubobjectsHaveUniqueObjectRepresentations(
          Subobjects: RD->fields(), CurOffsetInBits, Context, Layout,
          CheckIfTriviallyCopyable);
  if (!OffsetAfterFields)
    return std::nullopt;
  CurOffsetInBits = *OffsetAfterFields;

  return CurOffsetInBits;
}
2989
bool ASTContext::hasUniqueObjectRepresentations(
    QualType Ty, bool CheckIfTriviallyCopyable) const {
  // C++17 [meta.unary.prop]:
  //   The predicate condition for a template specialization
  //   has_unique_object_representations<T> shall be satisfied if and only if:
  //     (9.1) - T is trivially copyable, and
  //     (9.2) - any two objects of type T with the same value have the same
  //     object representation, where:
  //     - two objects of array or non-union class type are considered to have
  //       the same value if their respective sequences of direct subobjects
  //       have the same values, and
  //     - two objects of union type are considered to have the same value if
  //       they have the same active member and the corresponding members have
  //       the same value.
  //   The set of scalar types for which this condition holds is
  //   implementation-defined. [ Note: If a type has padding bits, the condition
  //   does not hold; otherwise, the condition holds true for unsigned integral
  //   types. -- end note ]
  assert(!Ty.isNull() && "Null QualType sent to unique object rep check");

  // Arrays are unique only if their element type is unique.
  if (Ty->isArrayType())
    return hasUniqueObjectRepresentations(Ty: getBaseElementType(QT: Ty),
                                          CheckIfTriviallyCopyable);

  assert((Ty->isVoidType() || !Ty->isIncompleteType()) &&
         "hasUniqueObjectRepresentations should not be called with an "
         "incomplete type");

  // (9.1) - T is trivially copyable...
  if (CheckIfTriviallyCopyable && !Ty.isTriviallyCopyableType(Context: *this))
    return false;

  // All integrals and enums are unique.
  if (Ty->isIntegralOrEnumerationType()) {
    // Address discriminated integer types are not unique.
    if (Ty.hasAddressDiscriminatedPointerAuth())
      return false;
    // Except _BitInt types that have padding bits.
    if (const auto *BIT = Ty->getAs<BitIntType>())
      return getTypeSize(T: BIT) == BIT->getNumBits();

    return true;
  }

  // All other pointers (except __ptrauth pointers) are unique.
  if (Ty->isPointerType())
    return !Ty.hasAddressDiscriminatedPointerAuth();

  // Member pointers are unique unless the target ABI reports padding in
  // their representation.
  if (const auto *MPT = Ty->getAs<MemberPointerType>())
    return !ABI->getMemberPointerInfo(MPT).HasPadding;

  if (Ty->isRecordType()) {
    const RecordDecl *Record = Ty->castAs<RecordType>()->getDecl();

    if (Record->isInvalidDecl())
      return false;

    if (Record->isUnion())
      return unionHasUniqueObjectRepresentations(Context: *this, RD: Record,
                                                 CheckIfTriviallyCopyable);

    std::optional<int64_t> StructSize = structHasUniqueObjectRepresentations(
        Context: *this, RD: Record, CheckIfTriviallyCopyable);

    // Unique only if the occupied bits exactly cover the type's size, i.e.
    // there is no tail padding.
    return StructSize && *StructSize == static_cast<int64_t>(getTypeSize(T: Ty));
  }

  // FIXME: More cases to handle here (list by rsmith):
  // vectors (careful about, eg, vector of 3 foo)
  // _Complex int and friends
  // _Atomic T
  // Obj-C block pointers
  // Obj-C object pointers
  // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t,
  // clk_event_t, queue_t, reserve_id_t)
  // There're also Obj-C class types and the Obj-C selector type, but I think it
  // makes sense for those to return false here.

  return false;
}
3071
3072unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const {
3073 unsigned count = 0;
3074 // Count ivars declared in class extension.
3075 for (const auto *Ext : OI->known_extensions())
3076 count += Ext->ivar_size();
3077
3078 // Count ivar defined in this class's implementation. This
3079 // includes synthesized ivars.
3080 if (ObjCImplementationDecl *ImplDecl = OI->getImplementation())
3081 count += ImplDecl->ivar_size();
3082
3083 return count;
3084}
3085
3086bool ASTContext::isSentinelNullExpr(const Expr *E) {
3087 if (!E)
3088 return false;
3089
3090 // nullptr_t is always treated as null.
3091 if (E->getType()->isNullPtrType()) return true;
3092
3093 if (E->getType()->isAnyPointerType() &&
3094 E->IgnoreParenCasts()->isNullPointerConstant(Ctx&: *this,
3095 NPC: Expr::NPC_ValueDependentIsNull))
3096 return true;
3097
3098 // Unfortunately, __null has type 'int'.
3099 if (isa<GNUNullExpr>(Val: E)) return true;
3100
3101 return false;
3102}
3103
3104/// Get the implementation of ObjCInterfaceDecl, or nullptr if none
3105/// exists.
3106ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) {
3107 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
3108 I = ObjCImpls.find(Val: D);
3109 if (I != ObjCImpls.end())
3110 return cast<ObjCImplementationDecl>(Val: I->second);
3111 return nullptr;
3112}
3113
3114/// Get the implementation of ObjCCategoryDecl, or nullptr if none
3115/// exists.
3116ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) {
3117 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
3118 I = ObjCImpls.find(Val: D);
3119 if (I != ObjCImpls.end())
3120 return cast<ObjCCategoryImplDecl>(Val: I->second);
3121 return nullptr;
3122}
3123
/// Set the implementation of ObjCInterfaceDecl.
void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD,
                                       ObjCImplementationDecl *ImplD) {
  assert(IFaceD && ImplD && "Passed null params");
  // Overwrites any previously registered implementation for this interface.
  ObjCImpls[IFaceD] = ImplD;
}
3130
/// Set the implementation of ObjCCategoryDecl.
void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD,
                                       ObjCCategoryImplDecl *ImplD) {
  assert(CatD && ImplD && "Passed null params");
  // Overwrites any previously registered implementation for this category.
  ObjCImpls[CatD] = ImplD;
}
3137
/// Return the recorded redeclaration of \p MD, or null if none has been
/// registered via setObjCMethodRedeclaration.
const ObjCMethodDecl *
ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const {
  // DenseMap::lookup yields a value-initialized (null) pointer when absent.
  return ObjCMethodRedecls.lookup(Val: MD);
}
3142
/// Record \p Redecl as the redeclaration of \p MD; at most one
/// redeclaration may be recorded per method.
void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD,
                                            const ObjCMethodDecl *Redecl) {
  assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration");
  ObjCMethodRedecls[MD] = Redecl;
}
3148
3149const ObjCInterfaceDecl *ASTContext::getObjContainingInterface(
3150 const NamedDecl *ND) const {
3151 if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(Val: ND->getDeclContext()))
3152 return ID;
3153 if (const auto *CD = dyn_cast<ObjCCategoryDecl>(Val: ND->getDeclContext()))
3154 return CD->getClassInterface();
3155 if (const auto *IMD = dyn_cast<ObjCImplDecl>(Val: ND->getDeclContext()))
3156 return IMD->getClassInterface();
3157
3158 return nullptr;
3159}
3160
3161/// Get the copy initialization expression of VarDecl, or nullptr if
3162/// none exists.
3163BlockVarCopyInit ASTContext::getBlockVarCopyInit(const VarDecl *VD) const {
3164 assert(VD && "Passed null params");
3165 assert(VD->hasAttr<BlocksAttr>() &&
3166 "getBlockVarCopyInits - not __block var");
3167 auto I = BlockVarCopyInits.find(Val: VD);
3168 if (I != BlockVarCopyInits.end())
3169 return I->second;
3170 return {nullptr, false};
3171}
3172
/// Set the copy initialization expression of a block var decl.
void ASTContext::setBlockVarCopyInit(const VarDecl*VD, Expr *CopyExpr,
                                     bool CanThrow) {
  assert(VD && CopyExpr && "Passed null params");
  assert(VD->hasAttr<BlocksAttr>() &&
         "setBlockVarCopyInits - not __block var");
  // Store the expression together with its can-throw flag.
  BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow);
}
3181
/// Allocate a TypeSourceInfo for \p T with \p DataSize bytes of trailing
/// TypeLoc storage (computed from T when 0 is passed).
TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T,
                                                 unsigned DataSize) const {
  // When no size is supplied, compute the full TypeLoc data size; otherwise
  // verify the caller passed the exact required size.
  if (!DataSize)
    DataSize = TypeLoc::getFullDataSizeForType(Ty: T);
  else
    assert(DataSize == TypeLoc::getFullDataSizeForType(T) &&
           "incorrect data size provided to CreateTypeSourceInfo!");

  // The TypeLoc data lives immediately after the TypeSourceInfo object, so
  // allocate both in one block and placement-new the object at its front.
  auto *TInfo =
    (TypeSourceInfo*)BumpAlloc.Allocate(Size: sizeof(TypeSourceInfo) + DataSize, Alignment: 8);
  new (TInfo) TypeSourceInfo(T, DataSize);
  return TInfo;
}
3195
/// Create a TypeSourceInfo for \p T whose every location is initialized to
/// the single location \p L.
TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T,
                                                     SourceLocation L) const {
  TypeSourceInfo *DI = CreateTypeSourceInfo(T);
  DI->getTypeLoc().initialize(Context&: const_cast<ASTContext &>(*this), Loc: L);
  return DI;
}
3202
/// Return the record layout for an Objective-C interface; thin wrapper
/// forwarding to getObjCLayout.
const ASTRecordLayout &
ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const {
  return getObjCLayout(D);
}
3207
3208static auto getCanonicalTemplateArguments(const ASTContext &C,
3209 ArrayRef<TemplateArgument> Args,
3210 bool &AnyNonCanonArgs) {
3211 SmallVector<TemplateArgument, 16> CanonArgs(Args);
3212 AnyNonCanonArgs |= C.canonicalizeTemplateArguments(Args: CanonArgs);
3213 return CanonArgs;
3214}
3215
3216bool ASTContext::canonicalizeTemplateArguments(
3217 MutableArrayRef<TemplateArgument> Args) const {
3218 bool AnyNonCanonArgs = false;
3219 for (auto &Arg : Args) {
3220 TemplateArgument OrigArg = Arg;
3221 Arg = getCanonicalTemplateArgument(Arg);
3222 AnyNonCanonArgs |= !Arg.structurallyEquals(Other: OrigArg);
3223 }
3224 return AnyNonCanonArgs;
3225}
3226
3227//===----------------------------------------------------------------------===//
3228// Type creation/memoization methods
3229//===----------------------------------------------------------------------===//
3230
/// Return the (uniqued) extended-qualifier type wrapping \p baseType with
/// \p quals. Fast qualifiers are carried on the returned QualType itself
/// rather than in the ExtQuals node.
QualType
ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const {
  // Peel the fast qualifiers off here and re-apply them to the result.
  unsigned fastQuals = quals.getFastQualifiers();
  quals.removeFastQualifiers();

  // Check if we've already instantiated this type.
  llvm::FoldingSetNodeID ID;
  ExtQuals::Profile(ID, BaseType: baseType, Quals: quals);
  void *insertPos = nullptr;
  if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, InsertPos&: insertPos)) {
    assert(eq->getQualifiers() == quals);
    return QualType(eq, fastQuals);
  }

  // If the base type is not canonical, make the appropriate canonical type.
  QualType canon;
  if (!baseType->isCanonicalUnqualified()) {
    SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split();
    canonSplit.Quals.addConsistentQualifiers(qs: quals);
    canon = getExtQualType(baseType: canonSplit.Ty, quals: canonSplit.Quals);

    // Re-find the insert position: building the canonical type may have
    // added nodes to the folding set, invalidating insertPos.
    (void) ExtQualNodes.FindNodeOrInsertPos(ID, InsertPos&: insertPos);
  }

  auto *eq = new (*this, alignof(ExtQuals)) ExtQuals(baseType, canon, quals);
  ExtQualNodes.InsertNode(N: eq, InsertPos: insertPos);
  return QualType(eq, fastQuals);
}
3260
/// Return \p T qualified with the given address space. \p T must not
/// already carry a (different) address space.
QualType ASTContext::getAddrSpaceQualType(QualType T,
                                          LangAS AddressSpace) const {
  // If the canonical type already carries this address space, T is fine
  // as-is.
  QualType CanT = getCanonicalType(T);
  if (CanT.getAddressSpace() == AddressSpace)
    return T;

  // If we are composing extended qualifiers together, merge together
  // into one ExtQuals node.
  QualifierCollector Quals;
  const Type *TypeNode = Quals.strip(type: T);

  // If this type already has an address space specified, it cannot get
  // another one.
  assert(!Quals.hasAddressSpace() &&
         "Type cannot be in multiple addr spaces!");
  Quals.addAddressSpace(space: AddressSpace);

  return getExtQualType(baseType: TypeNode, quals: Quals);
}
3280
/// Return \p T with any address-space qualifier removed, preserving all
/// other qualifiers.
QualType ASTContext::removeAddrSpaceQualType(QualType T) const {
  // If the type is not qualified with an address space, just return it
  // immediately.
  if (!T.hasAddressSpace())
    return T;

  QualifierCollector Quals;
  const Type *TypeNode;
  // For arrays, strip the qualifier off the element type, then reconstruct the
  // array type
  if (T.getTypePtr()->isArrayType()) {
    T = getUnqualifiedArrayType(T, Quals);
    TypeNode = T.getTypePtr();
  } else {
    // If we are composing extended qualifiers together, merge together
    // into one ExtQuals node.
    while (T.hasAddressSpace()) {
      TypeNode = Quals.strip(type: T);

      // If the type no longer has an address space after stripping qualifiers,
      // jump out.
      if (!QualType(TypeNode, 0).hasAddressSpace())
        break;

      // There might be sugar in the way. Strip it and try again.
      T = T.getSingleStepDesugaredType(Context: *this);
    }
  }

  Quals.removeAddressSpace();

  // Removal of the address space can mean there are no longer any
  // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts)
  // or required.
  if (Quals.hasNonFastQualifiers())
    return getExtQualType(baseType: TypeNode, quals: Quals);
  else
    return QualType(TypeNode, Quals.getFastQualifiers());
}
3320
/// Compute a stable pointer-auth discriminator for a polymorphic class's
/// vtable pointer, derived by hashing the mangled name of its vtable.
uint16_t
ASTContext::getPointerAuthVTablePointerDiscriminator(const CXXRecordDecl *RD) {
  assert(RD->isPolymorphic() &&
         "Attempted to get vtable pointer discriminator on a monomorphic type");
  std::unique_ptr<MangleContext> MC(createMangleContext());
  SmallString<256> Str;
  llvm::raw_svector_ostream Out(Str);
  MC->mangleCXXVTable(RD, Out);
  // Hash the mangled vtable name down to a 16-bit discriminator.
  return llvm::getPointerAuthStableSipHash(S: Str);
}
3331
/// Encode a function type for use in the discriminator of a function pointer
/// type. We can't use the itanium scheme for this since C has quite permissive
/// rules for type compatibility that we need to be compatible with.
///
/// Formally, this function associates every function pointer type T with an
/// encoded string E(T). Let the equivalence relation T1 ~ T2 be defined as
/// E(T1) == E(T2). E(T) is part of the ABI of values of type T. C type
/// compatibility requires equivalent treatment under the ABI, so
/// CCompatible(T1, T2) must imply E(T1) == E(T2), that is, CCompatible must be
/// a subset of ~. Crucially, however, it must be a proper subset because
/// CCompatible is not an equivalence relation: for example, int[] is compatible
/// with both int[1] and int[2], but the latter are not compatible with each
/// other. Therefore this encoding function must be careful to only distinguish
/// types if there is no third type with which they are both required to be
/// compatible.
static void encodeTypeForFunctionPointerAuth(const ASTContext &Ctx,
                                             raw_ostream &OS, QualType QT) {
  // FIXME: Consider address space qualifiers.
  // Encoding is always performed on the canonical type so that sugar
  // (typedefs, elaboration) never affects the discriminator.
  const Type *T = QT.getCanonicalType().getTypePtr();

  // FIXME: Consider using the C++ type mangling when we encounter a construct
  // that is incompatible with C.

  switch (T->getTypeClass()) {
  case Type::Atomic:
    // _Atomic is transparent for discrimination: encode the value type.
    return encodeTypeForFunctionPointerAuth(
        Ctx, OS, cast<AtomicType>(T)->getValueType());

  // References use the Itanium codes ('R'/'O') followed by the pointee.
  case Type::LValueReference:
    OS << "R";
    encodeTypeForFunctionPointerAuth(Ctx, OS,
                                     cast<ReferenceType>(T)->getPointeeType());
    return;
  case Type::RValueReference:
    OS << "O";
    encodeTypeForFunctionPointerAuth(Ctx, OS,
                                     cast<ReferenceType>(T)->getPointeeType());
    return;

  case Type::Pointer:
    // C11 6.7.6.1p2:
    //   For two pointer types to be compatible, both shall be identically
    //   qualified and both shall be pointers to compatible types.
    // FIXME: we should also consider pointee types.
    OS << "P";
    return;

  case Type::ObjCObjectPointer:
  case Type::BlockPointer:
    // Encoded like ordinary pointers; no pointee discrimination.
    OS << "P";
    return;

  case Type::Complex:
    OS << "C";
    return encodeTypeForFunctionPointerAuth(
        Ctx, OS, cast<ComplexType>(T)->getElementType());

  case Type::VariableArray:
  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::ArrayParameter:
    // C11 6.7.6.2p6:
    //   For two array types to be compatible, both shall have compatible
    //   element types, and if both size specifiers are present, and are integer
    //   constant expressions, then both size specifiers shall have the same
    //   constant value [...]
    //
    // So since ElemType[N] has to be compatible ElemType[], we can't encode the
    // width of the array.
    OS << "A";
    return encodeTypeForFunctionPointerAuth(
        Ctx, OS, cast<ArrayType>(T)->getElementType());

  case Type::ObjCInterface:
  case Type::ObjCObject:
    // All ObjC object types are encoded identically.
    OS << "<objc_object>";
    return;

  case Type::Enum: {
    // C11 6.7.2.2p4:
    //   Each enumerated type shall be compatible with char, a signed integer
    //   type, or an unsigned integer type.
    //
    // So we have to treat enum types as integers.
    QualType UnderlyingType = cast<EnumType>(T)->getDecl()->getIntegerType();
    return encodeTypeForFunctionPointerAuth(
        Ctx, OS, UnderlyingType.isNull() ? Ctx.IntTy : UnderlyingType);
  }

  case Type::FunctionNoProto:
  case Type::FunctionProto: {
    // C11 6.7.6.3p15:
    //   For two function types to be compatible, both shall specify compatible
    //   return types. Moreover, the parameter type lists, if both are present,
    //   shall agree in the number of parameters and in the use of the ellipsis
    //   terminator; corresponding parameters shall have compatible types.
    //
    // That paragraph goes on to describe how unprototyped functions are to be
    // handled, which we ignore here. Unprototyped function pointers are hashed
    // as though they were prototyped nullary functions since that's probably
    // what the user meant. This behavior is non-conforming.
    // FIXME: If we add a "custom discriminator" function type attribute we
    // should encode functions as their discriminators.
    OS << "F";
    const auto *FuncType = cast<FunctionType>(T);
    encodeTypeForFunctionPointerAuth(Ctx, OS, FuncType->getReturnType());
    if (const auto *FPT = dyn_cast<FunctionProtoType>(FuncType)) {
      for (QualType Param : FPT->param_types()) {
        // Parameters are encoded after decay/qualifier adjustment, matching
        // how the signature actually participates in compatibility.
        Param = Ctx.getSignatureParameterType(Param);
        encodeTypeForFunctionPointerAuth(Ctx, OS, Param);
      }
      if (FPT->isVariadic())
        OS << "z";
    }
    OS << "E";
    return;
  }

  case Type::MemberPointer: {
    // Encode both the class (via the qualifier's type) and the pointee.
    OS << "M";
    const auto *MPT = T->castAs<MemberPointerType>();
    encodeTypeForFunctionPointerAuth(
        Ctx, OS, QualType(MPT->getQualifier()->getAsType(), 0));
    encodeTypeForFunctionPointerAuth(Ctx, OS, MPT->getPointeeType());
    return;
  }
  case Type::ExtVector:
  case Type::Vector:
    // Vectors are discriminated only by their total size in bytes, not by
    // element type or count.
    OS << "Dv" << Ctx.getTypeSizeInChars(T).getQuantity();
    break;

  // Don't bother discriminating based on these types.
  case Type::Pipe:
  case Type::BitInt:
  case Type::ConstantMatrix:
    OS << "?";
    return;

  case Type::Builtin: {
    const auto *BTy = T->castAs<BuiltinType>();
    switch (BTy->getKind()) {
// NOTE(review): signed and unsigned integer types are deliberately given the
// same encoding ("i"), presumably because enum types are encoded via their
// (implementation-chosen, possibly signed or unsigned) underlying integer
// type and must not be distinguished by that choice — confirm against the
// pointer-auth ABI documentation.
#define SIGNED_TYPE(Id, SingletonId)                                           \
  case BuiltinType::Id:                                                        \
    OS << "i";                                                                 \
    return;
#define UNSIGNED_TYPE(Id, SingletonId)                                         \
  case BuiltinType::Id:                                                        \
    OS << "i";                                                                 \
    return;
#define PLACEHOLDER_TYPE(Id, SingletonId) case BuiltinType::Id:
#define BUILTIN_TYPE(Id, SingletonId)
#include "clang/AST/BuiltinTypes.def"
      llvm_unreachable("placeholder types should not appear here.");

    // Floating-point types use their Itanium mangling codes.
    case BuiltinType::Half:
      OS << "Dh";
      return;
    case BuiltinType::Float:
      OS << "f";
      return;
    case BuiltinType::Double:
      OS << "d";
      return;
    case BuiltinType::LongDouble:
      OS << "e";
      return;
    case BuiltinType::Float16:
      OS << "DF16_";
      return;
    case BuiltinType::Float128:
      OS << "g";
      return;

    case BuiltinType::Void:
      OS << "v";
      return;

    // Opaque ObjC builtins and nullptr_t are encoded as pointers.
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
    case BuiltinType::NullPtr:
      OS << "P";
      return;

    // Don't bother discriminating based on OpenCL types.
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
    case BuiltinType::BFloat16:
    case BuiltinType::VectorQuad:
    case BuiltinType::VectorPair:
    case BuiltinType::DMR1024:
      OS << "?";
      return;

    // Don't bother discriminating based on these seldom-used types.
    case BuiltinType::Ibm128:
      return;
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix)                   \
  case BuiltinType::Id:                                                        \
    return;
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext)                                      \
  case BuiltinType::Id:                                                        \
    return;
#include "clang/Basic/OpenCLExtensionTypes.def"
#define SVE_TYPE(Name, Id, SingletonId)                                        \
  case BuiltinType::Id:                                                        \
    return;
#include "clang/Basic/AArch64ACLETypes.def"
#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId)                            \
  case BuiltinType::Id:                                                        \
    return;
#include "clang/Basic/HLSLIntangibleTypes.def"
    case BuiltinType::Dependent:
      llvm_unreachable("should never get here");
#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) case BuiltinType::Id:
#include "clang/Basic/AMDGPUTypes.def"
    case BuiltinType::WasmExternRef:
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
      llvm_unreachable("not yet implemented");
    }
    llvm_unreachable("should never get here");
  }
  case Type::Record: {
    const RecordDecl *RD = T->castAs<RecordType>()->getDecl();
    const IdentifierInfo *II = RD->getIdentifier();

    // In C++, an immediate typedef of an anonymous struct or union
    // is considered to name it for ODR purposes, but C's specification
    // of type compatibility does not have a similar rule. Using the typedef
    // name in function type discriminators anyway, as we do here,
    // therefore technically violates the C standard: two function pointer
    // types defined in terms of two typedef'd anonymous structs with
    // different names are formally still compatible, but we are assigning
    // them different discriminators and therefore incompatible ABIs.
    //
    // This is a relatively minor violation that significantly improves
    // discrimination in some cases and has not caused problems in
    // practice. Regardless, it is now part of the ABI in places where
    // function type discrimination is used, and it can no longer be
    // changed except on new platforms.

    if (!II)
      if (const TypedefNameDecl *Typedef = RD->getTypedefNameForAnonDecl())
        II = Typedef->getDeclName().getAsIdentifierInfo();

    if (!II) {
      OS << "<anonymous_record>";
      return;
    }
    // Length-prefixed name, like an Itanium <source-name>.
    OS << II->getLength() << II->getName();
    return;
  }
  case Type::HLSLAttributedResource:
  case Type::HLSLInlineSpirv:
    llvm_unreachable("should never get here");
    break;
  case Type::DeducedTemplateSpecialization:
  case Type::Auto:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define ABSTRACT_TYPE(Class, Base)
#define TYPE(Class, Base)
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("unexpected non-canonical or dependent type!");
    return;
  }
}
3605
/// Compute the 16-bit pointer-auth discriminator for the given (non-dependent)
/// type: function types use the C-compatible encoding above, everything else
/// uses the canonical C++ mangling. The result feeds pointer signing, so the
/// encoding choices here are ABI.
uint16_t ASTContext::getPointerAuthTypeDiscriminator(QualType T) {
  assert(!T->isDependentType() &&
         "cannot compute type discriminator of a dependent type");

  SmallString<256> Str;
  llvm::raw_svector_ostream Out(Str);

  // Function pointers/references are discriminated by their function type.
  if (T->isFunctionPointerType() || T->isFunctionReferenceType())
    T = T->getPointeeType();

  if (T->isFunctionType()) {
    encodeTypeForFunctionPointerAuth(*this, Out, T);
  } else {
    T = T.getUnqualifiedType();
    // Calls to member function pointers don't need to worry about
    // language interop or the laxness of the C type compatibility rules.
    // We just mangle the member pointer type directly, which is
    // implicitly much stricter about type matching. However, we do
    // strip any top-level exception specification before this mangling.
    // C++23 requires calls to work when the function type is convertible
    // to the pointer type by a function pointer conversion, which can
    // change the exception specification. This does not technically
    // require the exception specification to not affect representation,
    // because the function pointer conversion is still always a direct
    // value conversion and therefore an opportunity to resign the
    // pointer. (This is in contrast to e.g. qualification conversions,
    // which can be applied in nested pointer positions, effectively
    // requiring qualified and unqualified representations to match.)
    // However, it is pragmatic to ignore exception specifications
    // because it allows a certain amount of `noexcept` mismatching
    // to not become a visible ODR problem. This also leaves some
    // room for the committee to add laxness to function pointer
    // conversions in future standards.
    if (auto *MPT = T->getAs<MemberPointerType>())
      if (MPT->isMemberFunctionPointer()) {
        QualType PointeeType = MPT->getPointeeType();
        if (PointeeType->castAs<FunctionProtoType>()->getExceptionSpecType() !=
            EST_None) {
          // Rebuild the member pointer with the exception spec removed.
          QualType FT = getFunctionTypeWithExceptionSpec(PointeeType, EST_None);
          T = getMemberPointerType(FT, MPT->getQualifier(),
                                   MPT->getMostRecentCXXRecordDecl());
        }
      }
    std::unique_ptr<MangleContext> MC(createMangleContext());
    MC->mangleCanonicalTypeName(T, Out);
  }

  return llvm::getPointerAuthStableSipHash(Str);
}
3655
3656QualType ASTContext::getObjCGCQualType(QualType T,
3657 Qualifiers::GC GCAttr) const {
3658 QualType CanT = getCanonicalType(T);
3659 if (CanT.getObjCGCAttr() == GCAttr)
3660 return T;
3661
3662 if (const auto *ptr = T->getAs<PointerType>()) {
3663 QualType Pointee = ptr->getPointeeType();
3664 if (Pointee->isAnyPointerType()) {
3665 QualType ResultType = getObjCGCQualType(T: Pointee, GCAttr);
3666 return getPointerType(T: ResultType);
3667 }
3668 }
3669
3670 // If we are composing extended qualifiers together, merge together
3671 // into one ExtQuals node.
3672 QualifierCollector Quals;
3673 const Type *TypeNode = Quals.strip(type: T);
3674
3675 // If this type already has an ObjCGC specified, it cannot get
3676 // another one.
3677 assert(!Quals.hasObjCGCAttr() &&
3678 "Type cannot have multiple ObjCGCs!");
3679 Quals.addObjCGCAttr(type: GCAttr);
3680
3681 return getExtQualType(baseType: TypeNode, quals: Quals);
3682}
3683
3684QualType ASTContext::removePtrSizeAddrSpace(QualType T) const {
3685 if (const PointerType *Ptr = T->getAs<PointerType>()) {
3686 QualType Pointee = Ptr->getPointeeType();
3687 if (isPtrSizeAddressSpace(AS: Pointee.getAddressSpace())) {
3688 return getPointerType(T: removeAddrSpaceQualType(T: Pointee));
3689 }
3690 }
3691 return T;
3692}
3693
/// Return the uniqued CountAttributedType wrapping \p WrappedTy with the
/// given count expression (__counted_by/__sized_by and the _or_null
/// variants). \p DependentDecls are the declarations the count expression
/// refers to; they are tail-allocated into the node.
QualType ASTContext::getCountAttributedType(
    QualType WrappedTy, Expr *CountExpr, bool CountInBytes, bool OrNull,
    ArrayRef<TypeCoupledDeclRefInfo> DependentDecls) const {
  // The attribute only applies to pointer or array types.
  assert(WrappedTy->isPointerType() || WrappedTy->isArrayType());

  llvm::FoldingSetNodeID ID;
  CountAttributedType::Profile(ID, WrappedTy, CountExpr, CountInBytes, OrNull);

  // Reuse an existing node with the same profile if one was already built.
  void *InsertPos = nullptr;
  CountAttributedType *CATy =
      CountAttributedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (CATy)
    return QualType(CATy, 0);

  QualType CanonTy = getCanonicalType(WrappedTy);
  // Allocate extra trailing storage for the dependent declaration list.
  size_t Size = CountAttributedType::totalSizeToAlloc<TypeCoupledDeclRefInfo>(
      DependentDecls.size());
  CATy = (CountAttributedType *)Allocate(Size, TypeAlignment);
  new (CATy) CountAttributedType(WrappedTy, CanonTy, CountExpr, CountInBytes,
                                 OrNull, DependentDecls);
  Types.push_back(CATy);
  CountAttributedTypes.InsertNode(CATy, InsertPos);

  return QualType(CATy, 0);
}
3719
3720QualType
3721ASTContext::adjustType(QualType Orig,
3722 llvm::function_ref<QualType(QualType)> Adjust) const {
3723 switch (Orig->getTypeClass()) {
3724 case Type::Attributed: {
3725 const auto *AT = cast<AttributedType>(Val&: Orig);
3726 return getAttributedType(attrKind: AT->getAttrKind(),
3727 modifiedType: adjustType(Orig: AT->getModifiedType(), Adjust),
3728 equivalentType: adjustType(Orig: AT->getEquivalentType(), Adjust),
3729 attr: AT->getAttr());
3730 }
3731
3732 case Type::BTFTagAttributed: {
3733 const auto *BTFT = dyn_cast<BTFTagAttributedType>(Val&: Orig);
3734 return getBTFTagAttributedType(BTFAttr: BTFT->getAttr(),
3735 Wrapped: adjustType(Orig: BTFT->getWrappedType(), Adjust));
3736 }
3737
3738 case Type::Elaborated: {
3739 const auto *ET = cast<ElaboratedType>(Val&: Orig);
3740 return getElaboratedType(Keyword: ET->getKeyword(), NNS: ET->getQualifier(),
3741 NamedType: adjustType(Orig: ET->getNamedType(), Adjust));
3742 }
3743
3744 case Type::Paren:
3745 return getParenType(
3746 NamedType: adjustType(Orig: cast<ParenType>(Val&: Orig)->getInnerType(), Adjust));
3747
3748 case Type::Adjusted: {
3749 const auto *AT = cast<AdjustedType>(Val&: Orig);
3750 return getAdjustedType(Orig: AT->getOriginalType(),
3751 New: adjustType(Orig: AT->getAdjustedType(), Adjust));
3752 }
3753
3754 case Type::MacroQualified: {
3755 const auto *MQT = cast<MacroQualifiedType>(Val&: Orig);
3756 return getMacroQualifiedType(UnderlyingTy: adjustType(Orig: MQT->getUnderlyingType(), Adjust),
3757 MacroII: MQT->getMacroIdentifier());
3758 }
3759
3760 default:
3761 return Adjust(Orig);
3762 }
3763}
3764
3765const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T,
3766 FunctionType::ExtInfo Info) {
3767 if (T->getExtInfo() == Info)
3768 return T;
3769
3770 QualType Result;
3771 if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(Val: T)) {
3772 Result = getFunctionNoProtoType(ResultTy: FNPT->getReturnType(), Info);
3773 } else {
3774 const auto *FPT = cast<FunctionProtoType>(Val: T);
3775 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
3776 EPI.ExtInfo = Info;
3777 Result = getFunctionType(ResultTy: FPT->getReturnType(), Args: FPT->getParamTypes(), EPI);
3778 }
3779
3780 return cast<FunctionType>(Val: Result.getTypePtr());
3781}
3782
3783QualType ASTContext::adjustFunctionResultType(QualType FunctionType,
3784 QualType ResultType) {
3785 return adjustType(Orig: FunctionType, Adjust: [&](QualType Orig) {
3786 if (const auto *FNPT = Orig->getAs<FunctionNoProtoType>())
3787 return getFunctionNoProtoType(ResultTy: ResultType, Info: FNPT->getExtInfo());
3788
3789 const auto *FPT = Orig->castAs<FunctionProtoType>();
3790 return getFunctionType(ResultTy: ResultType, Args: FPT->getParamTypes(),
3791 EPI: FPT->getExtProtoInfo());
3792 });
3793}
3794
3795void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD,
3796 QualType ResultType) {
3797 FD = FD->getMostRecentDecl();
3798 while (true) {
3799 FD->setType(adjustFunctionResultType(FunctionType: FD->getType(), ResultType));
3800 if (FunctionDecl *Next = FD->getPreviousDecl())
3801 FD = Next;
3802 else
3803 break;
3804 }
3805 if (ASTMutationListener *L = getASTMutationListener())
3806 L->DeducedReturnType(FD, ReturnType: ResultType);
3807}
3808
3809/// Get a function type and produce the equivalent function type with the
3810/// specified exception specification. Type sugar that can be present on a
3811/// declaration of a function with an exception specification is permitted
3812/// and preserved. Other type sugar (for instance, typedefs) is not.
3813QualType ASTContext::getFunctionTypeWithExceptionSpec(
3814 QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const {
3815 return adjustType(Orig, Adjust: [&](QualType Ty) {
3816 const auto *Proto = Ty->castAs<FunctionProtoType>();
3817 return getFunctionType(ResultTy: Proto->getReturnType(), Args: Proto->getParamTypes(),
3818 EPI: Proto->getExtProtoInfo().withExceptionSpec(ESI));
3819 });
3820}
3821
3822bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T,
3823 QualType U) const {
3824 return hasSameType(T1: T, T2: U) ||
3825 (getLangOpts().CPlusPlus17 &&
3826 hasSameType(T1: getFunctionTypeWithExceptionSpec(Orig: T, ESI: EST_None),
3827 T2: getFunctionTypeWithExceptionSpec(Orig: U, ESI: EST_None)));
3828}
3829
3830QualType ASTContext::getFunctionTypeWithoutPtrSizes(QualType T) {
3831 if (const auto *Proto = T->getAs<FunctionProtoType>()) {
3832 QualType RetTy = removePtrSizeAddrSpace(T: Proto->getReturnType());
3833 SmallVector<QualType, 16> Args(Proto->param_types().size());
3834 for (unsigned i = 0, n = Args.size(); i != n; ++i)
3835 Args[i] = removePtrSizeAddrSpace(T: Proto->param_types()[i]);
3836 return getFunctionType(ResultTy: RetTy, Args, EPI: Proto->getExtProtoInfo());
3837 }
3838
3839 if (const FunctionNoProtoType *Proto = T->getAs<FunctionNoProtoType>()) {
3840 QualType RetTy = removePtrSizeAddrSpace(T: Proto->getReturnType());
3841 return getFunctionNoProtoType(ResultTy: RetTy, Info: Proto->getExtInfo());
3842 }
3843
3844 return T;
3845}
3846
3847bool ASTContext::hasSameFunctionTypeIgnoringPtrSizes(QualType T, QualType U) {
3848 return hasSameType(T1: T, T2: U) ||
3849 hasSameType(T1: getFunctionTypeWithoutPtrSizes(T),
3850 T2: getFunctionTypeWithoutPtrSizes(T: U));
3851}
3852
3853QualType ASTContext::getFunctionTypeWithoutParamABIs(QualType T) const {
3854 if (const auto *Proto = T->getAs<FunctionProtoType>()) {
3855 FunctionProtoType::ExtProtoInfo EPI = Proto->getExtProtoInfo();
3856 EPI.ExtParameterInfos = nullptr;
3857 return getFunctionType(ResultTy: Proto->getReturnType(), Args: Proto->param_types(), EPI);
3858 }
3859 return T;
3860}
3861
3862bool ASTContext::hasSameFunctionTypeIgnoringParamABI(QualType T,
3863 QualType U) const {
3864 return hasSameType(T1: T, T2: U) || hasSameType(T1: getFunctionTypeWithoutParamABIs(T),
3865 T2: getFunctionTypeWithoutParamABIs(T: U));
3866}
3867
/// Update \p FD's type to carry the exception specification \p ESI. When
/// \p AsWritten is set, the declaration's TypeSourceInfo is patched as well
/// so the as-written type stays in sync.
void ASTContext::adjustExceptionSpec(
    FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI,
    bool AsWritten) {
  // Update the type.
  QualType Updated =
      getFunctionTypeWithExceptionSpec(FD->getType(), ESI);
  FD->setType(Updated);

  if (!AsWritten)
    return;

  // Update the type in the type source information too.
  if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) {
    // If the type and the type-as-written differ, we may need to update
    // the type-as-written too.
    if (TSInfo->getType() != FD->getType())
      Updated = getFunctionTypeWithExceptionSpec(TSInfo->getType(), ESI);

    // FIXME: When we get proper type location information for exceptions,
    // we'll also have to rebuild the TypeSourceInfo. For now, we just patch
    // up the TypeSourceInfo; this only works because the exception spec does
    // not change the TypeLoc layout (asserted below).
    assert(TypeLoc::getFullDataSizeForType(Updated) ==
               TypeLoc::getFullDataSizeForType(TSInfo->getType()) &&
           "TypeLoc size mismatch from updating exception specification");
    TSInfo->overrideType(Updated);
  }
}
3895
/// getComplexType - Return the uniqued reference to the type for a complex
/// number with the specified element type.
QualType ASTContext::getComplexType(QualType T) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ComplexType::Profile(ID, T);

  void *InsertPos = nullptr;
  if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(CT, 0);

  // If the pointee type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getComplexType(getCanonicalType(T));

    // Get the new insert position for the node we care about: the recursive
    // call above may have inserted nodes and invalidated InsertPos.
    ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!");
    (void)NewIP;
  }
  auto *New = new (*this, alignof(ComplexType)) ComplexType(T, Canonical);
  Types.push_back(New);
  ComplexTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
3923
3924/// getPointerType - Return the uniqued reference to the type for a pointer to
3925/// the specified type.
3926QualType ASTContext::getPointerType(QualType T) const {
3927 // Unique pointers, to guarantee there is only one pointer of a particular
3928 // structure.
3929 llvm::FoldingSetNodeID ID;
3930 PointerType::Profile(ID, Pointee: T);
3931
3932 void *InsertPos = nullptr;
3933 if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos))
3934 return QualType(PT, 0);
3935
3936 // If the pointee type isn't canonical, this won't be a canonical type either,
3937 // so fill in the canonical type field.
3938 QualType Canonical;
3939 if (!T.isCanonical()) {
3940 Canonical = getPointerType(T: getCanonicalType(T));
3941
3942 // Get the new insert position for the node we care about.
3943 PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
3944 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3945 }
3946 auto *New = new (*this, alignof(PointerType)) PointerType(T, Canonical);
3947 Types.push_back(Elt: New);
3948 PointerTypes.InsertNode(N: New, InsertPos);
3949 return QualType(New, 0);
3950}
3951
/// Return the uniqued AdjustedType node recording that \p Orig was adjusted
/// to \p New while keeping \p Orig available as sugar.
QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const {
  llvm::FoldingSetNodeID ID;
  AdjustedType::Profile(ID, Orig, New);
  void *InsertPos = nullptr;
  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (AT)
    return QualType(AT, 0);

  QualType Canonical = getCanonicalType(New);

  // Get the new insert position for the node we care about:
  // getCanonicalType may have created nodes and invalidated InsertPos.
  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  assert(!AT && "Shouldn't be in the map!");

  AT = new (*this, alignof(AdjustedType))
      AdjustedType(Type::Adjusted, Orig, New, Canonical);
  Types.push_back(AT);
  AdjustedTypes.InsertNode(AT, InsertPos);
  return QualType(AT, 0);
}
3972
/// Return the uniqued DecayedType node recording that \p Orig decayed to
/// \p Decayed. DecayedType derives from AdjustedType, so it shares the
/// AdjustedTypes folding set and profile.
QualType ASTContext::getDecayedType(QualType Orig, QualType Decayed) const {
  llvm::FoldingSetNodeID ID;
  AdjustedType::Profile(ID, Orig, Decayed);
  void *InsertPos = nullptr;
  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (AT)
    return QualType(AT, 0);

  QualType Canonical = getCanonicalType(Decayed);

  // Get the new insert position for the node we care about:
  // getCanonicalType may have created nodes and invalidated InsertPos.
  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  assert(!AT && "Shouldn't be in the map!");

  AT = new (*this, alignof(DecayedType)) DecayedType(Orig, Decayed, Canonical);
  Types.push_back(AT);
  AdjustedTypes.InsertNode(AT, InsertPos);
  return QualType(AT, 0);
}
3992
3993QualType ASTContext::getDecayedType(QualType T) const {
3994 assert((T->isArrayType() || T->isFunctionType()) && "T does not decay");
3995
3996 QualType Decayed;
3997
3998 // C99 6.7.5.3p7:
3999 // A declaration of a parameter as "array of type" shall be
4000 // adjusted to "qualified pointer to type", where the type
4001 // qualifiers (if any) are those specified within the [ and ] of
4002 // the array type derivation.
4003 if (T->isArrayType())
4004 Decayed = getArrayDecayedType(T);
4005
4006 // C99 6.7.5.3p8:
4007 // A declaration of a parameter as "function returning type"
4008 // shall be adjusted to "pointer to function returning type", as
4009 // in 6.3.2.1.
4010 if (T->isFunctionType())
4011 Decayed = getPointerType(T);
4012
4013 return getDecayedType(Orig: T, Decayed);
4014}
4015
/// Return the uniqued ArrayParameterType for the constant array type \p Ty —
/// an array-typed parameter that keeps array (rather than decayed pointer)
/// semantics.
QualType ASTContext::getArrayParameterType(QualType Ty) const {
  if (Ty->isArrayParameterType())
    return Ty;
  assert(Ty->isConstantArrayType() && "Ty must be an array type.");
  // Desugar so we profile against the underlying ConstantArrayType itself.
  QualType DTy = Ty.getDesugaredType(*this);
  const auto *ATy = cast<ConstantArrayType>(DTy);
  llvm::FoldingSetNodeID ID;
  ATy->Profile(ID, *this, ATy->getElementType(), ATy->getZExtSize(),
               ATy->getSizeExpr(), ATy->getSizeModifier(),
               ATy->getIndexTypeQualifiers().getAsOpaqueValue());
  void *InsertPos = nullptr;
  ArrayParameterType *AT =
      ArrayParameterTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (AT)
    return QualType(AT, 0);

  QualType Canonical;
  if (!DTy.isCanonical()) {
    Canonical = getArrayParameterType(getCanonicalType(Ty));

    // Get the new insert position for the node we care about: the recursive
    // call above may have inserted nodes and invalidated InsertPos.
    AT = ArrayParameterTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!AT && "Shouldn't be in the map!");
  }

  AT = new (*this, alignof(ArrayParameterType))
      ArrayParameterType(ATy, Canonical);
  Types.push_back(AT);
  ArrayParameterTypes.InsertNode(AT, InsertPos);
  return QualType(AT, 0);
}
4047
/// getBlockPointerType - Return the uniqued reference to the type for
/// a pointer to the specified block.
QualType ASTContext::getBlockPointerType(QualType T) const {
  assert(T->isFunctionType() && "block of function types only");
  // Unique pointers, to guarantee there is only one block of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  BlockPointerType::Profile(ID, T);

  void *InsertPos = nullptr;
  if (BlockPointerType *PT =
          BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the block pointee type isn't canonical, this won't be a canonical
  // type either so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getBlockPointerType(getCanonicalType(T));

    // Get the new insert position for the node we care about: the recursive
    // call above may have inserted nodes and invalidated InsertPos.
    BlockPointerType *NewIP =
        BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!");
    (void)NewIP;
  }
  auto *New =
      new (*this, alignof(BlockPointerType)) BlockPointerType(T, Canonical);
  Types.push_back(New);
  BlockPointerTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
4079
/// getLValueReferenceType - Return the uniqued reference to the type for an
/// lvalue reference to the specified type. \p SpelledAsLValue is false when
/// the reference arises from reference collapsing rather than being written
/// with '&'.
QualType
ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
  assert((!T->isPlaceholderType() ||
          T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
         "Unresolved placeholder type");

  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ReferenceType::Profile(ID, T, SpelledAsLValue);

  void *InsertPos = nullptr;
  if (LValueReferenceType *RT =
          LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(RT, 0);

  // A reference-to-reference collapses canonically, so detect an inner
  // reference here.
  const auto *InnerRef = T->getAs<ReferenceType>();

  // If the referencee type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!SpelledAsLValue || InnerRef || !T.isCanonical()) {
    // Collapse inner references: the canonical type refers to the innermost
    // pointee, per C++ reference-collapsing rules.
    QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
    Canonical = getLValueReferenceType(getCanonicalType(PointeeType));

    // Get the new insert position for the node we care about: the recursive
    // call above may have inserted nodes and invalidated InsertPos.
    LValueReferenceType *NewIP =
        LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!");
    (void)NewIP;
  }

  auto *New = new (*this, alignof(LValueReferenceType))
      LValueReferenceType(T, Canonical, SpelledAsLValue);
  Types.push_back(New);
  LValueReferenceTypes.InsertNode(New, InsertPos);

  return QualType(New, 0);
}
4120
/// getRValueReferenceType - Return the uniqued reference to the type for an
/// rvalue reference to the specified type.
QualType ASTContext::getRValueReferenceType(QualType T) const {
  assert((!T->isPlaceholderType() ||
          T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
         "Unresolved placeholder type");

  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ReferenceType::Profile(ID, T, false);

  void *InsertPos = nullptr;
  if (RValueReferenceType *RT =
          RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(RT, 0);

  // A reference-to-reference collapses canonically, so detect an inner
  // reference here.
  const auto *InnerRef = T->getAs<ReferenceType>();

  // If the referencee type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (InnerRef || !T.isCanonical()) {
    // Collapse inner references per C++ reference-collapsing rules.
    QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
    Canonical = getRValueReferenceType(getCanonicalType(PointeeType));

    // Get the new insert position for the node we care about: the recursive
    // call above may have inserted nodes and invalidated InsertPos.
    RValueReferenceType *NewIP =
        RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!");
    (void)NewIP;
  }

  auto *New = new (*this, alignof(RValueReferenceType))
      RValueReferenceType(T, Canonical);
  Types.push_back(New);
  RValueReferenceTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
4159
/// getMemberPointerType - Return the uniqued reference to the type for a
/// member pointer to \p T in the class named by \p Qualifier / \p Cls.
/// At least one of \p Qualifier and \p Cls must be non-null; the missing
/// one is derived from the other.
QualType ASTContext::getMemberPointerType(QualType T,
                                          NestedNameSpecifier *Qualifier,
                                          const CXXRecordDecl *Cls) const {
  if (!Qualifier) {
    assert(Cls && "At least one of Qualifier or Cls must be provided");
    Qualifier = NestedNameSpecifier::Create(Context: *this, /*Prefix=*/nullptr,
                                            T: getTypeDeclType(Decl: Cls).getTypePtr());
  } else if (!Cls) {
    Cls = Qualifier->getAsRecordDecl();
  }
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  MemberPointerType::Profile(ID, Pointee: T, Qualifier, Cls);

  void *InsertPos = nullptr;
  if (MemberPointerType *PT =
      MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // Compute the canonical form of the qualifier: built directly from the
  // canonical class declaration when the class is known, otherwise by
  // canonicalizing the spelled nested-name-specifier.
  NestedNameSpecifier *CanonicalQualifier = [&] {
    if (!Cls)
      return getCanonicalNestedNameSpecifier(NNS: Qualifier);
    NestedNameSpecifier *R = NestedNameSpecifier::Create(
        Context: *this, /*Prefix=*/nullptr, T: Cls->getCanonicalDecl()->getTypeForDecl());
    assert(R == getCanonicalNestedNameSpecifier(R));
    return R;
  }();
  // If the pointee or class type isn't canonical, this won't be a canonical
  // type either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical() || Qualifier != CanonicalQualifier) {
    Canonical =
        getMemberPointerType(T: getCanonicalType(T), Qualifier: CanonicalQualifier, Cls);
    assert(!cast<MemberPointerType>(Canonical)->isSugared());
    // Get the new insert position for the node we care about; the recursive
    // call may have invalidated the earlier InsertPos.
    [[maybe_unused]] MemberPointerType *NewIP =
        MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!");
  }
  auto *New = new (*this, alignof(MemberPointerType))
      MemberPointerType(T, Qualifier, Canonical);
  Types.push_back(Elt: New);
  MemberPointerTypes.InsertNode(N: New, InsertPos);
  return QualType(New, 0);
}
4206
/// getConstantArrayType - Return the unique reference to the type for an
/// array of the specified element type.
QualType ASTContext::getConstantArrayType(QualType EltTy,
                                          const llvm::APInt &ArySizeIn,
                                          const Expr *SizeExpr,
                                          ArraySizeModifier ASM,
                                          unsigned IndexTypeQuals) const {
  assert((EltTy->isDependentType() ||
          EltTy->isIncompleteType() || EltTy->isConstantSizeType()) &&
         "Constant array of VLAs is illegal!");

  // We only need the size as part of the type if it's instantiation-dependent.
  if (SizeExpr && !SizeExpr->isInstantiationDependent())
    SizeExpr = nullptr;

  // Convert the array size into a canonical width matching the pointer size for
  // the target.
  llvm::APInt ArySize(ArySizeIn);
  ArySize = ArySize.zextOrTrunc(width: Target->getMaxPointerWidth());

  llvm::FoldingSetNodeID ID;
  ConstantArrayType::Profile(ID, Ctx: *this, ET: EltTy, ArraySize: ArySize.getZExtValue(), SizeExpr,
                             SizeMod: ASM, TypeQuals: IndexTypeQuals);

  void *InsertPos = nullptr;
  if (ConstantArrayType *ATP =
      ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(ATP, 0);

  // If the element type isn't canonical or has qualifiers, or the array bound
  // is instantiation-dependent, this won't be a canonical type either, so fill
  // in the canonical type field.
  QualType Canon;
  // FIXME: Check below should look for qualifiers behind sugar.
  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers() || SizeExpr) {
    // Qualifiers are pulled off the element type and re-applied to the
    // array type as a whole (C-style array qualifier propagation).
    SplitQualType canonSplit = getCanonicalType(T: EltTy).split();
    Canon = getConstantArrayType(EltTy: QualType(canonSplit.Ty, 0), ArySizeIn: ArySize, SizeExpr: nullptr,
                                 ASM, IndexTypeQuals);
    Canon = getQualifiedType(T: Canon, Qs: canonSplit.Quals);

    // Get the new insert position for the node we care about; the recursive
    // call may have invalidated the earlier InsertPos.
    ConstantArrayType *NewIP =
        ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = ConstantArrayType::Create(Ctx: *this, ET: EltTy, Can: Canon, Sz: ArySize, SzExpr: SizeExpr,
                                        SzMod: ASM, Qual: IndexTypeQuals);
  ConstantArrayTypes.InsertNode(N: New, InsertPos);
  Types.push_back(Elt: New);
  return QualType(New, 0);
}
4259
/// getVariableArrayDecayedType - Turns the given type, which may be
/// variably-modified, into the corresponding type with all the known
/// sizes replaced with [*].
QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
  // Vastly most common case.
  if (!type->isVariablyModifiedType()) return type;

  QualType result;

  // Desugar first; the switch below only has to handle canonical-ish type
  // classes. Top-level qualifiers are re-applied at the end.
  SplitQualType split = type.getSplitDesugaredType();
  const Type *ty = split.Ty;
  switch (ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("didn't desugar past all non-canonical types?");

  // These types should never be variably-modified.
  case Type::Builtin:
  case Type::Complex:
  case Type::Vector:
  case Type::DependentVector:
  case Type::ExtVector:
  case Type::DependentSizedExtVector:
  case Type::ConstantMatrix:
  case Type::DependentSizedMatrix:
  case Type::DependentAddressSpace:
  case Type::ObjCObject:
  case Type::ObjCInterface:
  case Type::ObjCObjectPointer:
  case Type::Record:
  case Type::Enum:
  case Type::UnresolvedUsing:
  case Type::TypeOfExpr:
  case Type::TypeOf:
  case Type::Decltype:
  case Type::UnaryTransform:
  case Type::DependentName:
  case Type::InjectedClassName:
  case Type::TemplateSpecialization:
  case Type::DependentTemplateSpecialization:
  case Type::TemplateTypeParm:
  case Type::SubstTemplateTypeParmPack:
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
  case Type::PackExpansion:
  case Type::PackIndexing:
  case Type::BitInt:
  case Type::DependentBitInt:
  case Type::ArrayParameter:
  case Type::HLSLAttributedResource:
  case Type::HLSLInlineSpirv:
    llvm_unreachable("type should never be variably-modified");

  // These types can be variably-modified but should never need to
  // further decay.
  case Type::FunctionNoProto:
  case Type::FunctionProto:
  case Type::BlockPointer:
  case Type::MemberPointer:
  case Type::Pipe:
    return type;

  // These types can be variably-modified. All these modifications
  // preserve structure except as noted by comments.
  // TODO: if we ever care about optimizing VLAs, there are no-op
  // optimizations available here.
  case Type::Pointer:
    result = getPointerType(T: getVariableArrayDecayedType(
        type: cast<PointerType>(Val: ty)->getPointeeType()));
    break;

  case Type::LValueReference: {
    const auto *lv = cast<LValueReferenceType>(Val: ty);
    result = getLValueReferenceType(
        T: getVariableArrayDecayedType(type: lv->getPointeeType()),
        SpelledAsLValue: lv->isSpelledAsLValue());
    break;
  }

  case Type::RValueReference: {
    const auto *lv = cast<RValueReferenceType>(Val: ty);
    result = getRValueReferenceType(
        T: getVariableArrayDecayedType(type: lv->getPointeeType()));
    break;
  }

  case Type::Atomic: {
    const auto *at = cast<AtomicType>(Val: ty);
    result = getAtomicType(T: getVariableArrayDecayedType(type: at->getValueType()));
    break;
  }

  case Type::ConstantArray: {
    // Constant bound is kept; only the element type may need decaying.
    const auto *cat = cast<ConstantArrayType>(Val: ty);
    result = getConstantArrayType(
                 EltTy: getVariableArrayDecayedType(type: cat->getElementType()),
                 ArySizeIn: cat->getSize(),
                 SizeExpr: cat->getSizeExpr(),
                 ASM: cat->getSizeModifier(),
                 IndexTypeQuals: cat->getIndexTypeCVRQualifiers());
    break;
  }

  case Type::DependentSizedArray: {
    const auto *dat = cast<DependentSizedArrayType>(Val: ty);
    result = getDependentSizedArrayType(
        EltTy: getVariableArrayDecayedType(type: dat->getElementType()), NumElts: dat->getSizeExpr(),
        ASM: dat->getSizeModifier(), IndexTypeQuals: dat->getIndexTypeCVRQualifiers());
    break;
  }

  // Turn incomplete types into [*] types.
  case Type::IncompleteArray: {
    const auto *iat = cast<IncompleteArrayType>(Val: ty);
    result =
        getVariableArrayType(EltTy: getVariableArrayDecayedType(type: iat->getElementType()),
                             /*size*/ NumElts: nullptr, ASM: ArraySizeModifier::Normal,
                             IndexTypeQuals: iat->getIndexTypeCVRQualifiers());
    break;
  }

  // Turn VLA types into [*] types.
  case Type::VariableArray: {
    // The size expression is dropped; ArraySizeModifier::Star marks the
    // resulting type as a [*] array.
    const auto *vat = cast<VariableArrayType>(Val: ty);
    result =
        getVariableArrayType(EltTy: getVariableArrayDecayedType(type: vat->getElementType()),
                             /*size*/ NumElts: nullptr, ASM: ArraySizeModifier::Star,
                             IndexTypeQuals: vat->getIndexTypeCVRQualifiers());
    break;
  }
  }

  // Apply the top-level qualifiers from the original.
  return getQualifiedType(T: result, Qs: split.Quals);
}
4397
/// getVariableArrayType - Returns a non-unique reference to the type for a
/// variable array of the specified element type.
QualType ASTContext::getVariableArrayType(QualType EltTy, Expr *NumElts,
                                          ArraySizeModifier ASM,
                                          unsigned IndexTypeQuals) const {
  // Since we don't unique expressions, it isn't possible to unique VLA's
  // that have an expression provided for their size. Hence no folding set
  // lookup here, unlike the other array-type getters.
  QualType Canon;

  // Be sure to pull qualifiers off the element type.
  // FIXME: Check below should look for qualifiers behind sugar.
  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
    SplitQualType canonSplit = getCanonicalType(T: EltTy).split();
    Canon = getVariableArrayType(EltTy: QualType(canonSplit.Ty, 0), NumElts, ASM,
                                 IndexTypeQuals);
    // Element-type qualifiers are re-applied to the array type as a whole.
    Canon = getQualifiedType(T: Canon, Qs: canonSplit.Quals);
  }

  auto *New = new (*this, alignof(VariableArrayType))
      VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals);

  VariableArrayTypes.push_back(x: New);
  Types.push_back(Elt: New);
  return QualType(New, 0);
}
4423
/// getDependentSizedArrayType - Returns a non-unique reference to
/// the type for a dependently-sized array of the specified element
/// type.
QualType
ASTContext::getDependentSizedArrayType(QualType elementType, Expr *numElements,
                                       ArraySizeModifier ASM,
                                       unsigned elementTypeQuals) const {
  assert((!numElements || numElements->isTypeDependent() ||
          numElements->isValueDependent()) &&
         "Size must be type- or value-dependent!");

  SplitQualType canonElementType = getCanonicalType(T: elementType).split();

  // Profile against the canonical element type when a size expression is
  // present; size-less arrays are uniqued on the spelled element type.
  void *insertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  DependentSizedArrayType::Profile(
      ID, Context: *this, ET: numElements ? QualType(canonElementType.Ty, 0) : elementType,
      SizeMod: ASM, TypeQuals: elementTypeQuals, E: numElements);

  // Look for an existing type with these properties.
  DependentSizedArrayType *canonTy =
    DependentSizedArrayTypes.FindNodeOrInsertPos(ID, InsertPos&: insertPos);

  // Dependently-sized array types that do not have a specified number
  // of elements will have their sizes deduced from a dependent
  // initializer.
  if (!numElements) {
    if (canonTy)
      return QualType(canonTy, 0);

    auto *newType = new (*this, alignof(DependentSizedArrayType))
        DependentSizedArrayType(elementType, QualType(), numElements, ASM,
                                elementTypeQuals);
    DependentSizedArrayTypes.InsertNode(N: newType, InsertPos: insertPos);
    Types.push_back(Elt: newType);
    return QualType(newType, 0);
  }

  // If we don't have one, build one.
  if (!canonTy) {
    canonTy = new (*this, alignof(DependentSizedArrayType))
        DependentSizedArrayType(QualType(canonElementType.Ty, 0), QualType(),
                                numElements, ASM, elementTypeQuals);
    DependentSizedArrayTypes.InsertNode(N: canonTy, InsertPos: insertPos);
    Types.push_back(Elt: canonTy);
  }

  // Apply qualifiers from the element type to the array.
  QualType canon = getQualifiedType(T: QualType(canonTy,0),
                                    Qs: canonElementType.Quals);

  // If we didn't need extra canonicalization for the element type or the size
  // expression, then just use that as our result.
  if (QualType(canonElementType.Ty, 0) == elementType &&
      canonTy->getSizeExpr() == numElements)
    return canon;

  // Otherwise, we need to build a type which follows the spelling
  // of the element type.
  auto *sugaredType = new (*this, alignof(DependentSizedArrayType))
      DependentSizedArrayType(elementType, canon, numElements, ASM,
                              elementTypeQuals);
  Types.push_back(Elt: sugaredType);
  return QualType(sugaredType, 0);
}
4489
/// getIncompleteArrayType - Return the unique reference to the type for an
/// incomplete array (e.g. 'int[]') of the specified element type.
QualType ASTContext::getIncompleteArrayType(QualType elementType,
                                            ArraySizeModifier ASM,
                                            unsigned elementTypeQuals) const {
  llvm::FoldingSetNodeID ID;
  IncompleteArrayType::Profile(ID, ET: elementType, SizeMod: ASM, TypeQuals: elementTypeQuals);

  void *insertPos = nullptr;
  if (IncompleteArrayType *iat =
       IncompleteArrayTypes.FindNodeOrInsertPos(ID, InsertPos&: insertPos))
    return QualType(iat, 0);

  // If the element type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.  We also have to pull
  // qualifiers off the element type.
  QualType canon;

  // FIXME: Check below should look for qualifiers behind sugar.
  if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
    SplitQualType canonSplit = getCanonicalType(T: elementType).split();
    canon = getIncompleteArrayType(elementType: QualType(canonSplit.Ty, 0),
                                   ASM, elementTypeQuals);
    // Element-type qualifiers are re-applied to the array type as a whole.
    canon = getQualifiedType(T: canon, Qs: canonSplit.Quals);

    // Get the new insert position for the node we care about; the recursive
    // call may have invalidated the earlier insertPos.
    IncompleteArrayType *existing =
      IncompleteArrayTypes.FindNodeOrInsertPos(ID, InsertPos&: insertPos);
    assert(!existing && "Shouldn't be in the map!"); (void) existing;
  }

  auto *newType = new (*this, alignof(IncompleteArrayType))
      IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);

  IncompleteArrayTypes.InsertNode(N: newType, InsertPos: insertPos);
  Types.push_back(Elt: newType);
  return QualType(newType, 0);
}
4526
/// getBuiltinVectorTypeInfo - Decompose a target builtin scalable-vector
/// type (AArch64 SVE or RISC-V V) into its element type, scalable element
/// count, and number of constituent vectors (NF), driven by the .def files.
ASTContext::BuiltinVectorTypeInfo
ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const {
#define SVE_INT_ELTTY(BITS, ELTS, SIGNED, NUMVECTORS)                          \
  {getIntTypeForBitwidth(BITS, SIGNED), llvm::ElementCount::getScalable(ELTS), \
   NUMVECTORS};

#define SVE_ELTTY(ELTTY, ELTS, NUMVECTORS)                                     \
  {ELTTY, llvm::ElementCount::getScalable(ELTS), NUMVECTORS};

  switch (Ty->getKind()) {
  default:
    llvm_unreachable("Unsupported builtin vector type");

// AArch64 SVE vector and predicate types.
#define SVE_VECTOR_TYPE_INT(Name, MangledName, Id, SingletonId, NumEls,        \
                            ElBits, NF, IsSigned)                              \
  case BuiltinType::Id:                                                        \
    return {getIntTypeForBitwidth(ElBits, IsSigned),                           \
            llvm::ElementCount::getScalable(NumEls), NF};
#define SVE_VECTOR_TYPE_FLOAT(Name, MangledName, Id, SingletonId, NumEls,      \
                              ElBits, NF)                                      \
  case BuiltinType::Id:                                                        \
    return {ElBits == 16 ? HalfTy : (ElBits == 32 ? FloatTy : DoubleTy),       \
            llvm::ElementCount::getScalable(NumEls), NF};
#define SVE_VECTOR_TYPE_BFLOAT(Name, MangledName, Id, SingletonId, NumEls,     \
                               ElBits, NF)                                     \
  case BuiltinType::Id:                                                        \
    return {BFloat16Ty, llvm::ElementCount::getScalable(NumEls), NF};
#define SVE_VECTOR_TYPE_MFLOAT(Name, MangledName, Id, SingletonId, NumEls,     \
                               ElBits, NF)                                     \
  case BuiltinType::Id:                                                        \
    return {MFloat8Ty, llvm::ElementCount::getScalable(NumEls), NF};
#define SVE_PREDICATE_TYPE_ALL(Name, MangledName, Id, SingletonId, NumEls, NF) \
  case BuiltinType::Id:                                                        \
    return {BoolTy, llvm::ElementCount::getScalable(NumEls), NF};
#include "clang/Basic/AArch64ACLETypes.def"

// RISC-V V vector and mask types.
#define RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, ElBits, NF,         \
                            IsSigned)                                          \
  case BuiltinType::Id:                                                        \
    return {getIntTypeForBitwidth(ElBits, IsSigned),                           \
            llvm::ElementCount::getScalable(NumEls), NF};
#define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF)       \
  case BuiltinType::Id:                                                        \
    return {ElBits == 16 ? Float16Ty : (ElBits == 32 ? FloatTy : DoubleTy),    \
            llvm::ElementCount::getScalable(NumEls), NF};
#define RVV_VECTOR_TYPE_BFLOAT(Name, Id, SingletonId, NumEls, ElBits, NF)      \
  case BuiltinType::Id:                                                        \
    return {BFloat16Ty, llvm::ElementCount::getScalable(NumEls), NF};
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls)                      \
  case BuiltinType::Id:                                                        \
    return {BoolTy, llvm::ElementCount::getScalable(NumEls), 1};
#include "clang/Basic/RISCVVTypes.def"
  }
}
4581
/// getExternrefType - Return a WebAssembly externref type, which represents an
/// opaque reference to a host value.
QualType ASTContext::getWebAssemblyExternrefType() const {
  // Only valid on a Wasm target with the reference-types feature enabled.
  if (Target->getTriple().isWasm() && Target->hasFeature(Feature: "reference-types")) {
// Expand every Wasm reference type and pick out the externref singleton.
#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS)                  \
  if (BuiltinType::Id == BuiltinType::WasmExternRef)                           \
    return SingletonId;
#include "clang/Basic/WebAssemblyReferenceTypes.def"
  }
  llvm_unreachable(
      "shouldn't try to generate type externref outside WebAssembly target");
}
4594
/// getScalableVectorType - Return the unique reference to a scalable vector
/// type of the specified element type and size. VectorType must be a built-in
/// type.
/// Returns a null QualType if no target builtin scalable type matches.
QualType ASTContext::getScalableVectorType(QualType EltTy, unsigned NumElts,
                                           unsigned NumFields) const {
  if (Target->hasAArch64ACLETypes()) {
    uint64_t EltTySize = getTypeSize(T: EltTy);

// Match against each SVE builtin by element kind, element bit-width,
// total element count (NumEls * NF), and field count.
#define SVE_VECTOR_TYPE_INT(Name, MangledName, Id, SingletonId, NumEls,        \
                            ElBits, NF, IsSigned)                              \
  if (EltTy->hasIntegerRepresentation() && !EltTy->isBooleanType() &&          \
      EltTy->hasSignedIntegerRepresentation() == IsSigned &&                   \
      EltTySize == ElBits && NumElts == (NumEls * NF) && NumFields == 1) {     \
    return SingletonId;                                                        \
  }
#define SVE_VECTOR_TYPE_FLOAT(Name, MangledName, Id, SingletonId, NumEls,      \
                              ElBits, NF)                                      \
  if (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() &&        \
      EltTySize == ElBits && NumElts == (NumEls * NF) && NumFields == 1) {     \
    return SingletonId;                                                        \
  }
#define SVE_VECTOR_TYPE_BFLOAT(Name, MangledName, Id, SingletonId, NumEls,     \
                               ElBits, NF)                                     \
  if (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() &&         \
      EltTySize == ElBits && NumElts == (NumEls * NF) && NumFields == 1) {     \
    return SingletonId;                                                        \
  }
#define SVE_VECTOR_TYPE_MFLOAT(Name, MangledName, Id, SingletonId, NumEls,     \
                               ElBits, NF)                                     \
  if (EltTy->isMFloat8Type() && EltTySize == ElBits &&                         \
      NumElts == (NumEls * NF) && NumFields == 1) {                            \
    return SingletonId;                                                        \
  }
#define SVE_PREDICATE_TYPE_ALL(Name, MangledName, Id, SingletonId, NumEls, NF) \
  if (EltTy->isBooleanType() && NumElts == (NumEls * NF) && NumFields == 1)    \
    return SingletonId;
#include "clang/Basic/AArch64ACLETypes.def"
  } else if (Target->hasRISCVVTypes()) {
    uint64_t EltTySize = getTypeSize(T: EltTy);
// RVV matching additionally distinguishes bfloat16 from other FP types
// via the IsFP/IsBF flags, and compares NumFields against NF directly.
#define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned,   \
                        IsFP, IsBF)                                            \
  if (!EltTy->isBooleanType() &&                                               \
      ((EltTy->hasIntegerRepresentation() &&                                   \
        EltTy->hasSignedIntegerRepresentation() == IsSigned) ||                \
       (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() &&      \
        IsFP && !IsBF) ||                                                      \
       (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() &&       \
        IsBF && !IsFP)) &&                                                     \
      EltTySize == ElBits && NumElts == NumEls && NumFields == NF)             \
    return SingletonId;
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls)                      \
  if (EltTy->isBooleanType() && NumElts == NumEls)                             \
    return SingletonId;
#include "clang/Basic/RISCVVTypes.def"
  }
  return QualType();
}
4652
/// getVectorType - Return the unique reference to a vector type of
/// the specified element type and size. VectorType must be a built-in type.
QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
                                   VectorKind VecKind) const {
  assert(vecType->isBuiltinType() ||
         (vecType->isBitIntType() &&
          // Only support _BitInt elements with byte-sized power of 2 NumBits.
          llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits())));

  // Check if we've already instantiated a vector of this type.
  llvm::FoldingSetNodeID ID;
  VectorType::Profile(ID, ElementType: vecType, NumElements: NumElts, TypeClass: Type::Vector, VecKind);

  void *InsertPos = nullptr;
  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(VTP, 0);

  // If the element type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!vecType.isCanonical()) {
    Canonical = getVectorType(vecType: getCanonicalType(T: vecType), NumElts, VecKind);

    // Get the new insert position for the node we care about; the recursive
    // call may have invalidated the earlier InsertPos.
    VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, alignof(VectorType))
      VectorType(vecType, NumElts, Canonical, VecKind);
  VectorTypes.InsertNode(N: New, InsertPos);
  Types.push_back(Elt: New);
  return QualType(New, 0);
}
4686
/// getDependentVectorType - Return a vector type whose element count is a
/// dependent expression (GNU vector_size with a dependent size).
QualType ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr,
                                            SourceLocation AttrLoc,
                                            VectorKind VecKind) const {
  // Unique on the canonical element type plus the size expression.
  llvm::FoldingSetNodeID ID;
  DependentVectorType::Profile(ID, Context: *this, ElementType: getCanonicalType(T: VecType), SizeExpr,
                               VecKind);
  void *InsertPos = nullptr;
  DependentVectorType *Canon =
      DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
  DependentVectorType *New;

  if (Canon) {
    // We already have a canonical node: build a sugared node that points at
    // it. Note the sugared node is not inserted into the folding set.
    New = new (*this, alignof(DependentVectorType)) DependentVectorType(
        VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind);
  } else {
    QualType CanonVecTy = getCanonicalType(T: VecType);
    if (CanonVecTy == VecType) {
      // The element type is already canonical; this new node is itself the
      // canonical dependent vector type.
      New = new (*this, alignof(DependentVectorType))
          DependentVectorType(VecType, QualType(), SizeExpr, AttrLoc, VecKind);

      DependentVectorType *CanonCheck =
          DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!CanonCheck &&
             "Dependent-sized vector_size canonical type broken");
      (void)CanonCheck;
      DependentVectorTypes.InsertNode(N: New, InsertPos);
    } else {
      // Build (or find) the canonical node first, then sugar over it.
      QualType CanonTy = getDependentVectorType(VecType: CanonVecTy, SizeExpr,
                                                AttrLoc: SourceLocation(), VecKind);
      New = new (*this, alignof(DependentVectorType))
          DependentVectorType(VecType, CanonTy, SizeExpr, AttrLoc, VecKind);
    }
  }

  Types.push_back(Elt: New);
  return QualType(New, 0);
}
4724
/// getExtVectorType - Return the unique reference to an extended vector type of
/// the specified element type and size. VectorType must be a built-in type.
QualType ASTContext::getExtVectorType(QualType vecType,
                                      unsigned NumElts) const {
  assert(vecType->isBuiltinType() || vecType->isDependentType() ||
         (vecType->isBitIntType() &&
          // Only support _BitInt elements with byte-sized power of 2 NumBits.
          llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits())));

  // Check if we've already instantiated a vector of this type.
  // ExtVector shares the VectorTypes folding set with plain vectors; the
  // TypeClass field in the profile keeps the two kinds distinct.
  llvm::FoldingSetNodeID ID;
  VectorType::Profile(ID, ElementType: vecType, NumElements: NumElts, TypeClass: Type::ExtVector,
                      VecKind: VectorKind::Generic);
  void *InsertPos = nullptr;
  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(VTP, 0);

  // If the element type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!vecType.isCanonical()) {
    Canonical = getExtVectorType(vecType: getCanonicalType(T: vecType), NumElts);

    // Get the new insert position for the node we care about; the recursive
    // call may have invalidated the earlier InsertPos.
    VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, alignof(ExtVectorType))
      ExtVectorType(vecType, NumElts, Canonical);
  VectorTypes.InsertNode(N: New, InsertPos);
  Types.push_back(Elt: New);
  return QualType(New, 0);
}
4758
/// getDependentSizedExtVectorType - Return an ext_vector_type whose element
/// count is a dependent expression.
QualType
ASTContext::getDependentSizedExtVectorType(QualType vecType,
                                           Expr *SizeExpr,
                                           SourceLocation AttrLoc) const {
  // Unique on the canonical element type plus the size expression.
  llvm::FoldingSetNodeID ID;
  DependentSizedExtVectorType::Profile(ID, Context: *this, ElementType: getCanonicalType(T: vecType),
                                       SizeExpr);

  void *InsertPos = nullptr;
  DependentSizedExtVectorType *Canon
    = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
  DependentSizedExtVectorType *New;
  if (Canon) {
    // We already have a canonical version of this array type; use it as
    // the canonical type for a newly-built type.
    New = new (*this, alignof(DependentSizedExtVectorType))
        DependentSizedExtVectorType(vecType, QualType(Canon, 0), SizeExpr,
                                    AttrLoc);
  } else {
    QualType CanonVecTy = getCanonicalType(T: vecType);
    if (CanonVecTy == vecType) {
      // Element type is already canonical, so this node is the canonical one.
      New = new (*this, alignof(DependentSizedExtVectorType))
          DependentSizedExtVectorType(vecType, QualType(), SizeExpr, AttrLoc);

      DependentSizedExtVectorType *CanonCheck
        = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken");
      (void)CanonCheck;
      DependentSizedExtVectorTypes.InsertNode(N: New, InsertPos);
    } else {
      // Build (or find) the canonical node first, then sugar over it.
      QualType CanonExtTy = getDependentSizedExtVectorType(vecType: CanonVecTy, SizeExpr,
                                                           AttrLoc: SourceLocation());
      New = new (*this, alignof(DependentSizedExtVectorType))
          DependentSizedExtVectorType(vecType, CanonExtTy, SizeExpr, AttrLoc);
    }
  }

  Types.push_back(Elt: New);
  return QualType(New, 0);
}
4799
/// getConstantMatrixType - Return the unique reference to a matrix type of
/// the given element type and constant dimensions.
QualType ASTContext::getConstantMatrixType(QualType ElementTy, unsigned NumRows,
                                           unsigned NumColumns) const {
  llvm::FoldingSetNodeID ID;
  ConstantMatrixType::Profile(ID, ElementType: ElementTy, NumRows, NumColumns,
                              TypeClass: Type::ConstantMatrix);

  assert(MatrixType::isValidElementType(ElementTy) &&
         "need a valid element type");
  assert(ConstantMatrixType::isDimensionValid(NumRows) &&
         ConstantMatrixType::isDimensionValid(NumColumns) &&
         "need valid matrix dimensions");
  void *InsertPos = nullptr;
  if (ConstantMatrixType *MTP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(MTP, 0);

  // If the element type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!ElementTy.isCanonical()) {
    Canonical =
        getConstantMatrixType(ElementTy: getCanonicalType(T: ElementTy), NumRows, NumColumns);

    // Re-query the insert position: the recursive call may have grown the
    // folding set and invalidated InsertPos.
    ConstantMatrixType *NewIP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Matrix type shouldn't already exist in the map");
    (void)NewIP;
  }

  auto *New = new (*this, alignof(ConstantMatrixType))
      ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical);
  MatrixTypes.InsertNode(N: New, InsertPos);
  Types.push_back(Elt: New);
  return QualType(New, 0);
}
4831
4832QualType ASTContext::getDependentSizedMatrixType(QualType ElementTy,
4833 Expr *RowExpr,
4834 Expr *ColumnExpr,
4835 SourceLocation AttrLoc) const {
4836 QualType CanonElementTy = getCanonicalType(T: ElementTy);
4837 llvm::FoldingSetNodeID ID;
4838 DependentSizedMatrixType::Profile(ID, Context: *this, ElementType: CanonElementTy, RowExpr,
4839 ColumnExpr);
4840
4841 void *InsertPos = nullptr;
4842 DependentSizedMatrixType *Canon =
4843 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4844
4845 if (!Canon) {
4846 Canon = new (*this, alignof(DependentSizedMatrixType))
4847 DependentSizedMatrixType(CanonElementTy, QualType(), RowExpr,
4848 ColumnExpr, AttrLoc);
4849#ifndef NDEBUG
4850 DependentSizedMatrixType *CanonCheck =
4851 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4852 assert(!CanonCheck && "Dependent-sized matrix canonical type broken");
4853#endif
4854 DependentSizedMatrixTypes.InsertNode(N: Canon, InsertPos);
4855 Types.push_back(Elt: Canon);
4856 }
4857
4858 // Already have a canonical version of the matrix type
4859 //
4860 // If it exactly matches the requested type, use it directly.
4861 if (Canon->getElementType() == ElementTy && Canon->getRowExpr() == RowExpr &&
4862 Canon->getRowExpr() == ColumnExpr)
4863 return QualType(Canon, 0);
4864
4865 // Use Canon as the canonical type for newly-built type.
4866 DependentSizedMatrixType *New = new (*this, alignof(DependentSizedMatrixType))
4867 DependentSizedMatrixType(ElementTy, QualType(Canon, 0), RowExpr,
4868 ColumnExpr, AttrLoc);
4869 Types.push_back(Elt: New);
4870 return QualType(New, 0);
4871}
4872
/// getDependentAddressSpaceType - Return a pointer-like type whose address
/// space is a dependent expression (address_space attribute in a template).
QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType,
                                                  Expr *AddrSpaceExpr,
                                                  SourceLocation AttrLoc) const {
  assert(AddrSpaceExpr->isInstantiationDependent());

  QualType canonPointeeType = getCanonicalType(T: PointeeType);

  // Unique on the canonical pointee type plus the address-space expression.
  void *insertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  DependentAddressSpaceType::Profile(ID, Context: *this, PointeeType: canonPointeeType,
                                     AddrSpaceExpr);

  DependentAddressSpaceType *canonTy =
    DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, InsertPos&: insertPos);

  if (!canonTy) {
    // Build and register the canonical node first.
    canonTy = new (*this, alignof(DependentAddressSpaceType))
        DependentAddressSpaceType(canonPointeeType, QualType(), AddrSpaceExpr,
                                  AttrLoc);
    DependentAddressSpaceTypes.InsertNode(N: canonTy, InsertPos: insertPos);
    Types.push_back(Elt: canonTy);
  }

  // The canonical node already matches the request exactly; return it.
  if (canonPointeeType == PointeeType &&
      canonTy->getAddrSpaceExpr() == AddrSpaceExpr)
    return QualType(canonTy, 0);

  // Otherwise build a sugared node that preserves the spelled pointee type.
  auto *sugaredType = new (*this, alignof(DependentAddressSpaceType))
      DependentAddressSpaceType(PointeeType, QualType(canonTy, 0),
                                AddrSpaceExpr, AttrLoc);
  Types.push_back(Elt: sugaredType);
  return QualType(sugaredType, 0);
}
4906
4907/// Determine whether \p T is canonical as the result type of a function.
4908static bool isCanonicalResultType(QualType T) {
4909 return T.isCanonical() &&
4910 (T.getObjCLifetime() == Qualifiers::OCL_None ||
4911 T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone);
4912}
4913
/// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
QualType
ASTContext::getFunctionNoProtoType(QualType ResultTy,
                                   const FunctionType::ExtInfo &Info) const {
  // FIXME: This assertion cannot be enabled (yet) because the ObjC rewriter
  // functionality creates a function without a prototype regardless of
  // language mode (so it makes them even in C++). Once the rewriter has been
  // fixed, this assertion can be enabled again.
  //assert(!LangOpts.requiresStrictPrototypes() &&
  //       "strict prototypes are disabled");

  // Unique functions, to guarantee there is only one function of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  FunctionNoProtoType::Profile(ID, ResultType: ResultTy, Info);

  void *InsertPos = nullptr;
  if (FunctionNoProtoType *FT =
        FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(FT, 0);

  // If the result type isn't canonical (for function-result purposes, which
  // additionally excludes most ARC lifetime qualifiers), this node is sugar
  // over a canonical one; build that first.
  QualType Canonical;
  if (!isCanonicalResultType(T: ResultTy)) {
    Canonical =
      getFunctionNoProtoType(ResultTy: getCanonicalFunctionResultType(ResultType: ResultTy), Info);

    // Get the new insert position for the node we care about; the recursive
    // call may have invalidated the earlier InsertPos.
    FunctionNoProtoType *NewIP =
      FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = new (*this, alignof(FunctionNoProtoType))
      FunctionNoProtoType(ResultTy, Canonical, Info);
  Types.push_back(Elt: New);
  FunctionNoProtoTypes.InsertNode(N: New, InsertPos);
  return QualType(New, 0);
}
4952
4953CanQualType
4954ASTContext::getCanonicalFunctionResultType(QualType ResultType) const {
4955 CanQualType CanResultType = getCanonicalType(T: ResultType);
4956
4957 // Canonical result types do not have ARC lifetime qualifiers.
4958 if (CanResultType.getQualifiers().hasObjCLifetime()) {
4959 Qualifiers Qs = CanResultType.getQualifiers();
4960 Qs.removeObjCLifetime();
4961 return CanQualType::CreateUnsafe(
4962 Other: getQualifiedType(T: CanResultType.getUnqualifiedType(), Qs));
4963 }
4964
4965 return CanResultType;
4966}
4967
4968static bool isCanonicalExceptionSpecification(
4969 const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) {
4970 if (ESI.Type == EST_None)
4971 return true;
4972 if (!NoexceptInType)
4973 return false;
4974
4975 // C++17 onwards: exception specification is part of the type, as a simple
4976 // boolean "can this function type throw".
4977 if (ESI.Type == EST_BasicNoexcept)
4978 return true;
4979
4980 // A noexcept(expr) specification is (possibly) canonical if expr is
4981 // value-dependent.
4982 if (ESI.Type == EST_DependentNoexcept)
4983 return true;
4984
4985 // A dynamic exception specification is canonical if it only contains pack
4986 // expansions (so we can't tell whether it's non-throwing) and all its
4987 // contained types are canonical.
4988 if (ESI.Type == EST_Dynamic) {
4989 bool AnyPackExpansions = false;
4990 for (QualType ET : ESI.Exceptions) {
4991 if (!ET.isCanonical())
4992 return false;
4993 if (ET->getAs<PackExpansionType>())
4994 AnyPackExpansions = true;
4995 }
4996 return AnyPackExpansions;
4997 }
4998
4999 return false;
5000}
5001
/// Build (or look up) the uniqued FunctionProtoType with the given result
/// type, parameter types, and extended prototype information \p EPI.
///
/// When \p OnlyWantCanonical is true, the caller asserts that all of the
/// inputs are already canonical and the returned type is the canonical
/// function type; otherwise a non-canonical node is linked to its canonical
/// form (built recursively here if needed).
QualType ASTContext::getFunctionTypeInternal(
    QualType ResultTy, ArrayRef<QualType> ArgArray,
    const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const {
  size_t NumArgs = ArgArray.size();

  // Unique functions, to guarantee there is only one function of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  FunctionProtoType::Profile(ID, Result: ResultTy, ArgTys: ArgArray.begin(), NumArgs, EPI,
                             Context: *this, Canonical: true);

  QualType Canonical;
  bool Unique = false;

  void *InsertPos = nullptr;
  if (FunctionProtoType *FPT =
          FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) {
    QualType Existing = QualType(FPT, 0);

    // If we find a pre-existing equivalent FunctionProtoType, we can just reuse
    // it so long as our exception specification doesn't contain a dependent
    // noexcept expression, or we're just looking for a canonical type.
    // Otherwise, we're going to need to create a type
    // sugar node to hold the concrete expression.
    if (OnlyWantCanonical || !isComputedNoexcept(ESpecType: EPI.ExceptionSpec.Type) ||
        EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr())
      return Existing;

    // We need a new type sugar node for this one, to hold the new noexcept
    // expression. We do no canonicalization here, but that's OK since we don't
    // expect to see the same noexcept expression much more than once.
    Canonical = getCanonicalType(T: Existing);
    Unique = true;
  }

  bool NoexceptInType = getLangOpts().CPlusPlus17;
  bool IsCanonicalExceptionSpec =
      isCanonicalExceptionSpecification(ESI: EPI.ExceptionSpec, NoexceptInType);

  // Determine whether the type being created is already canonical or not.
  bool isCanonical = !Unique && IsCanonicalExceptionSpec &&
                     isCanonicalResultType(T: ResultTy) && !EPI.HasTrailingReturn;
  for (unsigned i = 0; i != NumArgs && isCanonical; ++i)
    if (!ArgArray[i].isCanonicalAsParam())
      isCanonical = false;

  if (OnlyWantCanonical)
    assert(isCanonical &&
           "given non-canonical parameters constructing canonical type");

  // If this type isn't canonical, get the canonical version of it if we don't
  // already have it. The exception spec is only partially part of the
  // canonical type, and only in C++17 onwards.
  if (!isCanonical && Canonical.isNull()) {
    SmallVector<QualType, 16> CanonicalArgs;
    CanonicalArgs.reserve(N: NumArgs);
    for (unsigned i = 0; i != NumArgs; ++i)
      CanonicalArgs.push_back(Elt: getCanonicalParamType(T: ArgArray[i]));

    llvm::SmallVector<QualType, 8> ExceptionTypeStorage;
    FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI;
    CanonicalEPI.HasTrailingReturn = false;

    if (IsCanonicalExceptionSpec) {
      // Exception spec is already OK.
    } else if (NoexceptInType) {
      switch (EPI.ExceptionSpec.Type) {
      case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated:
        // We don't know yet. It shouldn't matter what we pick here; no-one
        // should ever look at this.
        [[fallthrough]];
      case EST_None: case EST_MSAny: case EST_NoexceptFalse:
        CanonicalEPI.ExceptionSpec.Type = EST_None;
        break;

      // A dynamic exception specification is almost always "not noexcept",
      // with the exception that a pack expansion might expand to no types.
      case EST_Dynamic: {
        bool AnyPacks = false;
        for (QualType ET : EPI.ExceptionSpec.Exceptions) {
          if (ET->getAs<PackExpansionType>())
            AnyPacks = true;
          ExceptionTypeStorage.push_back(Elt: getCanonicalType(T: ET));
        }
        if (!AnyPacks)
          CanonicalEPI.ExceptionSpec.Type = EST_None;
        else {
          CanonicalEPI.ExceptionSpec.Type = EST_Dynamic;
          CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage;
        }
        break;
      }

      case EST_DynamicNone:
      case EST_BasicNoexcept:
      case EST_NoexceptTrue:
      case EST_NoThrow:
        CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept;
        break;

      case EST_DependentNoexcept:
        llvm_unreachable("dependent noexcept is already canonical");
      }
    } else {
      // Pre-C++17: the exception specification is dropped entirely from the
      // canonical type.
      CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo();
    }

    // Adjust the canonical function result type.
    CanQualType CanResultTy = getCanonicalFunctionResultType(ResultType: ResultTy);
    Canonical =
        getFunctionTypeInternal(ResultTy: CanResultTy, ArgArray: CanonicalArgs, EPI: CanonicalEPI, OnlyWantCanonical: true);

    // Get the new insert position for the node we care about.
    FunctionProtoType *NewIP =
        FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  // Compute the needed size to hold this FunctionProtoType and the
  // various trailing objects.
  auto ESH = FunctionProtoType::getExceptionSpecSize(
      EST: EPI.ExceptionSpec.Type, NumExceptions: EPI.ExceptionSpec.Exceptions.size());
  size_t Size = FunctionProtoType::totalSizeToAlloc<
      QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields,
      FunctionType::FunctionTypeArmAttributes, FunctionType::ExceptionType,
      Expr *, FunctionDecl *, FunctionProtoType::ExtParameterInfo, Qualifiers,
      FunctionEffect, EffectConditionExpr>(
      Counts: NumArgs, Counts: EPI.Variadic, Counts: EPI.requiresFunctionProtoTypeExtraBitfields(),
      Counts: EPI.requiresFunctionProtoTypeArmAttributes(), Counts: ESH.NumExceptionType,
      Counts: ESH.NumExprPtr, Counts: ESH.NumFunctionDeclPtr,
      Counts: EPI.ExtParameterInfos ? NumArgs : 0,
      Counts: EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0, Counts: EPI.FunctionEffects.size(),
      Counts: EPI.FunctionEffects.conditions().size());

  auto *FTP = (FunctionProtoType *)Allocate(Size, Align: alignof(FunctionProtoType));
  FunctionProtoType::ExtProtoInfo newEPI = EPI;
  new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI);
  Types.push_back(Elt: FTP);
  // Sugar nodes created only to carry a distinct noexcept expression
  // (Unique == true) are intentionally not inserted into the folding set.
  if (!Unique)
    FunctionProtoTypes.InsertNode(N: FTP, InsertPos);
  if (!EPI.FunctionEffects.empty())
    AnyFunctionEffects = true;
  return QualType(FTP, 0);
}
5146
5147QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const {
5148 llvm::FoldingSetNodeID ID;
5149 PipeType::Profile(ID, T, isRead: ReadOnly);
5150
5151 void *InsertPos = nullptr;
5152 if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos))
5153 return QualType(PT, 0);
5154
5155 // If the pipe element type isn't canonical, this won't be a canonical type
5156 // either, so fill in the canonical type field.
5157 QualType Canonical;
5158 if (!T.isCanonical()) {
5159 Canonical = getPipeType(T: getCanonicalType(T), ReadOnly);
5160
5161 // Get the new insert position for the node we care about.
5162 PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos);
5163 assert(!NewIP && "Shouldn't be in the map!");
5164 (void)NewIP;
5165 }
5166 auto *New = new (*this, alignof(PipeType)) PipeType(T, Canonical, ReadOnly);
5167 Types.push_back(Elt: New);
5168 PipeTypes.InsertNode(N: New, InsertPos);
5169 return QualType(New, 0);
5170}
5171
5172QualType ASTContext::adjustStringLiteralBaseType(QualType Ty) const {
5173 // OpenCL v1.1 s6.5.3: a string literal is in the constant address space.
5174 return LangOpts.OpenCL ? getAddrSpaceQualType(T: Ty, AddressSpace: LangAS::opencl_constant)
5175 : Ty;
5176}
5177
5178QualType ASTContext::getReadPipeType(QualType T) const {
5179 return getPipeType(T, ReadOnly: true);
5180}
5181
5182QualType ASTContext::getWritePipeType(QualType T) const {
5183 return getPipeType(T, ReadOnly: false);
5184}
5185
5186QualType ASTContext::getBitIntType(bool IsUnsigned, unsigned NumBits) const {
5187 llvm::FoldingSetNodeID ID;
5188 BitIntType::Profile(ID, IsUnsigned, NumBits);
5189
5190 void *InsertPos = nullptr;
5191 if (BitIntType *EIT = BitIntTypes.FindNodeOrInsertPos(ID, InsertPos))
5192 return QualType(EIT, 0);
5193
5194 auto *New = new (*this, alignof(BitIntType)) BitIntType(IsUnsigned, NumBits);
5195 BitIntTypes.InsertNode(N: New, InsertPos);
5196 Types.push_back(Elt: New);
5197 return QualType(New, 0);
5198}
5199
5200QualType ASTContext::getDependentBitIntType(bool IsUnsigned,
5201 Expr *NumBitsExpr) const {
5202 assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent");
5203 llvm::FoldingSetNodeID ID;
5204 DependentBitIntType::Profile(ID, Context: *this, IsUnsigned, NumBitsExpr);
5205
5206 void *InsertPos = nullptr;
5207 if (DependentBitIntType *Existing =
5208 DependentBitIntTypes.FindNodeOrInsertPos(ID, InsertPos))
5209 return QualType(Existing, 0);
5210
5211 auto *New = new (*this, alignof(DependentBitIntType))
5212 DependentBitIntType(IsUnsigned, NumBitsExpr);
5213 DependentBitIntTypes.InsertNode(N: New, InsertPos);
5214
5215 Types.push_back(Elt: New);
5216 return QualType(New, 0);
5217}
5218
5219#ifndef NDEBUG
5220static bool NeedsInjectedClassNameType(const RecordDecl *D) {
5221 if (!isa<CXXRecordDecl>(D)) return false;
5222 const auto *RD = cast<CXXRecordDecl>(D);
5223 if (isa<ClassTemplatePartialSpecializationDecl>(RD))
5224 return true;
5225 if (RD->getDescribedClassTemplate() &&
5226 !isa<ClassTemplateSpecializationDecl>(RD))
5227 return true;
5228 return false;
5229}
5230#endif
5231
5232/// getInjectedClassNameType - Return the unique reference to the
5233/// injected class name type for the specified templated declaration.
5234QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl,
5235 QualType TST) const {
5236 assert(NeedsInjectedClassNameType(Decl));
5237 if (Decl->TypeForDecl) {
5238 assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
5239 } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) {
5240 assert(PrevDecl->TypeForDecl && "previous declaration has no type");
5241 Decl->TypeForDecl = PrevDecl->TypeForDecl;
5242 assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
5243 } else {
5244 Type *newType = new (*this, alignof(InjectedClassNameType))
5245 InjectedClassNameType(Decl, TST);
5246 Decl->TypeForDecl = newType;
5247 Types.push_back(Elt: newType);
5248 }
5249 return QualType(Decl->TypeForDecl, 0);
5250}
5251
5252/// getTypeDeclType - Return the unique reference to the type for the
5253/// specified type declaration.
5254QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const {
5255 assert(Decl && "Passed null for Decl param");
5256 assert(!Decl->TypeForDecl && "TypeForDecl present in slow case");
5257
5258 if (const auto *Typedef = dyn_cast<TypedefNameDecl>(Val: Decl))
5259 return getTypedefType(Decl: Typedef);
5260
5261 assert(!isa<TemplateTypeParmDecl>(Decl) &&
5262 "Template type parameter types are always available.");
5263
5264 if (const auto *Record = dyn_cast<RecordDecl>(Val: Decl)) {
5265 assert(Record->isFirstDecl() && "struct/union has previous declaration");
5266 assert(!NeedsInjectedClassNameType(Record));
5267 return getRecordType(Decl: Record);
5268 } else if (const auto *Enum = dyn_cast<EnumDecl>(Val: Decl)) {
5269 assert(Enum->isFirstDecl() && "enum has previous declaration");
5270 return getEnumType(Decl: Enum);
5271 } else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Val: Decl)) {
5272 return getUnresolvedUsingType(Decl: Using);
5273 } else
5274 llvm_unreachable("TypeDecl without a type?");
5275
5276 return QualType(Decl->TypeForDecl, 0);
5277}
5278
/// getTypedefType - Return the unique reference to the type for the
/// specified typedef name decl.
///
/// \param Underlying if non-null, the underlying type to use for the node;
/// it must be the same type as the declaration's own underlying type
/// (asserted below), but may be a different sugar spelling, in which case a
/// "divergent" TypedefType carrying it explicitly is created.
QualType ASTContext::getTypedefType(const TypedefNameDecl *Decl,
                                    QualType Underlying) const {
  if (!Decl->TypeForDecl) {
    // First request for this declaration: create and cache the common
    // (non-divergent) node.
    if (Underlying.isNull())
      Underlying = Decl->getUnderlyingType();
    auto *NewType = new (*this, alignof(TypedefType)) TypedefType(
        Type::Typedef, Decl, Underlying, /*HasTypeDifferentFromDecl=*/false);
    Decl->TypeForDecl = NewType;
    Types.push_back(Elt: NewType);
    return QualType(NewType, 0);
  }
  // Cached node suffices when no (or the same) underlying type was requested.
  if (Underlying.isNull() || Decl->getUnderlyingType() == Underlying)
    return QualType(Decl->TypeForDecl, 0);
  assert(hasSameType(Decl->getUnderlyingType(), Underlying));

  // Divergent case: unique on (Decl, Underlying) in a folding set.
  llvm::FoldingSetNodeID ID;
  TypedefType::Profile(ID, Decl, Underlying);

  void *InsertPos = nullptr;
  if (TypedefType *T = TypedefTypes.FindNodeOrInsertPos(ID, InsertPos)) {
    assert(!T->typeMatchesDecl() &&
           "non-divergent case should be handled with TypeDecl");
    return QualType(T, 0);
  }

  // The divergent underlying type is stored in trailing storage.
  void *Mem = Allocate(Size: TypedefType::totalSizeToAlloc<QualType>(Counts: true),
                       Align: alignof(TypedefType));
  auto *NewType = new (Mem) TypedefType(Type::Typedef, Decl, Underlying,
                                        /*HasTypeDifferentFromDecl=*/true);
  TypedefTypes.InsertNode(N: NewType, InsertPos);
  Types.push_back(Elt: NewType);
  return QualType(NewType, 0);
}
5314
/// Return the uniqued UsingType for a type named through the using-shadow
/// declaration \p Found, with the given (unqualified) underlying type.
QualType ASTContext::getUsingType(const UsingShadowDecl *Found,
                                  QualType Underlying) const {
  llvm::FoldingSetNodeID ID;
  UsingType::Profile(ID, Found, Underlying);

  void *InsertPos = nullptr;
  if (UsingType *T = UsingTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(T, 0);

  const Type *TypeForDecl =
      cast<TypeDecl>(Val: Found->getTargetDecl())->getTypeForDecl();

  assert(!Underlying.hasLocalQualifiers());
  // The canonical type comes from the underlying type; it must agree with
  // the canonical type of the target declaration (asserted below).
  QualType Canon = Underlying->getCanonicalTypeInternal();
  assert(TypeForDecl->getCanonicalTypeInternal() == Canon);

  // When the underlying type is exactly the target declaration's own type,
  // there is nothing to store; only a divergent underlying type goes into
  // the trailing storage.
  if (Underlying.getTypePtr() == TypeForDecl)
    Underlying = QualType();
  void *Mem =
      Allocate(Size: UsingType::totalSizeToAlloc<QualType>(Counts: !Underlying.isNull()),
               Align: alignof(UsingType));
  UsingType *NewType = new (Mem) UsingType(Found, Underlying, Canon);
  Types.push_back(Elt: NewType);
  UsingTypes.InsertNode(N: NewType, InsertPos);
  return QualType(NewType, 0);
}
5341
5342QualType ASTContext::getRecordType(const RecordDecl *Decl) const {
5343 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
5344
5345 if (const RecordDecl *PrevDecl = Decl->getPreviousDecl())
5346 if (PrevDecl->TypeForDecl)
5347 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
5348
5349 auto *newType = new (*this, alignof(RecordType)) RecordType(Decl);
5350 Decl->TypeForDecl = newType;
5351 Types.push_back(Elt: newType);
5352 return QualType(newType, 0);
5353}
5354
5355QualType ASTContext::getEnumType(const EnumDecl *Decl) const {
5356 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
5357
5358 if (const EnumDecl *PrevDecl = Decl->getPreviousDecl())
5359 if (PrevDecl->TypeForDecl)
5360 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
5361
5362 auto *newType = new (*this, alignof(EnumType)) EnumType(Decl);
5363 Decl->TypeForDecl = newType;
5364 Types.push_back(Elt: newType);
5365 return QualType(newType, 0);
5366}
5367
/// Compute the best integer type to hold an enumeration whose enumerators
/// need \p NumNegativeBits bits for negative values and \p NumPositiveBits
/// bits for non-negative values, along with the type the enum promotes to.
///
/// \param IsPacked whether the enum is packed, which additionally allows
/// char/short-sized underlying types.
/// \param BestType [out] the chosen underlying type.
/// \param BestPromotionType [out] the chosen promotion type.
/// \returns true if even (unsigned) long long cannot hold all values
/// (the enum is "too large").
bool ASTContext::computeBestEnumTypes(bool IsPacked, unsigned NumNegativeBits,
                                      unsigned NumPositiveBits,
                                      QualType &BestType,
                                      QualType &BestPromotionType) {
  unsigned IntWidth = Target->getIntWidth();
  unsigned CharWidth = Target->getCharWidth();
  unsigned ShortWidth = Target->getShortWidth();
  bool EnumTooLarge = false;
  unsigned BestWidth;
  if (NumNegativeBits) {
    // If there is a negative value, figure out the smallest integer type (of
    // int/long/longlong) that fits.
    // If it's packed, check also if it fits a char or a short.
    if (IsPacked && NumNegativeBits <= CharWidth &&
        NumPositiveBits < CharWidth) {
      BestType = SignedCharTy;
      BestWidth = CharWidth;
    } else if (IsPacked && NumNegativeBits <= ShortWidth &&
               NumPositiveBits < ShortWidth) {
      BestType = ShortTy;
      BestWidth = ShortWidth;
    } else if (NumNegativeBits <= IntWidth && NumPositiveBits < IntWidth) {
      BestType = IntTy;
      BestWidth = IntWidth;
    } else {
      BestWidth = Target->getLongWidth();

      if (NumNegativeBits <= BestWidth && NumPositiveBits < BestWidth) {
        BestType = LongTy;
      } else {
        BestWidth = Target->getLongLongWidth();

        if (NumNegativeBits > BestWidth || NumPositiveBits >= BestWidth)
          EnumTooLarge = true;
        BestType = LongLongTy;
      }
    }
    // Signed types at or below int width promote to int.
    BestPromotionType = (BestWidth <= IntWidth ? IntTy : BestType);
  } else {
    // If there is no negative value, figure out the smallest type that fits
    // all of the enumerator values.
    // If it's packed, check also if it fits a char or a short.
    if (IsPacked && NumPositiveBits <= CharWidth) {
      BestType = UnsignedCharTy;
      BestPromotionType = IntTy;
      BestWidth = CharWidth;
    } else if (IsPacked && NumPositiveBits <= ShortWidth) {
      BestType = UnsignedShortTy;
      BestPromotionType = IntTy;
      BestWidth = ShortWidth;
    } else if (NumPositiveBits <= IntWidth) {
      BestType = UnsignedIntTy;
      BestWidth = IntWidth;
      // In C++ an unsigned type whose values all fit in int promotes to int;
      // in C the promotion type matches the underlying unsigned type.
      BestPromotionType = (NumPositiveBits == BestWidth || !LangOpts.CPlusPlus)
                              ? UnsignedIntTy
                              : IntTy;
    } else if (NumPositiveBits <= (BestWidth = Target->getLongWidth())) {
      BestType = UnsignedLongTy;
      BestPromotionType = (NumPositiveBits == BestWidth || !LangOpts.CPlusPlus)
                              ? UnsignedLongTy
                              : LongTy;
    } else {
      BestWidth = Target->getLongLongWidth();
      if (NumPositiveBits > BestWidth) {
        // This can happen with bit-precise integer types, but those are not
        // allowed as the type for an enumerator per C23 6.7.2.2p4 and p12.
        // FIXME: GCC uses __int128_t and __uint128_t for cases that fit within
        // a 128-bit integer, we should consider doing the same.
        EnumTooLarge = true;
      }
      BestType = UnsignedLongLongTy;
      BestPromotionType = (NumPositiveBits == BestWidth || !LangOpts.CPlusPlus)
                              ? UnsignedLongLongTy
                              : LongLongTy;
    }
  }
  return EnumTooLarge;
}
5446
5447bool ASTContext::isRepresentableIntegerValue(llvm::APSInt &Value, QualType T) {
5448 assert((T->isIntegralType(*this) || T->isEnumeralType()) &&
5449 "Integral type required!");
5450 unsigned BitWidth = getIntWidth(T);
5451
5452 if (Value.isUnsigned() || Value.isNonNegative()) {
5453 if (T->isSignedIntegerOrEnumerationType())
5454 --BitWidth;
5455 return Value.getActiveBits() <= BitWidth;
5456 }
5457 return Value.getSignificantBits() <= BitWidth;
5458}
5459
5460QualType ASTContext::getUnresolvedUsingType(
5461 const UnresolvedUsingTypenameDecl *Decl) const {
5462 if (Decl->TypeForDecl)
5463 return QualType(Decl->TypeForDecl, 0);
5464
5465 if (const UnresolvedUsingTypenameDecl *CanonicalDecl =
5466 Decl->getCanonicalDecl())
5467 if (CanonicalDecl->TypeForDecl)
5468 return QualType(Decl->TypeForDecl = CanonicalDecl->TypeForDecl, 0);
5469
5470 Type *newType =
5471 new (*this, alignof(UnresolvedUsingType)) UnresolvedUsingType(Decl);
5472 Decl->TypeForDecl = newType;
5473 Types.push_back(Elt: newType);
5474 return QualType(newType, 0);
5475}
5476
5477QualType ASTContext::getAttributedType(attr::Kind attrKind,
5478 QualType modifiedType,
5479 QualType equivalentType,
5480 const Attr *attr) const {
5481 llvm::FoldingSetNodeID id;
5482 AttributedType::Profile(ID&: id, attrKind, modified: modifiedType, equivalent: equivalentType, attr);
5483
5484 void *insertPos = nullptr;
5485 AttributedType *type = AttributedTypes.FindNodeOrInsertPos(ID: id, InsertPos&: insertPos);
5486 if (type) return QualType(type, 0);
5487
5488 assert(!attr || attr->getKind() == attrKind);
5489
5490 QualType canon = getCanonicalType(T: equivalentType);
5491 type = new (*this, alignof(AttributedType))
5492 AttributedType(canon, attrKind, attr, modifiedType, equivalentType);
5493
5494 Types.push_back(Elt: type);
5495 AttributedTypes.InsertNode(N: type, InsertPos: insertPos);
5496
5497 return QualType(type, 0);
5498}
5499
5500QualType ASTContext::getAttributedType(const Attr *attr, QualType modifiedType,
5501 QualType equivalentType) const {
5502 return getAttributedType(attrKind: attr->getKind(), modifiedType, equivalentType, attr);
5503}
5504
5505QualType ASTContext::getAttributedType(NullabilityKind nullability,
5506 QualType modifiedType,
5507 QualType equivalentType) {
5508 switch (nullability) {
5509 case NullabilityKind::NonNull:
5510 return getAttributedType(attrKind: attr::TypeNonNull, modifiedType, equivalentType);
5511
5512 case NullabilityKind::Nullable:
5513 return getAttributedType(attrKind: attr::TypeNullable, modifiedType, equivalentType);
5514
5515 case NullabilityKind::NullableResult:
5516 return getAttributedType(attrKind: attr::TypeNullableResult, modifiedType,
5517 equivalentType);
5518
5519 case NullabilityKind::Unspecified:
5520 return getAttributedType(attrKind: attr::TypeNullUnspecified, modifiedType,
5521 equivalentType);
5522 }
5523
5524 llvm_unreachable("Unknown nullability kind");
5525}
5526
5527QualType ASTContext::getBTFTagAttributedType(const BTFTypeTagAttr *BTFAttr,
5528 QualType Wrapped) const {
5529 llvm::FoldingSetNodeID ID;
5530 BTFTagAttributedType::Profile(ID, Wrapped, BTFAttr);
5531
5532 void *InsertPos = nullptr;
5533 BTFTagAttributedType *Ty =
5534 BTFTagAttributedTypes.FindNodeOrInsertPos(ID, InsertPos);
5535 if (Ty)
5536 return QualType(Ty, 0);
5537
5538 QualType Canon = getCanonicalType(T: Wrapped);
5539 Ty = new (*this, alignof(BTFTagAttributedType))
5540 BTFTagAttributedType(Canon, Wrapped, BTFAttr);
5541
5542 Types.push_back(Elt: Ty);
5543 BTFTagAttributedTypes.InsertNode(N: Ty, InsertPos);
5544
5545 return QualType(Ty, 0);
5546}
5547
5548QualType ASTContext::getHLSLAttributedResourceType(
5549 QualType Wrapped, QualType Contained,
5550 const HLSLAttributedResourceType::Attributes &Attrs) {
5551
5552 llvm::FoldingSetNodeID ID;
5553 HLSLAttributedResourceType::Profile(ID, Wrapped, Contained, Attrs);
5554
5555 void *InsertPos = nullptr;
5556 HLSLAttributedResourceType *Ty =
5557 HLSLAttributedResourceTypes.FindNodeOrInsertPos(ID, InsertPos);
5558 if (Ty)
5559 return QualType(Ty, 0);
5560
5561 Ty = new (*this, alignof(HLSLAttributedResourceType))
5562 HLSLAttributedResourceType(Wrapped, Contained, Attrs);
5563
5564 Types.push_back(Elt: Ty);
5565 HLSLAttributedResourceTypes.InsertNode(N: Ty, InsertPos);
5566
5567 return QualType(Ty, 0);
5568}
5569
5570QualType ASTContext::getHLSLInlineSpirvType(uint32_t Opcode, uint32_t Size,
5571 uint32_t Alignment,
5572 ArrayRef<SpirvOperand> Operands) {
5573 llvm::FoldingSetNodeID ID;
5574 HLSLInlineSpirvType::Profile(ID, Opcode, Size, Alignment, Operands);
5575
5576 void *InsertPos = nullptr;
5577 HLSLInlineSpirvType *Ty =
5578 HLSLInlineSpirvTypes.FindNodeOrInsertPos(ID, InsertPos);
5579 if (Ty)
5580 return QualType(Ty, 0);
5581
5582 void *Mem = Allocate(
5583 Size: HLSLInlineSpirvType::totalSizeToAlloc<SpirvOperand>(Counts: Operands.size()),
5584 Align: alignof(HLSLInlineSpirvType));
5585
5586 Ty = new (Mem) HLSLInlineSpirvType(Opcode, Size, Alignment, Operands);
5587
5588 Types.push_back(Elt: Ty);
5589 HLSLInlineSpirvTypes.InsertNode(N: Ty, InsertPos);
5590
5591 return QualType(Ty, 0);
5592}
5593
5594/// Retrieve a substitution-result type.
5595QualType ASTContext::getSubstTemplateTypeParmType(QualType Replacement,
5596 Decl *AssociatedDecl,
5597 unsigned Index,
5598 UnsignedOrNone PackIndex,
5599 bool Final) const {
5600 llvm::FoldingSetNodeID ID;
5601 SubstTemplateTypeParmType::Profile(ID, Replacement, AssociatedDecl, Index,
5602 PackIndex, Final);
5603 void *InsertPos = nullptr;
5604 SubstTemplateTypeParmType *SubstParm =
5605 SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
5606
5607 if (!SubstParm) {
5608 void *Mem = Allocate(Size: SubstTemplateTypeParmType::totalSizeToAlloc<QualType>(
5609 Counts: !Replacement.isCanonical()),
5610 Align: alignof(SubstTemplateTypeParmType));
5611 SubstParm = new (Mem) SubstTemplateTypeParmType(Replacement, AssociatedDecl,
5612 Index, PackIndex, Final);
5613 Types.push_back(Elt: SubstParm);
5614 SubstTemplateTypeParmTypes.InsertNode(N: SubstParm, InsertPos);
5615 }
5616
5617 return QualType(SubstParm, 0);
5618}
5619
/// Retrieve the uniqued type representing the substitution of a template
/// type parameter pack, recording the entire argument pack \p ArgPack
/// (every element of which must be a type argument).
QualType
ASTContext::getSubstTemplateTypeParmPackType(Decl *AssociatedDecl,
                                             unsigned Index, bool Final,
                                             const TemplateArgument &ArgPack) {
#ifndef NDEBUG
  for (const auto &P : ArgPack.pack_elements())
    assert(P.getKind() == TemplateArgument::Type && "Pack contains a non-type");
#endif

  llvm::FoldingSetNodeID ID;
  SubstTemplateTypeParmPackType::Profile(ID, AssociatedDecl, Index, Final,
                                         ArgPack);
  void *InsertPos = nullptr;
  if (SubstTemplateTypeParmPackType *SubstParm =
          SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(SubstParm, 0);

  QualType Canon;
  {
    // The canonical form uses the canonical associated declaration and the
    // canonical argument pack; build it first if either differs here.
    TemplateArgument CanonArgPack = getCanonicalTemplateArgument(Arg: ArgPack);
    if (!AssociatedDecl->isCanonicalDecl() ||
        !CanonArgPack.structurallyEquals(Other: ArgPack)) {
      Canon = getSubstTemplateTypeParmPackType(
          AssociatedDecl: AssociatedDecl->getCanonicalDecl(), Index, Final, ArgPack: CanonArgPack);
      // Creating the canonical type must not have inserted this node.
      [[maybe_unused]] const auto *Nothing =
          SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!Nothing);
    }
  }

  auto *SubstParm = new (*this, alignof(SubstTemplateTypeParmPackType))
      SubstTemplateTypeParmPackType(Canon, AssociatedDecl, Index, Final,
                                    ArgPack);
  Types.push_back(Elt: SubstParm);
  SubstTemplateTypeParmPackTypes.InsertNode(N: SubstParm, InsertPos);
  return QualType(SubstParm, 0);
}
5658
5659/// Retrieve the template type parameter type for a template
5660/// parameter or parameter pack with the given depth, index, and (optionally)
5661/// name.
5662QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
5663 bool ParameterPack,
5664 TemplateTypeParmDecl *TTPDecl) const {
5665 llvm::FoldingSetNodeID ID;
5666 TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
5667 void *InsertPos = nullptr;
5668 TemplateTypeParmType *TypeParm
5669 = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
5670
5671 if (TypeParm)
5672 return QualType(TypeParm, 0);
5673
5674 if (TTPDecl) {
5675 QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
5676 TypeParm = new (*this, alignof(TemplateTypeParmType))
5677 TemplateTypeParmType(Depth, Index, ParameterPack, TTPDecl, Canon);
5678
5679 TemplateTypeParmType *TypeCheck
5680 = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
5681 assert(!TypeCheck && "Template type parameter canonical type broken");
5682 (void)TypeCheck;
5683 } else
5684 TypeParm = new (*this, alignof(TemplateTypeParmType)) TemplateTypeParmType(
5685 Depth, Index, ParameterPack, /*TTPDecl=*/nullptr, /*Canon=*/QualType());
5686
5687 Types.push_back(Elt: TypeParm);
5688 TemplateTypeParmTypes.InsertNode(N: TypeParm, InsertPos);
5689
5690 return QualType(TypeParm, 0);
5691}
5692
5693TypeSourceInfo *ASTContext::getTemplateSpecializationTypeInfo(
5694 TemplateName Name, SourceLocation NameLoc,
5695 const TemplateArgumentListInfo &SpecifiedArgs,
5696 ArrayRef<TemplateArgument> CanonicalArgs, QualType Underlying) const {
5697 QualType TST = getTemplateSpecializationType(T: Name, SpecifiedArgs: SpecifiedArgs.arguments(),
5698 CanonicalArgs, Canon: Underlying);
5699
5700 TypeSourceInfo *DI = CreateTypeSourceInfo(T: TST);
5701 TemplateSpecializationTypeLoc TL =
5702 DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>();
5703 TL.setTemplateKeywordLoc(SourceLocation());
5704 TL.setTemplateNameLoc(NameLoc);
5705 TL.setLAngleLoc(SpecifiedArgs.getLAngleLoc());
5706 TL.setRAngleLoc(SpecifiedArgs.getRAngleLoc());
5707 for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
5708 TL.setArgLocInfo(i, AI: SpecifiedArgs[i].getLocInfo());
5709 return DI;
5710}
5711
5712QualType ASTContext::getTemplateSpecializationType(
5713 TemplateName Template, ArrayRef<TemplateArgumentLoc> SpecifiedArgs,
5714 ArrayRef<TemplateArgument> CanonicalArgs, QualType Underlying) const {
5715 SmallVector<TemplateArgument, 4> SpecifiedArgVec;
5716 SpecifiedArgVec.reserve(N: SpecifiedArgs.size());
5717 for (const TemplateArgumentLoc &Arg : SpecifiedArgs)
5718 SpecifiedArgVec.push_back(Elt: Arg.getArgument());
5719
5720 return getTemplateSpecializationType(T: Template, SpecifiedArgs: SpecifiedArgVec, CanonicalArgs,
5721 Underlying);
5722}
5723
5724[[maybe_unused]] static bool
5725hasAnyPackExpansions(ArrayRef<TemplateArgument> Args) {
5726 for (const TemplateArgument &Arg : Args)
5727 if (Arg.isPackExpansion())
5728 return true;
5729 return false;
5730}
5731
/// Build (or look up) the canonical TemplateSpecializationType for an
/// already-canonical template name and an already-canonical, non-empty
/// argument list; both preconditions are asserted.
QualType ASTContext::getCanonicalTemplateSpecializationType(
    TemplateName Template, ArrayRef<TemplateArgument> Args) const {
  assert(Template ==
         getCanonicalTemplateName(Template, /*IgnoreDeduced=*/true));
  assert(!Args.empty());
#ifndef NDEBUG
  for (const auto &Arg : Args)
    assert(Arg.structurallyEquals(getCanonicalTemplateArgument(Arg)));
#endif

  llvm::FoldingSetNodeID ID;
  TemplateSpecializationType::Profile(ID, T: Template, Args, Underlying: QualType(), Context: *this);
  void *InsertPos = nullptr;
  if (auto *T = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(T, 0);

  // The arguments are allocated immediately after the type node.
  void *Mem = Allocate(Size: sizeof(TemplateSpecializationType) +
                           sizeof(TemplateArgument) * Args.size(),
                       Align: alignof(TemplateSpecializationType));
  auto *Spec = new (Mem)
      TemplateSpecializationType(Template, /*IsAlias=*/false, Args, QualType());
  assert(Spec->isDependentType() &&
         "canonical template specialization must be dependent");
  Types.push_back(Elt: Spec);
  TemplateSpecializationTypes.InsertNode(N: Spec, InsertPos);
  return QualType(Spec, 0);
}
5759
/// Retrieve the type of a template specialization as written.
/// \param Underlying If non-null, the type this specialization resolves to
///        (used as the canonical/underlying type); if null, it is computed
///        here from the canonical template name and arguments.
QualType ASTContext::getTemplateSpecializationType(
    TemplateName Template, ArrayRef<TemplateArgument> SpecifiedArgs,
    ArrayRef<TemplateArgument> CanonicalArgs, QualType Underlying) const {
  assert(!Template.getUnderlying().getAsDependentTemplateName() &&
         "No dependent template names here!");

  const auto *TD = Template.getAsTemplateDecl(/*IgnoreDeduced=*/true);
  bool IsTypeAlias = TD && TD->isTypeAlias();
  if (Underlying.isNull()) {
    // Canonicalize the template name and arguments, tracking whether the
    // written form differs from the canonical form in any way.
    TemplateName CanonTemplate =
        getCanonicalTemplateName(Name: Template, /*IgnoreDeduced=*/true);
    bool NonCanonical = Template != CanonTemplate;
    SmallVector<TemplateArgument, 4> CanonArgsVec;
    if (CanonicalArgs.empty()) {
      // No pre-computed canonical arguments: canonicalize the written ones.
      CanonArgsVec = SmallVector<TemplateArgument, 4>(SpecifiedArgs);
      NonCanonical |= canonicalizeTemplateArguments(Args: CanonArgsVec);
      CanonicalArgs = CanonArgsVec;
    } else {
      // Caller supplied canonical arguments; this node is non-canonical iff
      // the written arguments differ structurally from them.
      NonCanonical |= !llvm::equal(
          LRange&: SpecifiedArgs, RRange&: CanonicalArgs,
          P: [](const TemplateArgument &A, const TemplateArgument &B) {
            return A.structurallyEquals(Other: B);
          });
    }

    // We can get here with an alias template when the specialization
    // contains a pack expansion that does not match up with a parameter
    // pack, or a builtin template which cannot be resolved due to dependency.
    assert((!isa_and_nonnull<TypeAliasTemplateDecl>(TD) ||
            hasAnyPackExpansions(CanonicalArgs)) &&
           "Caller must compute aliased type");
    IsTypeAlias = false;

    Underlying =
        getCanonicalTemplateSpecializationType(Template: CanonTemplate, Args: CanonicalArgs);
    // If the written form already matches the canonical form exactly, the
    // canonical node itself is the result; no sugar node is needed.
    if (!NonCanonical)
      return Underlying;
  }
  // Tail-allocate the written arguments, plus one extra QualType slot for
  // the aliased type when this is a type alias specialization.
  void *Mem = Allocate(Size: sizeof(TemplateSpecializationType) +
                           sizeof(TemplateArgument) * SpecifiedArgs.size() +
                           (IsTypeAlias ? sizeof(QualType) : 0),
                       Align: alignof(TemplateSpecializationType));
  auto *Spec = new (Mem) TemplateSpecializationType(Template, IsTypeAlias,
                                                    SpecifiedArgs, Underlying);
  Types.push_back(Elt: Spec);
  return QualType(Spec, 0);
}
5807
/// Retrieve the uniqued ElaboratedType for the given keyword, qualifier and
/// named type. \p OwnedTagDecl, if non-null, is a tag declaration owned by
/// this type and is tail-allocated on the node.
QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword,
                                       NestedNameSpecifier *NNS,
                                       QualType NamedType,
                                       TagDecl *OwnedTagDecl) const {
  llvm::FoldingSetNodeID ID;
  ElaboratedType::Profile(ID, Keyword, NNS, NamedType, OwnedTagDecl);

  void *InsertPos = nullptr;
  ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  // The canonical type is that of the underlying named type. Computing it
  // may insert other nodes, but must not have created one with our profile.
  QualType Canon = NamedType;
  if (!Canon.isCanonical()) {
    Canon = getCanonicalType(T: NamedType);
    ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!CheckT && "Elaborated canonical type broken");
    (void)CheckT;
  }

  // The owned TagDecl (0 or 1 of them) is tail-allocated on the node.
  void *Mem =
      Allocate(Size: ElaboratedType::totalSizeToAlloc<TagDecl *>(Counts: !!OwnedTagDecl),
               Align: alignof(ElaboratedType));
  T = new (Mem) ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl);

  Types.push_back(Elt: T);
  ElaboratedTypes.InsertNode(N: T, InsertPos);
  return QualType(T, 0);
}
5837
/// Retrieve the uniqued ParenType sugar wrapping \p InnerType; its canonical
/// type is simply the canonical form of the inner type.
QualType
ASTContext::getParenType(QualType InnerType) const {
  llvm::FoldingSetNodeID ID;
  ParenType::Profile(ID, Inner: InnerType);

  void *InsertPos = nullptr;
  ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  // Canonicalizing the inner type may insert other nodes into the folding
  // set, but must not have created one with our profile.
  QualType Canon = InnerType;
  if (!Canon.isCanonical()) {
    Canon = getCanonicalType(T: InnerType);
    ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!CheckT && "Paren canonical type broken");
    (void)CheckT;
  }

  T = new (*this, alignof(ParenType)) ParenType(InnerType, Canon);
  Types.push_back(Elt: T);
  ParenTypes.InsertNode(N: T, InsertPos);
  return QualType(T, 0);
}
5861
5862QualType
5863ASTContext::getMacroQualifiedType(QualType UnderlyingTy,
5864 const IdentifierInfo *MacroII) const {
5865 QualType Canon = UnderlyingTy;
5866 if (!Canon.isCanonical())
5867 Canon = getCanonicalType(T: UnderlyingTy);
5868
5869 auto *newType = new (*this, alignof(MacroQualifiedType))
5870 MacroQualifiedType(UnderlyingTy, Canon, MacroII);
5871 Types.push_back(Elt: newType);
5872 return QualType(newType, 0);
5873}
5874
5875static ElaboratedTypeKeyword
5876getCanonicalElaboratedTypeKeyword(ElaboratedTypeKeyword Keyword) {
5877 switch (Keyword) {
5878 // These are just themselves.
5879 case ElaboratedTypeKeyword::None:
5880 case ElaboratedTypeKeyword::Struct:
5881 case ElaboratedTypeKeyword::Union:
5882 case ElaboratedTypeKeyword::Enum:
5883 case ElaboratedTypeKeyword::Interface:
5884 return Keyword;
5885
5886 // These are equivalent.
5887 case ElaboratedTypeKeyword::Typename:
5888 return ElaboratedTypeKeyword::None;
5889
5890 // These are functionally equivalent, so relying on their equivalence is
5891 // IFNDR. By making them equivalent, we disallow overloading, which at least
5892 // can produce a diagnostic.
5893 case ElaboratedTypeKeyword::Class:
5894 return ElaboratedTypeKeyword::Struct;
5895 }
5896 llvm_unreachable("unexpected keyword kind");
5897}
5898
/// Retrieve the uniqued DependentNameType for a dependent name of the form
/// 'keyword NNS::Name'. The canonical form uses the canonical keyword and
/// the canonical nested-name-specifier.
QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
                                          NestedNameSpecifier *NNS,
                                          const IdentifierInfo *Name) const {
  llvm::FoldingSetNodeID ID;
  DependentNameType::Profile(ID, Keyword, NNS, Name);

  void *InsertPos = nullptr;
  if (DependentNameType *T =
          DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(T, 0);

  ElaboratedTypeKeyword CanonKeyword =
      getCanonicalElaboratedTypeKeyword(Keyword);
  NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);

  // A null Canon means the node built below is itself canonical.
  QualType Canon;
  if (CanonKeyword != Keyword || CanonNNS != NNS) {
    Canon = getDependentNameType(Keyword: CanonKeyword, NNS: CanonNNS, Name);
    // The recursive call may have inserted nodes; re-run the lookup to
    // refresh InsertPos, and assert nothing matching our profile appeared.
    [[maybe_unused]] DependentNameType *T =
        DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!T && "broken canonicalization");
    assert(Canon.isCanonical());
  }

  DependentNameType *T = new (*this, alignof(DependentNameType))
      DependentNameType(Keyword, NNS, Name, Canon);
  Types.push_back(Elt: T);
  DependentNameTypes.InsertNode(N: T, InsertPos);
  return QualType(T, 0);
}
5929
5930QualType ASTContext::getDependentTemplateSpecializationType(
5931 ElaboratedTypeKeyword Keyword, const DependentTemplateStorage &Name,
5932 ArrayRef<TemplateArgumentLoc> Args) const {
5933 // TODO: avoid this copy
5934 SmallVector<TemplateArgument, 16> ArgCopy;
5935 for (unsigned I = 0, E = Args.size(); I != E; ++I)
5936 ArgCopy.push_back(Elt: Args[I].getArgument());
5937 return getDependentTemplateSpecializationType(Keyword, Name, Args: ArgCopy);
5938}
5939
/// Retrieve the uniqued DependentTemplateSpecializationType for
/// 'keyword NNS::template Name<Args...>'.
/// \param IsCanonical When true, the caller asserts that the keyword,
///        qualifier, and arguments are already canonical and that the
///        'template' keyword is present, so the node built here is its own
///        canonical type.
QualType ASTContext::getDependentTemplateSpecializationType(
    ElaboratedTypeKeyword Keyword, const DependentTemplateStorage &Name,
    ArrayRef<TemplateArgument> Args, bool IsCanonical) const {
  llvm::FoldingSetNodeID ID;
  DependentTemplateSpecializationType::Profile(ID, Context: *this, Keyword, Name, Args);

  void *InsertPos = nullptr;
  if (auto *T = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(
          ID, InsertPos))
    return QualType(T, 0);

  NestedNameSpecifier *NNS = Name.getQualifier();

  // A null Canon means the node built below is itself canonical.
  QualType Canon;
  if (!IsCanonical) {
    ElaboratedTypeKeyword CanonKeyword =
        getCanonicalElaboratedTypeKeyword(Keyword);
    NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
    bool AnyNonCanonArgs = false;
    auto CanonArgs =
        ::getCanonicalTemplateArguments(C: *this, Args, AnyNonCanonArgs);

    // Build the canonical node only if anything about the written form is
    // non-canonical (the canonical form always carries 'template').
    if (CanonKeyword != Keyword || AnyNonCanonArgs || CanonNNS != NNS ||
        !Name.hasTemplateKeyword()) {
      Canon = getDependentTemplateSpecializationType(
          Keyword: CanonKeyword, Name: {CanonNNS, Name.getName(), /*HasTemplateKeyword=*/true},
          Args: CanonArgs,
          /*IsCanonical=*/true);
      // Find the insert position again.
      [[maybe_unused]] auto *Nothing =
          DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID,
                                                                   InsertPos);
      assert(!Nothing && "canonical type broken");
    }
  } else {
    // Verify the caller's claim of canonicality (asserts builds only).
    assert(Keyword == getCanonicalElaboratedTypeKeyword(Keyword));
    assert(Name.hasTemplateKeyword());
    assert(NNS == getCanonicalNestedNameSpecifier(NNS));
#ifndef NDEBUG
    for (const auto &Arg : Args)
      assert(Arg.structurallyEquals(getCanonicalTemplateArgument(Arg)));
#endif
  }
  // The arguments are tail-allocated immediately after the type node.
  void *Mem = Allocate(Size: (sizeof(DependentTemplateSpecializationType) +
                        sizeof(TemplateArgument) * Args.size()),
                       Align: alignof(DependentTemplateSpecializationType));
  auto *T =
      new (Mem) DependentTemplateSpecializationType(Keyword, Name, Args, Canon);
  Types.push_back(Elt: T);
  DependentTemplateSpecializationTypes.InsertNode(N: T, InsertPos);
  return QualType(T, 0);
}
5992
/// Build the template argument that refers to the given template parameter
/// itself: a type argument for a type parameter, a DeclRefExpr for a
/// non-type parameter, and a template name for a template template
/// parameter. Parameter packs are wrapped in a pack expansion and then in a
/// one-element argument pack.
TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) const {
  TemplateArgument Arg;
  if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Val: Param)) {
    // Type parameter: the argument is the parameter's own type.
    QualType ArgType = getTypeDeclType(Decl: TTP);
    if (TTP->isParameterPack())
      ArgType = getPackExpansionType(Pattern: ArgType, NumExpansions: std::nullopt);

    Arg = TemplateArgument(ArgType);
  } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Val: Param)) {
    // Non-type parameter: the argument is a reference to the parameter.
    QualType T =
        NTTP->getType().getNonPackExpansionType().getNonLValueExprType(Context: *this);
    // For class NTTPs, ensure we include the 'const' so the type matches that
    // of a real template argument.
    // FIXME: It would be more faithful to model this as something like an
    // lvalue-to-rvalue conversion applied to a const-qualified lvalue.
    ExprValueKind VK;
    if (T->isRecordType()) {
      // C++ [temp.param]p8: An id-expression naming a non-type
      // template-parameter of class type T denotes a static storage duration
      // object of type const T.
      T.addConst();
      VK = VK_LValue;
    } else {
      VK = Expr::getValueKindForType(T: NTTP->getType());
    }
    Expr *E = new (*this)
        DeclRefExpr(*this, NTTP, /*RefersToEnclosingVariableOrCapture=*/false,
                    T, VK, NTTP->getLocation());

    if (NTTP->isParameterPack())
      E = new (*this) PackExpansionExpr(E, NTTP->getLocation(), std::nullopt);
    Arg = TemplateArgument(E, /*IsCanonical=*/false);
  } else {
    // Template template parameter: the argument is the (qualified) name of
    // the parameter itself.
    auto *TTP = cast<TemplateTemplateParmDecl>(Val: Param);
    TemplateName Name = getQualifiedTemplateName(
        NNS: nullptr, /*TemplateKeyword=*/false, Template: TemplateName(TTP));
    if (TTP->isParameterPack())
      Arg = TemplateArgument(Name, /*NumExpansions=*/std::nullopt);
    else
      Arg = TemplateArgument(Name);
  }

  // Packs are represented as an argument pack containing the expansion.
  if (Param->isTemplateParameterPack())
    Arg =
        TemplateArgument::CreatePackCopy(Context&: const_cast<ASTContext &>(*this), Args: Arg);

  return Arg;
}
6041
/// Retrieve the uniqued PackExpansionType 'Pattern...'.
/// \param NumExpansions The number of expansions, if known.
/// \param ExpectPackInType When true (the default for callers passing it),
///        assert that the pattern actually contains an unexpanded pack.
QualType ASTContext::getPackExpansionType(QualType Pattern,
                                          UnsignedOrNone NumExpansions,
                                          bool ExpectPackInType) const {
  assert((!ExpectPackInType || Pattern->containsUnexpandedParameterPack()) &&
         "Pack expansions must expand one or more parameter packs");

  llvm::FoldingSetNodeID ID;
  PackExpansionType::Profile(ID, Pattern, NumExpansions);

  void *InsertPos = nullptr;
  PackExpansionType *T = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  // A null Canon means the node built below is itself canonical.
  QualType Canon;
  if (!Pattern.isCanonical()) {
    Canon = getPackExpansionType(Pattern: getCanonicalType(T: Pattern), NumExpansions,
                                 /*ExpectPackInType=*/false);

    // Find the insert position again, in case we inserted an element into
    // PackExpansionTypes and invalidated our insert position.
    PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  T = new (*this, alignof(PackExpansionType))
      PackExpansionType(Pattern, Canon, NumExpansions);
  Types.push_back(Elt: T);
  PackExpansionTypes.InsertNode(N: T, InsertPos);
  return QualType(T, 0);
}
6072
6073/// CmpProtocolNames - Comparison predicate for sorting protocols
6074/// alphabetically.
6075static int CmpProtocolNames(ObjCProtocolDecl *const *LHS,
6076 ObjCProtocolDecl *const *RHS) {
6077 return DeclarationName::compare(LHS: (*LHS)->getDeclName(), RHS: (*RHS)->getDeclName());
6078}
6079
6080static bool areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) {
6081 if (Protocols.empty()) return true;
6082
6083 if (Protocols[0]->getCanonicalDecl() != Protocols[0])
6084 return false;
6085
6086 for (unsigned i = 1; i != Protocols.size(); ++i)
6087 if (CmpProtocolNames(LHS: &Protocols[i - 1], RHS: &Protocols[i]) >= 0 ||
6088 Protocols[i]->getCanonicalDecl() != Protocols[i])
6089 return false;
6090 return true;
6091}
6092
6093static void
6094SortAndUniqueProtocols(SmallVectorImpl<ObjCProtocolDecl *> &Protocols) {
6095 // Sort protocols, keyed by name.
6096 llvm::array_pod_sort(Start: Protocols.begin(), End: Protocols.end(), Compare: CmpProtocolNames);
6097
6098 // Canonicalize.
6099 for (ObjCProtocolDecl *&P : Protocols)
6100 P = P->getCanonicalDecl();
6101
6102 // Remove duplicates.
6103 auto ProtocolsEnd = llvm::unique(R&: Protocols);
6104 Protocols.erase(CS: ProtocolsEnd, CE: Protocols.end());
6105}
6106
6107QualType ASTContext::getObjCObjectType(QualType BaseType,
6108 ObjCProtocolDecl * const *Protocols,
6109 unsigned NumProtocols) const {
6110 return getObjCObjectType(Base: BaseType, typeArgs: {}, protocols: ArrayRef(Protocols, NumProtocols),
6111 /*isKindOf=*/false);
6112}
6113
/// Retrieve the uniqued ObjCObjectType for the given base type, type
/// arguments, protocol qualifiers, and __kindof flag.
QualType ASTContext::getObjCObjectType(
           QualType baseType,
           ArrayRef<QualType> typeArgs,
           ArrayRef<ObjCProtocolDecl *> protocols,
           bool isKindOf) const {
  // If the base type is an interface and there aren't any protocols or
  // type arguments to add, then the interface type will do just fine.
  if (typeArgs.empty() && protocols.empty() && !isKindOf &&
      isa<ObjCInterfaceType>(Val: baseType))
    return baseType;

  // Look in the folding set for an existing type.
  llvm::FoldingSetNodeID ID;
  ObjCObjectTypeImpl::Profile(ID, Base: baseType, typeArgs, protocols, isKindOf);
  void *InsertPos = nullptr;
  if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(QT, 0);

  // Determine the type arguments to be used for canonicalization,
  // which may be explicitly specified here or written on the base
  // type.
  ArrayRef<QualType> effectiveTypeArgs = typeArgs;
  if (effectiveTypeArgs.empty()) {
    if (const auto *baseObject = baseType->getAs<ObjCObjectType>())
      effectiveTypeArgs = baseObject->getTypeArgs();
  }

  // Build the canonical type, which has the canonical base type and a
  // sorted-and-uniqued list of protocols and the type arguments
  // canonicalized.
  QualType canonical;
  bool typeArgsAreCanonical = llvm::all_of(
      Range&: effectiveTypeArgs, P: [&](QualType type) { return type.isCanonical(); });
  bool protocolsSorted = areSortedAndUniqued(Protocols: protocols);
  if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) {
    // Determine the canonical type arguments.
    ArrayRef<QualType> canonTypeArgs;
    SmallVector<QualType, 4> canonTypeArgsVec;
    if (!typeArgsAreCanonical) {
      canonTypeArgsVec.reserve(N: effectiveTypeArgs.size());
      for (auto typeArg : effectiveTypeArgs)
        canonTypeArgsVec.push_back(Elt: getCanonicalType(T: typeArg));
      canonTypeArgs = canonTypeArgsVec;
    } else {
      canonTypeArgs = effectiveTypeArgs;
    }

    // Determine the canonical (sorted, uniqued, canonical-decl) protocols.
    ArrayRef<ObjCProtocolDecl *> canonProtocols;
    SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec;
    if (!protocolsSorted) {
      canonProtocolsVec.append(in_start: protocols.begin(), in_end: protocols.end());
      SortAndUniqueProtocols(Protocols&: canonProtocolsVec);
      canonProtocols = canonProtocolsVec;
    } else {
      canonProtocols = protocols;
    }

    canonical = getObjCObjectType(baseType: getCanonicalType(T: baseType), typeArgs: canonTypeArgs,
                                  protocols: canonProtocols, isKindOf);

    // Regenerate InsertPos.
    ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  // Type arguments and protocol pointers are tail-allocated after the node.
  unsigned size = sizeof(ObjCObjectTypeImpl);
  size += typeArgs.size() * sizeof(QualType);
  size += protocols.size() * sizeof(ObjCProtocolDecl *);
  void *mem = Allocate(Size: size, Align: alignof(ObjCObjectTypeImpl));
  auto *T =
    new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols,
                                 isKindOf);

  Types.push_back(Elt: T);
  ObjCObjectTypes.InsertNode(N: T, InsertPos);
  return QualType(T, 0);
}
6190
/// Apply Objective-C protocol qualifiers to the given type.
/// If this is for the canonical type of a type parameter, we can apply
/// protocol qualifiers on the ObjCObjectPointerType.
/// \param hasError Set to true when \p type cannot accept protocol
///        qualifiers (no matching case below); \p type is then returned
///        unchanged.
/// \param allowOnPointerType When true, qualifiers may be applied through
///        an ObjCObjectPointerType by rebuilding its pointee.
QualType
ASTContext::applyObjCProtocolQualifiers(QualType type,
                  ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError,
                  bool allowOnPointerType) const {
  hasError = false;

  // A type parameter type: rebuild it with the protocol list attached.
  if (const auto *objT = dyn_cast<ObjCTypeParamType>(Val: type.getTypePtr())) {
    return getObjCTypeParamType(Decl: objT->getDecl(), protocols);
  }

  // Apply protocol qualifiers to ObjCObjectPointerType.
  if (allowOnPointerType) {
    if (const auto *objPtr =
            dyn_cast<ObjCObjectPointerType>(Val: type.getTypePtr())) {
      const ObjCObjectType *objT = objPtr->getObjectType();
      // Merge protocol lists and construct ObjCObjectType.
      // Note: the local 'protocols' below intentionally shadows the
      // parameter with the merged (existing + new) list.
      SmallVector<ObjCProtocolDecl*, 8> protocolsVec;
      protocolsVec.append(in_start: objT->qual_begin(),
                          in_end: objT->qual_end());
      protocolsVec.append(in_start: protocols.begin(), in_end: protocols.end());
      ArrayRef<ObjCProtocolDecl *> protocols = protocolsVec;
      type = getObjCObjectType(
             baseType: objT->getBaseType(),
             typeArgs: objT->getTypeArgsAsWritten(),
             protocols,
             isKindOf: objT->isKindOfTypeAsWritten());
      return getObjCObjectPointerType(OIT: type);
    }
  }

  // Apply protocol qualifiers to ObjCObjectType.
  if (const auto *objT = dyn_cast<ObjCObjectType>(Val: type.getTypePtr())){
    // FIXME: Check for protocols to which the class type is already
    // known to conform.

    return getObjCObjectType(baseType: objT->getBaseType(),
                             typeArgs: objT->getTypeArgsAsWritten(),
                             protocols,
                             isKindOf: objT->isKindOfTypeAsWritten());
  }

  // If the canonical type is ObjCObjectType, ...
  if (type->isObjCObjectType()) {
    // Silently overwrite any existing protocol qualifiers.
    // TODO: determine whether that's the right thing to do.

    // FIXME: Check for protocols to which the class type is already
    // known to conform.
    return getObjCObjectType(baseType: type, typeArgs: {}, protocols, isKindOf: false);
  }

  // id<protocol-list>
  if (type->isObjCIdType()) {
    const auto *objPtr = type->castAs<ObjCObjectPointerType>();
    type = getObjCObjectType(baseType: ObjCBuiltinIdTy, typeArgs: {}, protocols,
                             isKindOf: objPtr->isKindOfType());
    return getObjCObjectPointerType(OIT: type);
  }

  // Class<protocol-list>
  if (type->isObjCClassType()) {
    const auto *objPtr = type->castAs<ObjCObjectPointerType>();
    type = getObjCObjectType(baseType: ObjCBuiltinClassTy, typeArgs: {}, protocols,
                             isKindOf: objPtr->isKindOfType());
    return getObjCObjectPointerType(OIT: type);
  }

  // No case matched: report the error and return the type unchanged.
  hasError = true;
  return type;
}
6264
/// Retrieve the uniqued ObjCTypeParamType for a type parameter declaration
/// with the given protocol qualifiers. The canonical type is the
/// (protocol-qualified) canonical underlying type of the parameter.
QualType
ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl,
                 ArrayRef<ObjCProtocolDecl *> protocols) const {
  // Look in the folding set for an existing type.
  llvm::FoldingSetNodeID ID;
  ObjCTypeParamType::Profile(ID, OTPDecl: Decl, CanonicalType: Decl->getUnderlyingType(), protocols);
  void *InsertPos = nullptr;
  if (ObjCTypeParamType *TypeParam =
      ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(TypeParam, 0);

  // We canonicalize to the underlying type.
  QualType Canonical = getCanonicalType(T: Decl->getUnderlyingType());
  if (!protocols.empty()) {
    // Apply the protocol qualifiers.
    bool hasError;
    Canonical = getCanonicalType(T: applyObjCProtocolQualifiers(
        type: Canonical, protocols, hasError, allowOnPointerType: true /*allowOnPointerType*/));
    assert(!hasError && "Error when apply protocol qualifier to bound type");
  }

  // Protocol pointers are tail-allocated after the node.
  unsigned size = sizeof(ObjCTypeParamType);
  size += protocols.size() * sizeof(ObjCProtocolDecl *);
  void *mem = Allocate(Size: size, Align: alignof(ObjCTypeParamType));
  auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols);

  Types.push_back(Elt: newType);
  ObjCTypeParamTypes.InsertNode(N: newType, InsertPos);
  return QualType(newType, 0);
}
6295
/// Adjust \p New's bound type to match \p Orig's underlying type, rebuilding
/// both its TypeSourceInfo and its TypeForDecl (preserving New's existing
/// protocol qualifiers).
void ASTContext::adjustObjCTypeParamBoundType(const ObjCTypeParamDecl *Orig,
                                              ObjCTypeParamDecl *New) const {
  New->setTypeSourceInfo(getTrivialTypeSourceInfo(T: Orig->getUnderlyingType()));
  // Update TypeForDecl after updating TypeSourceInfo.
  auto NewTypeParamTy = cast<ObjCTypeParamType>(Val: New->getTypeForDecl());
  SmallVector<ObjCProtocolDecl *, 8> protocols;
  protocols.append(in_start: NewTypeParamTy->qual_begin(), in_end: NewTypeParamTy->qual_end());
  QualType UpdatedTy = getObjCTypeParamType(Decl: New, protocols);
  New->setTypeForDecl(UpdatedTy.getTypePtr());
}
6306
6307/// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's
6308/// protocol list adopt all protocols in QT's qualified-id protocol
6309/// list.
6310bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT,
6311 ObjCInterfaceDecl *IC) {
6312 if (!QT->isObjCQualifiedIdType())
6313 return false;
6314
6315 if (const auto *OPT = QT->getAs<ObjCObjectPointerType>()) {
6316 // If both the right and left sides have qualifiers.
6317 for (auto *Proto : OPT->quals()) {
6318 if (!IC->ClassImplementsProtocol(lProto: Proto, lookupCategory: false))
6319 return false;
6320 }
6321 return true;
6322 }
6323 return false;
6324}
6325
/// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in
/// QT's qualified-id protocol list adopt all protocols in IDecl's list
/// of protocols.
bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT,
                                                ObjCInterfaceDecl *IDecl) {
  if (!QT->isObjCQualifiedIdType())
    return false;
  const auto *OPT = QT->getAs<ObjCObjectPointerType>();
  if (!OPT)
    return false;
  if (!IDecl->hasDefinition())
    return false;
  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols;
  CollectInheritedProtocols(CDecl: IDecl, Protocols&: InheritedProtocols);
  if (InheritedProtocols.empty())
    return false;
  // Check that if every protocol in list of id<plist> conforms to a protocol
  // of IDecl's, then bridge casting is ok.
  bool Conforms = false;
  for (auto *Proto : OPT->quals()) {
    Conforms = false;
    for (auto *PI : InheritedProtocols) {
      if (ProtocolCompatibleWithProtocol(lProto: Proto, rProto: PI)) {
        Conforms = true;
        break;
      }
    }
    if (!Conforms)
      break;
  }
  // First direction succeeded: every qualifier of QT matched something
  // IDecl inherits.
  if (Conforms)
    return true;

  // Otherwise try the reverse direction: every inherited protocol of IDecl
  // must be adopted (be in the hierarchy of) some qualifier of QT.
  for (auto *PI : InheritedProtocols) {
    // If both the right and left sides have qualifiers.
    bool Adopts = false;
    for (auto *Proto : OPT->quals()) {
      // return 'true' if 'PI' is in the inheritance hierarchy of Proto
      if ((Adopts = ProtocolCompatibleWithProtocol(lProto: PI, rProto: Proto)))
        break;
    }
    if (!Adopts)
      return false;
  }
  return true;
}
6372
/// getObjCObjectPointerType - Return a ObjCObjectPointerType type for
/// the given object type.
QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const {
  llvm::FoldingSetNodeID ID;
  ObjCObjectPointerType::Profile(ID, T: ObjectT);

  void *InsertPos = nullptr;
  if (ObjCObjectPointerType *QT =
        ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(QT, 0);

  // Find the canonical object type. A null Canonical means the node built
  // below is itself canonical.
  QualType Canonical;
  if (!ObjectT.isCanonical()) {
    Canonical = getObjCObjectPointerType(ObjectT: getCanonicalType(T: ObjectT));

    // Regenerate InsertPos.
    ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  // No match.
  void *Mem =
      Allocate(Size: sizeof(ObjCObjectPointerType), Align: alignof(ObjCObjectPointerType));
  auto *QType =
    new (Mem) ObjCObjectPointerType(Canonical, ObjectT);

  Types.push_back(Elt: QType);
  ObjCObjectPointerTypes.InsertNode(N: QType, InsertPos);
  return QualType(QType, 0);
}
6403
/// getObjCInterfaceType - Return the unique reference to the type for the
/// specified ObjC interface decl. The list of protocols is optional.
/// The result is cached on the declaration's TypeForDecl; when \p PrevDecl
/// is given, its cached type is reused for \p Decl.
QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
                                          ObjCInterfaceDecl *PrevDecl) const {
  // Already built for this declaration.
  if (Decl->TypeForDecl)
    return QualType(Decl->TypeForDecl, 0);

  // Redeclarations share the type node of the previous declaration.
  if (PrevDecl) {
    assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl");
    Decl->TypeForDecl = PrevDecl->TypeForDecl;
    return QualType(PrevDecl->TypeForDecl, 0);
  }

  // Prefer the definition, if there is one.
  if (const ObjCInterfaceDecl *Def = Decl->getDefinition())
    Decl = Def;

  void *Mem = Allocate(Size: sizeof(ObjCInterfaceType), Align: alignof(ObjCInterfaceType));
  auto *T = new (Mem) ObjCInterfaceType(Decl);
  Decl->TypeForDecl = T;
  Types.push_back(Elt: T);
  return QualType(T, 0);
}
6427
/// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique
/// TypeOfExprType AST's (since expression's are never shared). For example,
/// multiple declarations that refer to "typeof(x)" all contain different
/// DeclRefExpr's. This doesn't effect the type checker, since it operates
/// on canonical type's (which are always unique).
/// Dependent typeof(expr) types, however, ARE uniqued (via a folding set)
/// so that structurally identical dependent expressions share one
/// canonical node.
QualType ASTContext::getTypeOfExprType(Expr *tofExpr, TypeOfKind Kind) const {
  TypeOfExprType *toe;
  if (tofExpr->isTypeDependent()) {
    llvm::FoldingSetNodeID ID;
    DependentTypeOfExprType::Profile(ID, Context: *this, E: tofExpr,
                                     IsUnqual: Kind == TypeOfKind::Unqualified);

    void *InsertPos = nullptr;
    DependentTypeOfExprType *Canon =
        DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos);
    if (Canon) {
      // We already have a "canonical" version of an identical, dependent
      // typeof(expr) type. Use that as our canonical type.
      toe = new (*this, alignof(TypeOfExprType)) TypeOfExprType(
          *this, tofExpr, Kind, QualType((TypeOfExprType *)Canon, 0));
    } else {
      // Build a new, canonical typeof(expr) type.
      Canon = new (*this, alignof(DependentTypeOfExprType))
          DependentTypeOfExprType(*this, tofExpr, Kind);
      DependentTypeOfExprTypes.InsertNode(N: Canon, InsertPos);
      toe = Canon;
    }
  } else {
    // Non-dependent: the canonical type is that of the expression's type.
    QualType Canonical = getCanonicalType(T: tofExpr->getType());
    toe = new (*this, alignof(TypeOfExprType))
        TypeOfExprType(*this, tofExpr, Kind, Canonical);
  }
  Types.push_back(Elt: toe);
  return QualType(toe, 0);
}
6463
6464/// getTypeOfType - Unlike many "get<Type>" functions, we don't unique
6465/// TypeOfType nodes. The only motivation to unique these nodes would be
6466/// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be
6467/// an issue. This doesn't affect the type checker, since it operates
6468/// on canonical types (which are always unique).
6469QualType ASTContext::getTypeOfType(QualType tofType, TypeOfKind Kind) const {
6470 QualType Canonical = getCanonicalType(T: tofType);
6471 auto *tot = new (*this, alignof(TypeOfType))
6472 TypeOfType(*this, tofType, Canonical, Kind);
6473 Types.push_back(Elt: tot);
6474 return QualType(tot, 0);
6475}
6476
6477/// getReferenceQualifiedType - Given an expr, will return the type for
6478/// that expression, as in [dcl.type.simple]p4 but without taking id-expressions
6479/// and class member access into account.
6480QualType ASTContext::getReferenceQualifiedType(const Expr *E) const {
6481 // C++11 [dcl.type.simple]p4:
6482 // [...]
6483 QualType T = E->getType();
6484 switch (E->getValueKind()) {
6485 // - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the
6486 // type of e;
6487 case VK_XValue:
6488 return getRValueReferenceType(T);
6489 // - otherwise, if e is an lvalue, decltype(e) is T&, where T is the
6490 // type of e;
6491 case VK_LValue:
6492 return getLValueReferenceType(T);
6493 // - otherwise, decltype(e) is the type of e.
6494 case VK_PRValue:
6495 return T;
6496 }
6497 llvm_unreachable("Unknown value kind");
6498}
6499
/// Unlike many "get<Type>" functions, we don't unique DecltypeType
/// nodes. This would never be helpful, since each such type has its own
/// expression, and would not give a significant memory saving, since there
/// is an Expr tree under each such type.
/// Instantiation-dependent decltype(e) with a null underlying type is the
/// exception: those are uniqued so equivalent dependent expressions share
/// one canonical node.
QualType ASTContext::getDecltypeType(Expr *E, QualType UnderlyingType) const {
  // C++11 [temp.type]p2:
  //   If an expression e involves a template parameter, decltype(e) denotes a
  //   unique dependent type. Two such decltype-specifiers refer to the same
  //   type only if their expressions are equivalent (14.5.6.1).
  QualType CanonType;
  if (!E->isInstantiationDependent()) {
    // Non-dependent: canonicalize the already-computed underlying type.
    CanonType = getCanonicalType(T: UnderlyingType);
  } else if (!UnderlyingType.isNull()) {
    // Dependent but with a known underlying type: the canonical node is the
    // underlying-type-less dependent form built by the recursive call.
    CanonType = getDecltypeType(E, UnderlyingType: QualType());
  } else {
    llvm::FoldingSetNodeID ID;
    DependentDecltypeType::Profile(ID, Context: *this, E);

    void *InsertPos = nullptr;
    if (DependentDecltypeType *Canon =
            DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos))
      return QualType(Canon, 0);

    // Build a new, canonical decltype(expr) type.
    auto *DT =
        new (*this, alignof(DependentDecltypeType)) DependentDecltypeType(E);
    DependentDecltypeTypes.InsertNode(N: DT, InsertPos);
    Types.push_back(Elt: DT);
    return QualType(DT, 0);
  }
  auto *DT = new (*this, alignof(DecltypeType))
      DecltypeType(E, UnderlyingType, CanonType);
  Types.push_back(Elt: DT);
  return QualType(DT, 0);
}
6535
/// Retrieve a PackIndexingType 'Pattern...[IndexExpr]'.
/// When the pack is fully substituted and the index is known, the canonical
/// type is simply that of the selected expansion; otherwise a uniqued
/// dependent canonical node is built. The sugar node itself is not uniqued.
QualType ASTContext::getPackIndexingType(QualType Pattern, Expr *IndexExpr,
                                         bool FullySubstituted,
                                         ArrayRef<QualType> Expansions,
                                         UnsignedOrNone Index) const {
  QualType Canonical;
  if (FullySubstituted && Index) {
    // The index selects a concrete expansion; canonicalize to it.
    Canonical = getCanonicalType(T: Expansions[*Index]);
  } else {
    // Dependent case: look up or build the canonical dependent node.
    llvm::FoldingSetNodeID ID;
    PackIndexingType::Profile(ID, Context: *this, Pattern: Pattern.getCanonicalType(), E: IndexExpr,
                              FullySubstituted, Expansions);
    void *InsertPos = nullptr;
    PackIndexingType *Canon =
        DependentPackIndexingTypes.FindNodeOrInsertPos(ID, InsertPos);
    if (!Canon) {
      // The expansions are tail-allocated after the node.
      void *Mem = Allocate(
          Size: PackIndexingType::totalSizeToAlloc<QualType>(Counts: Expansions.size()),
          Align: TypeAlignment);
      Canon =
          new (Mem) PackIndexingType(QualType(), Pattern.getCanonicalType(),
                                     IndexExpr, FullySubstituted, Expansions);
      DependentPackIndexingTypes.InsertNode(N: Canon, InsertPos);
    }
    Canonical = QualType(Canon, 0);
  }

  // Build the (non-uniqued) sugar node for the as-written form.
  void *Mem =
      Allocate(Size: PackIndexingType::totalSizeToAlloc<QualType>(Counts: Expansions.size()),
               Align: TypeAlignment);
  auto *T = new (Mem) PackIndexingType(Canonical, Pattern, IndexExpr,
                                       FullySubstituted, Expansions);
  Types.push_back(Elt: T);
  return QualType(T, 0);
}
6570
/// getUnaryTransformType - Return the uniqued reference to a unary transform
/// type (e.g. __underlying_type(T)) applied to \p BaseType, with
/// \p UnderlyingType as the already-computed result when it is known.
/// Dependent transforms are canonicalized on their canonical base type.
QualType
ASTContext::getUnaryTransformType(QualType BaseType, QualType UnderlyingType,
                                  UnaryTransformType::UTTKind Kind) const {

  llvm::FoldingSetNodeID ID;
  UnaryTransformType::Profile(ID, BaseType, UnderlyingType, UKind: Kind);

  void *InsertPos = nullptr;
  if (UnaryTransformType *UT =
          UnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(UT, 0);

  QualType CanonType;
  if (!BaseType->isDependentType()) {
    // Non-dependent: the transform has been computed, so the canonical type
    // is just the canonical form of its result.
    CanonType = UnderlyingType.getCanonicalType();
  } else {
    // Dependent: there is no computed result; the canonical node is the
    // transform applied to the canonical base type.
    assert(UnderlyingType.isNull() || BaseType == UnderlyingType);
    UnderlyingType = QualType();
    if (QualType CanonBase = BaseType.getCanonicalType();
        BaseType != CanonBase) {
      CanonType = getUnaryTransformType(BaseType: CanonBase, UnderlyingType: QualType(), Kind);
      assert(CanonType.isCanonical());

      // Find the insertion position again.
      [[maybe_unused]] UnaryTransformType *UT =
          UnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!UT && "broken canonicalization");
    }
  }

  auto *UT = new (*this, alignof(UnaryTransformType))
      UnaryTransformType(BaseType, UnderlyingType, Kind, CanonType);
  UnaryTransformTypes.InsertNode(N: UT, InsertPos);
  Types.push_back(Elt: UT);
  return QualType(UT, 0);
}
6609
/// Build (or look up) an AutoType. \p IsCanon is set on the recursive call
/// that constructs the canonical constrained-auto node, so that the
/// canonicalization step is skipped for it.
QualType ASTContext::getAutoTypeInternal(
    QualType DeducedType, AutoTypeKeyword Keyword, bool IsDependent,
    bool IsPack, ConceptDecl *TypeConstraintConcept,
    ArrayRef<TemplateArgument> TypeConstraintArgs, bool IsCanon) const {
  // The plain undeduced, unconstrained 'auto' has a dedicated singleton.
  if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto &&
      !TypeConstraintConcept && !IsDependent)
    return getAutoDeductType();

  // Look in the folding set for an existing type.
  llvm::FoldingSetNodeID ID;
  bool IsDeducedDependent =
      !DeducedType.isNull() && DeducedType->isDependentType();
  AutoType::Profile(ID, Context: *this, Deduced: DeducedType, Keyword,
                    IsDependent: IsDependent || IsDeducedDependent, CD: TypeConstraintConcept,
                    Arguments: TypeConstraintArgs);
  if (auto const AT_iter = AutoTypes.find(Val: ID); AT_iter != AutoTypes.end())
    return QualType(AT_iter->getSecond(), 0);

  // Compute the canonical type: either the canonical deduced type, or a
  // recursively-built auto with canonical concept and arguments. A null
  // Canon means this node is itself canonical.
  QualType Canon;
  if (!IsCanon) {
    if (!DeducedType.isNull()) {
      Canon = DeducedType.getCanonicalType();
    } else if (TypeConstraintConcept) {
      bool AnyNonCanonArgs = false;
      ConceptDecl *CanonicalConcept = TypeConstraintConcept->getCanonicalDecl();
      auto CanonicalConceptArgs = ::getCanonicalTemplateArguments(
          C: *this, Args: TypeConstraintArgs, AnyNonCanonArgs);
      if (CanonicalConcept != TypeConstraintConcept || AnyNonCanonArgs) {
        Canon = getAutoTypeInternal(DeducedType: QualType(), Keyword, IsDependent, IsPack,
                                    TypeConstraintConcept: CanonicalConcept, TypeConstraintArgs: CanonicalConceptArgs,
                                    /*IsCanon=*/true);
      }
    }
  }

  // Constraint arguments are stored in trailing storage.
  void *Mem = Allocate(Size: sizeof(AutoType) +
                           sizeof(TemplateArgument) * TypeConstraintArgs.size(),
                       Align: alignof(AutoType));
  auto *AT = new (Mem) AutoType(
      DeducedType, Keyword,
      (IsDependent ? TypeDependence::DependentInstantiation
                   : TypeDependence::None) |
          (IsPack ? TypeDependence::UnexpandedPack : TypeDependence::None),
      Canon, TypeConstraintConcept, TypeConstraintArgs);
#ifndef NDEBUG
  // Sanity-check that the constructed node profiles to the key we looked up.
  llvm::FoldingSetNodeID InsertedID;
  AT->Profile(InsertedID, *this);
  assert(InsertedID == ID && "ID does not match");
#endif
  Types.push_back(Elt: AT);
  AutoTypes.try_emplace(Key: ID, Args&: AT);
  return QualType(AT, 0);
}
6663
6664/// getAutoType - Return the uniqued reference to the 'auto' type which has been
6665/// deduced to the given type, or to the canonical undeduced 'auto' type, or the
6666/// canonical deduced-but-dependent 'auto' type.
6667QualType
6668ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword,
6669 bool IsDependent, bool IsPack,
6670 ConceptDecl *TypeConstraintConcept,
6671 ArrayRef<TemplateArgument> TypeConstraintArgs) const {
6672 assert((!IsPack || IsDependent) && "only use IsPack for a dependent pack");
6673 assert((!IsDependent || DeducedType.isNull()) &&
6674 "A dependent auto should be undeduced");
6675 return getAutoTypeInternal(DeducedType, Keyword, IsDependent, IsPack,
6676 TypeConstraintConcept, TypeConstraintArgs);
6677}
6678
/// Strip a top-level type-constraint from an 'auto' / 'decltype(auto)' type,
/// returning \p T unchanged when there is nothing to strip. Qualifiers on
/// \p T are preserved on the rebuilt type.
QualType ASTContext::getUnconstrainedType(QualType T) const {
  QualType CanonT = T.getNonPackExpansionType().getCanonicalType();

  // Remove a type-constraint from a top-level auto or decltype(auto).
  if (auto *AT = CanonT->getAs<AutoType>()) {
    if (!AT->isConstrained())
      return T;
    // Rebuild the auto without its concept, then re-apply T's qualifiers.
    return getQualifiedType(T: getAutoType(DeducedType: QualType(), Keyword: AT->getKeyword(),
                                        IsDependent: AT->isDependentType(),
                                        IsPack: AT->containsUnexpandedParameterPack()),
                            Qs: T.getQualifiers());
  }

  // FIXME: We only support constrained auto at the top level in the type of a
  // non-type template parameter at the moment. Once we lift that restriction,
  // we'll need to recursively build types containing auto here.
  assert(!CanonT->getContainedAutoType() ||
         !CanonT->getContainedAutoType()->isConstrained());
  return T;
}
6699
/// Build (or look up) a DeducedTemplateSpecializationType for \p Template,
/// with \p Canon as its canonical type (null when this node is canonical).
QualType ASTContext::getDeducedTemplateSpecializationTypeInternal(
    TemplateName Template, QualType DeducedType, bool IsDependent,
    QualType Canon) const {
  // Look in the folding set for an existing type.
  void *InsertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  DeducedTemplateSpecializationType::Profile(ID, Template, Deduced: DeducedType,
                                             IsDependent);
  if (DeducedTemplateSpecializationType *DTST =
          DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(DTST, 0);

  auto *DTST = new (*this, alignof(DeducedTemplateSpecializationType))
      DeducedTemplateSpecializationType(Template, DeducedType, IsDependent,
                                        Canon);

#ifndef NDEBUG
  // Sanity-check that the new node profiles to the key we looked up.
  llvm::FoldingSetNodeID TempID;
  DTST->Profile(TempID);
  assert(ID == TempID && "ID does not match");
#endif
  Types.push_back(Elt: DTST);
  DeducedTemplateSpecializationTypes.InsertNode(N: DTST, InsertPos);
  return QualType(DTST, 0);
}
6725
6726/// Return the uniqued reference to the deduced template specialization type
6727/// which has been deduced to the given type, or to the canonical undeduced
6728/// such type, or the canonical deduced-but-dependent such type.
6729QualType ASTContext::getDeducedTemplateSpecializationType(
6730 TemplateName Template, QualType DeducedType, bool IsDependent) const {
6731 QualType Canon = DeducedType.isNull()
6732 ? getDeducedTemplateSpecializationTypeInternal(
6733 Template: getCanonicalTemplateName(Name: Template), DeducedType: QualType(),
6734 IsDependent, Canon: QualType())
6735 : DeducedType.getCanonicalType();
6736 return getDeducedTemplateSpecializationTypeInternal(Template, DeducedType,
6737 IsDependent, Canon);
6738}
6739
/// getAtomicType - Return the uniqued reference to the atomic type for
/// the given value type.
QualType ASTContext::getAtomicType(QualType T) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  AtomicType::Profile(ID, T);

  void *InsertPos = nullptr;
  if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(AT, 0);

  // If the atomic value type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getAtomicType(T: getCanonicalType(T));

    // Get the new insert position for the node we care about.
    // (The recursive call above may have grown the folding set, invalidating
    // the previously computed InsertPos.)
    AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, alignof(AtomicType)) AtomicType(T, Canonical);
  Types.push_back(Elt: New);
  AtomicTypes.InsertNode(N: New, InsertPos);
  return QualType(New, 0);
}
6767
6768/// getAutoDeductType - Get type pattern for deducing against 'auto'.
6769QualType ASTContext::getAutoDeductType() const {
6770 if (AutoDeductTy.isNull())
6771 AutoDeductTy = QualType(new (*this, alignof(AutoType))
6772 AutoType(QualType(), AutoTypeKeyword::Auto,
6773 TypeDependence::None, QualType(),
6774 /*concept*/ nullptr, /*args*/ {}),
6775 0);
6776 return AutoDeductTy;
6777}
6778
6779/// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'.
6780QualType ASTContext::getAutoRRefDeductType() const {
6781 if (AutoRRefDeductTy.isNull())
6782 AutoRRefDeductTy = getRValueReferenceType(T: getAutoDeductType());
6783 assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern");
6784 return AutoRRefDeductTy;
6785}
6786
6787/// getTagDeclType - Return the unique reference to the type for the
6788/// specified TagDecl (struct/union/class/enum) decl.
6789QualType ASTContext::getTagDeclType(const TagDecl *Decl) const {
6790 assert(Decl);
6791 // FIXME: What is the design on getTagDeclType when it requires casting
6792 // away const? mutable?
6793 return getTypeDeclType(Decl: const_cast<TagDecl*>(Decl));
6794}
6795
6796/// getSizeType - Return the unique type for "size_t" (C99 7.17), the result
6797/// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and
6798/// needs to agree with the definition in <stddef.h>.
6799CanQualType ASTContext::getSizeType() const {
6800 return getFromTargetType(Type: Target->getSizeType());
6801}
6802
6803/// Return the unique signed counterpart of the integer type
6804/// corresponding to size_t.
6805CanQualType ASTContext::getSignedSizeType() const {
6806 return getFromTargetType(Type: Target->getSignedSizeType());
6807}
6808
6809/// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5).
6810CanQualType ASTContext::getIntMaxType() const {
6811 return getFromTargetType(Type: Target->getIntMaxType());
6812}
6813
6814/// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5).
6815CanQualType ASTContext::getUIntMaxType() const {
6816 return getFromTargetType(Type: Target->getUIntMaxType());
6817}
6818
6819/// getSignedWCharType - Return the type of "signed wchar_t".
6820/// Used when in C++, as a GCC extension.
6821QualType ASTContext::getSignedWCharType() const {
6822 // FIXME: derive from "Target" ?
6823 return WCharTy;
6824}
6825
6826/// getUnsignedWCharType - Return the type of "unsigned wchar_t".
6827/// Used when in C++, as a GCC extension.
6828QualType ASTContext::getUnsignedWCharType() const {
6829 // FIXME: derive from "Target" ?
6830 return UnsignedIntTy;
6831}
6832
6833QualType ASTContext::getIntPtrType() const {
6834 return getFromTargetType(Type: Target->getIntPtrType());
6835}
6836
6837QualType ASTContext::getUIntPtrType() const {
6838 return getCorrespondingUnsignedType(T: getIntPtrType());
6839}
6840
6841/// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17)
6842/// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9).
6843QualType ASTContext::getPointerDiffType() const {
6844 return getFromTargetType(Type: Target->getPtrDiffType(AddrSpace: LangAS::Default));
6845}
6846
6847/// Return the unique unsigned counterpart of "ptrdiff_t"
6848/// integer type. The standard (C11 7.21.6.1p7) refers to this type
6849/// in the definition of %tu format specifier.
6850QualType ASTContext::getUnsignedPointerDiffType() const {
6851 return getFromTargetType(Type: Target->getUnsignedPtrDiffType(AddrSpace: LangAS::Default));
6852}
6853
6854/// Return the unique type for "pid_t" defined in
6855/// <sys/types.h>. We need this to compute the correct type for vfork().
6856QualType ASTContext::getProcessIDType() const {
6857 return getFromTargetType(Type: Target->getProcessIDType());
6858}
6859
6860//===----------------------------------------------------------------------===//
6861// Type Operators
6862//===----------------------------------------------------------------------===//
6863
6864CanQualType ASTContext::getCanonicalParamType(QualType T) const {
6865 // Push qualifiers into arrays, and then discard any remaining
6866 // qualifiers.
6867 T = getCanonicalType(T);
6868 T = getVariableArrayDecayedType(type: T);
6869 const Type *Ty = T.getTypePtr();
6870 QualType Result;
6871 if (getLangOpts().HLSL && isa<ConstantArrayType>(Val: Ty)) {
6872 Result = getArrayParameterType(Ty: QualType(Ty, 0));
6873 } else if (isa<ArrayType>(Val: Ty)) {
6874 Result = getArrayDecayedType(T: QualType(Ty,0));
6875 } else if (isa<FunctionType>(Val: Ty)) {
6876 Result = getPointerType(T: QualType(Ty, 0));
6877 } else {
6878 Result = QualType(Ty, 0);
6879 }
6880
6881 return CanQualType::CreateUnsafe(Other: Result);
6882}
6883
/// Strip qualifiers from \p type, looking through arrays: the qualifiers
/// removed from the innermost element type (plus any outer ones) are
/// accumulated into \p quals, and the array structure is rebuilt around the
/// unqualified element type.
QualType ASTContext::getUnqualifiedArrayType(QualType type,
                                             Qualifiers &quals) const {
  SplitQualType splitType = type.getSplitUnqualifiedType();

  // FIXME: getSplitUnqualifiedType() actually walks all the way to
  // the unqualified desugared type and then drops it on the floor.
  // We then have to strip that sugar back off with
  // getUnqualifiedDesugaredType(), which is silly.
  const auto *AT =
      dyn_cast<ArrayType>(Val: splitType.Ty->getUnqualifiedDesugaredType());

  // If we don't have an array, just use the results in splitType.
  if (!AT) {
    quals = splitType.Quals;
    return QualType(splitType.Ty, 0);
  }

  // Otherwise, recurse on the array's element type.
  QualType elementType = AT->getElementType();
  QualType unqualElementType = getUnqualifiedArrayType(type: elementType, quals);

  // If that didn't change the element type, AT has no qualifiers, so we
  // can just use the results in splitType.
  if (elementType == unqualElementType) {
    assert(quals.empty()); // from the recursive call
    quals = splitType.Quals;
    return QualType(splitType.Ty, 0);
  }

  // Otherwise, add in the qualifiers from the outermost type, then
  // build the type back up.
  quals.addConsistentQualifiers(qs: splitType.Quals);

  // Rebuild the same kind of array around the unqualified element type,
  // dropping index-type qualifiers where the array kind carries them.
  if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: AT)) {
    return getConstantArrayType(EltTy: unqualElementType, ArySizeIn: CAT->getSize(),
                                SizeExpr: CAT->getSizeExpr(), ASM: CAT->getSizeModifier(), IndexTypeQuals: 0);
  }

  if (const auto *IAT = dyn_cast<IncompleteArrayType>(Val: AT)) {
    return getIncompleteArrayType(elementType: unqualElementType, ASM: IAT->getSizeModifier(), elementTypeQuals: 0);
  }

  if (const auto *VAT = dyn_cast<VariableArrayType>(Val: AT)) {
    return getVariableArrayType(EltTy: unqualElementType, NumElts: VAT->getSizeExpr(),
                                ASM: VAT->getSizeModifier(),
                                IndexTypeQuals: VAT->getIndexTypeCVRQualifiers());
  }

  const auto *DSAT = cast<DependentSizedArrayType>(Val: AT);
  return getDependentSizedArrayType(elementType: unqualElementType, numElements: DSAT->getSizeExpr(),
                                    ASM: DSAT->getSizeModifier(), elementTypeQuals: 0);
}
6936
6937/// Attempt to unwrap two types that may both be array types with the same bound
6938/// (or both be array types of unknown bound) for the purpose of comparing the
6939/// cv-decomposition of two types per C++ [conv.qual].
6940///
6941/// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in
6942/// C++20 [conv.qual], if permitted by the current language mode.
/// Attempt to unwrap two types that may both be array types with the same
/// bound (or both be array types of unknown bound) for the purpose of
/// comparing the cv-decomposition of two types per C++ [conv.qual].
/// T1 and T2 are updated in place to their element types on each successful
/// unwrap.
///
/// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in
///        C++20 [conv.qual], if permitted by the current language mode.
void ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2,
                                         bool AllowPiMismatch) const {
  while (true) {
    auto *AT1 = getAsArrayType(T: T1);
    if (!AT1)
      return;

    auto *AT2 = getAsArrayType(T: T2);
    if (!AT2)
      return;

    // If we don't have two array types with the same constant bound nor two
    // incomplete array types, we've unwrapped everything we can.
    // C++20 also permits one type to be a constant array type and the other
    // to be an incomplete array type.
    // FIXME: Consider also unwrapping array of unknown bound and VLA.
    if (auto *CAT1 = dyn_cast<ConstantArrayType>(Val: AT1)) {
      auto *CAT2 = dyn_cast<ConstantArrayType>(Val: AT2);
      if (!((CAT2 && CAT1->getSize() == CAT2->getSize()) ||
            (AllowPiMismatch && getLangOpts().CPlusPlus20 &&
             isa<IncompleteArrayType>(Val: AT2))))
        return;
    } else if (isa<IncompleteArrayType>(Val: AT1)) {
      if (!(isa<IncompleteArrayType>(Val: AT2) ||
            (AllowPiMismatch && getLangOpts().CPlusPlus20 &&
             isa<ConstantArrayType>(Val: AT2))))
        return;
    } else {
      return;
    }

    // Both sides unwrap to their element types; continue with those.
    T1 = AT1->getElementType();
    T2 = AT2->getElementType();
  }
}
6978
6979/// Attempt to unwrap two types that may be similar (C++ [conv.qual]).
6980///
6981/// If T1 and T2 are both pointer types of the same kind, or both array types
6982/// with the same bound, unwraps layers from T1 and T2 until a pointer type is
6983/// unwrapped. Top-level qualifiers on T1 and T2 are ignored.
6984///
6985/// This function will typically be called in a loop that successively
6986/// "unwraps" pointer and pointer-to-member types to compare them at each
6987/// level.
6988///
6989/// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in
6990/// C++20 [conv.qual], if permitted by the current language mode.
6991///
6992/// \return \c true if a pointer type was unwrapped, \c false if we reached a
6993/// pair of types that can't be unwrapped further.
/// Attempt to unwrap two types that may be similar (C++ [conv.qual]).
///
/// If T1 and T2 are both pointer types of the same kind, or both array types
/// with the same bound, unwraps layers from T1 and T2 until a pointer type is
/// unwrapped. Top-level qualifiers on T1 and T2 are ignored.
///
/// This function will typically be called in a loop that successively
/// "unwraps" pointer and pointer-to-member types to compare them at each
/// level.
///
/// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in
///        C++20 [conv.qual], if permitted by the current language mode.
///
/// \return \c true if a pointer type was unwrapped, \c false if we reached a
///         pair of types that can't be unwrapped further.
bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2,
                                    bool AllowPiMismatch) const {
  // First strip any matching array layers.
  UnwrapSimilarArrayTypes(T1, T2, AllowPiMismatch);

  const auto *T1PtrType = T1->getAs<PointerType>();
  const auto *T2PtrType = T2->getAs<PointerType>();
  if (T1PtrType && T2PtrType) {
    T1 = T1PtrType->getPointeeType();
    T2 = T2PtrType->getPointeeType();
    return true;
  }

  if (const auto *T1MPType = T1->getAs<MemberPointerType>(),
                 *T2MPType = T2->getAs<MemberPointerType>();
      T1MPType && T2MPType) {
    // Member pointers only unwrap when they point into the same class
    // (compared via canonical decls) with matching qualifiers.
    if (auto *RD1 = T1MPType->getMostRecentCXXRecordDecl(),
              *RD2 = T2MPType->getMostRecentCXXRecordDecl();
        RD1 != RD2 && RD1->getCanonicalDecl() != RD2->getCanonicalDecl())
      return false;
    if (getCanonicalNestedNameSpecifier(NNS: T1MPType->getQualifier()) !=
        getCanonicalNestedNameSpecifier(NNS: T2MPType->getQualifier()))
      return false;
    T1 = T1MPType->getPointeeType();
    T2 = T2MPType->getPointeeType();
    return true;
  }

  if (getLangOpts().ObjC) {
    const auto *T1OPType = T1->getAs<ObjCObjectPointerType>();
    const auto *T2OPType = T2->getAs<ObjCObjectPointerType>();
    if (T1OPType && T2OPType) {
      T1 = T1OPType->getPointeeType();
      T2 = T2OPType->getPointeeType();
      return true;
    }
  }

  // FIXME: Block pointers, too?

  return false;
}
7035
7036bool ASTContext::hasSimilarType(QualType T1, QualType T2) const {
7037 while (true) {
7038 Qualifiers Quals;
7039 T1 = getUnqualifiedArrayType(type: T1, quals&: Quals);
7040 T2 = getUnqualifiedArrayType(type: T2, quals&: Quals);
7041 if (hasSameType(T1, T2))
7042 return true;
7043 if (!UnwrapSimilarTypes(T1, T2))
7044 return false;
7045 }
7046}
7047
7048bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) {
7049 while (true) {
7050 Qualifiers Quals1, Quals2;
7051 T1 = getUnqualifiedArrayType(type: T1, quals&: Quals1);
7052 T2 = getUnqualifiedArrayType(type: T2, quals&: Quals2);
7053
7054 Quals1.removeCVRQualifiers();
7055 Quals2.removeCVRQualifiers();
7056 if (Quals1 != Quals2)
7057 return false;
7058
7059 if (hasSameType(T1, T2))
7060 return true;
7061
7062 if (!UnwrapSimilarTypes(T1, T2, /*AllowPiMismatch*/ false))
7063 return false;
7064 }
7065}
7066
/// Compute the DeclarationNameInfo that names \p Name at \p NameLoc,
/// dispatching on every TemplateName kind.
DeclarationNameInfo
ASTContext::getNameForTemplate(TemplateName Name,
                               SourceLocation NameLoc) const {
  switch (Name.getKind()) {
  case TemplateName::QualifiedTemplate:
  case TemplateName::Template:
    // DNInfo work in progress: CHECKME: what about DNLoc?
    return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(),
                               NameLoc);

  case TemplateName::OverloadedTemplate: {
    OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate();
    // All overloads share the same name, so any of them works here.
    // DNInfo work in progress: CHECKME: what about DNLoc?
    return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc);
  }

  case TemplateName::AssumedTemplate: {
    AssumedTemplateStorage *Storage = Name.getAsAssumedTemplateName();
    return DeclarationNameInfo(Storage->getDeclName(), NameLoc);
  }

  case TemplateName::DependentTemplate: {
    DependentTemplateName *DTN = Name.getAsDependentTemplateName();
    // A dependent template name is either an identifier or an overloaded
    // operator name.
    IdentifierOrOverloadedOperator TN = DTN->getName();
    DeclarationName DName;
    if (const IdentifierInfo *II = TN.getIdentifier()) {
      DName = DeclarationNames.getIdentifier(ID: II);
      return DeclarationNameInfo(DName, NameLoc);
    } else {
      DName = DeclarationNames.getCXXOperatorName(Op: TN.getOperator());
      // DNInfo work in progress: FIXME: source locations?
      DeclarationNameLoc DNLoc =
          DeclarationNameLoc::makeCXXOperatorNameLoc(Range: SourceRange());
      return DeclarationNameInfo(DName, NameLoc, DNLoc);
    }
  }

  case TemplateName::SubstTemplateTemplateParm: {
    SubstTemplateTemplateParmStorage *subst
      = Name.getAsSubstTemplateTemplateParm();
    return DeclarationNameInfo(subst->getParameter()->getDeclName(),
                               NameLoc);
  }

  case TemplateName::SubstTemplateTemplateParmPack: {
    SubstTemplateTemplateParmPackStorage *subst
      = Name.getAsSubstTemplateTemplateParmPack();
    return DeclarationNameInfo(subst->getParameterPack()->getDeclName(),
                               NameLoc);
  }
  case TemplateName::UsingTemplate:
    return DeclarationNameInfo(Name.getAsUsingShadowDecl()->getDeclName(),
                               NameLoc);
  case TemplateName::DeducedTemplate: {
    // A deduced template is named by its underlying template.
    DeducedTemplateStorage *DTS = Name.getAsDeducedTemplateName();
    return getNameForTemplate(Name: DTS->getUnderlying(), NameLoc);
  }
  }

  llvm_unreachable("bad template name kind!");
}
7128
7129static const TemplateArgument *
7130getDefaultTemplateArgumentOrNone(const NamedDecl *P) {
7131 auto handleParam = [](auto *TP) -> const TemplateArgument * {
7132 if (!TP->hasDefaultArgument())
7133 return nullptr;
7134 return &TP->getDefaultArgument().getArgument();
7135 };
7136 switch (P->getKind()) {
7137 case NamedDecl::TemplateTypeParm:
7138 return handleParam(cast<TemplateTypeParmDecl>(Val: P));
7139 case NamedDecl::NonTypeTemplateParm:
7140 return handleParam(cast<NonTypeTemplateParmDecl>(Val: P));
7141 case NamedDecl::TemplateTemplateParm:
7142 return handleParam(cast<TemplateTemplateParmDecl>(Val: P));
7143 default:
7144 llvm_unreachable("Unexpected template parameter kind");
7145 }
7146}
7147
/// Compute the canonical form of \p Name, first desugaring through all sugar
/// nodes (optionally through DeducedTemplate sugar as well, per
/// \p IgnoreDeduced), then canonicalizing whatever kind remains.
TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name,
                                                  bool IgnoreDeduced) const {
  // Peel off sugar until desugar() has nothing left to strip.
  while (std::optional<TemplateName> UnderlyingOrNone =
             Name.desugar(IgnoreDeduced))
    Name = *UnderlyingOrNone;

  switch (Name.getKind()) {
  case TemplateName::Template: {
    TemplateDecl *Template = Name.getAsTemplateDecl();
    if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Val: Template))
      Template = getCanonicalTemplateTemplateParmDecl(TTP);

    // The canonical template name is the canonical template declaration.
    return TemplateName(cast<TemplateDecl>(Val: Template->getCanonicalDecl()));
  }

  case TemplateName::OverloadedTemplate:
  case TemplateName::AssumedTemplate:
    llvm_unreachable("cannot canonicalize unresolved template");

  case TemplateName::DependentTemplate: {
    DependentTemplateName *DTN = Name.getAsDependentTemplateName();
    assert(DTN && "Non-dependent template names must refer to template decls.");
    NestedNameSpecifier *Qualifier = DTN->getQualifier();
    NestedNameSpecifier *CanonQualifier =
        getCanonicalNestedNameSpecifier(NNS: Qualifier);
    // Canonical dependent names carry a canonical qualifier and always use
    // the 'template' keyword form.
    if (Qualifier != CanonQualifier || !DTN->hasTemplateKeyword())
      return getDependentTemplateName(Name: {CanonQualifier, DTN->getName(),
                                      /*HasTemplateKeyword=*/true});
    return Name;
  }

  case TemplateName::SubstTemplateTemplateParmPack: {
    SubstTemplateTemplateParmPackStorage *subst =
        Name.getAsSubstTemplateTemplateParmPack();
    // Canonicalize the argument pack and the associated declaration.
    TemplateArgument canonArgPack =
        getCanonicalTemplateArgument(Arg: subst->getArgumentPack());
    return getSubstTemplateTemplateParmPack(
        ArgPack: canonArgPack, AssociatedDecl: subst->getAssociatedDecl()->getCanonicalDecl(),
        Index: subst->getIndex(), Final: subst->getFinal());
  }
  case TemplateName::DeducedTemplate: {
    // Only reachable when IgnoreDeduced is false (otherwise the desugaring
    // loop above would have looked through this node).
    assert(IgnoreDeduced == false);
    DeducedTemplateStorage *DTS = Name.getAsDeducedTemplateName();
    DefaultArguments DefArgs = DTS->getDefaultArguments();
    TemplateName Underlying = DTS->getUnderlying();

    TemplateName CanonUnderlying =
        getCanonicalTemplateName(Name: Underlying, /*IgnoreDeduced=*/true);
    bool NonCanonical = CanonUnderlying != Underlying;
    auto CanonArgs =
        getCanonicalTemplateArguments(C: *this, Args: DefArgs.Args, AnyNonCanonArgs&: NonCanonical);

    ArrayRef<NamedDecl *> Params =
        CanonUnderlying.getAsTemplateDecl()->getTemplateParameters()->asArray();
    assert(CanonArgs.size() <= Params.size());
    // A deduced template name which deduces the same default arguments already
    // declared in the underlying template is the same template as the
    // underlying template. We need to note any arguments which differ from
    // the corresponding declaration. If any argument differs, we must build a
    // deduced template name.
    for (int I = CanonArgs.size() - 1; I >= 0; --I) {
      const TemplateArgument *A = getDefaultTemplateArgumentOrNone(P: Params[I]);
      if (!A)
        break;
      auto CanonParamDefArg = getCanonicalTemplateArgument(Arg: *A);
      TemplateArgument &CanonDefArg = CanonArgs[I];
      if (CanonDefArg.structurallyEquals(Other: CanonParamDefArg))
        continue;
      // Keep popping from the back any default arguments which are the same.
      if (I == int(CanonArgs.size() - 1))
        CanonArgs.pop_back();
      NonCanonical = true;
    }
    return NonCanonical ? getDeducedTemplateName(
                              Underlying: CanonUnderlying,
                              /*DefaultArgs=*/{.StartPos: DefArgs.StartPos, .Args: CanonArgs})
                        : Name;
  }
  case TemplateName::UsingTemplate:
  case TemplateName::QualifiedTemplate:
  case TemplateName::SubstTemplateTemplateParm:
    llvm_unreachable("always sugar node");
  }

  llvm_unreachable("bad template name!");
}
7235
7236bool ASTContext::hasSameTemplateName(const TemplateName &X,
7237 const TemplateName &Y,
7238 bool IgnoreDeduced) const {
7239 return getCanonicalTemplateName(Name: X, IgnoreDeduced) ==
7240 getCanonicalTemplateName(Name: Y, IgnoreDeduced);
7241}
7242
7243bool ASTContext::isSameAssociatedConstraint(
7244 const AssociatedConstraint &ACX, const AssociatedConstraint &ACY) const {
7245 if (ACX.ArgPackSubstIndex != ACY.ArgPackSubstIndex)
7246 return false;
7247 if (!isSameConstraintExpr(XCE: ACX.ConstraintExpr, YCE: ACY.ConstraintExpr))
7248 return false;
7249 return true;
7250}
7251
7252bool ASTContext::isSameConstraintExpr(const Expr *XCE, const Expr *YCE) const {
7253 if (!XCE != !YCE)
7254 return false;
7255
7256 if (!XCE)
7257 return true;
7258
7259 llvm::FoldingSetNodeID XCEID, YCEID;
7260 XCE->Profile(ID&: XCEID, Context: *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true);
7261 YCE->Profile(ID&: YCEID, Context: *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true);
7262 return XCEID == YCEID;
7263}
7264
/// Determine whether two type-constraints (e.g. 'C<int> auto') are
/// equivalent. Two null constraints compare equal; otherwise the named
/// concepts, the shape of the written arguments, and the immediately
/// declared constraints must all match.
bool ASTContext::isSameTypeConstraint(const TypeConstraint *XTC,
                                      const TypeConstraint *YTC) const {
  if (!XTC != !YTC)
    return false;

  if (!XTC)
    return true;

  auto *NCX = XTC->getNamedConcept();
  auto *NCY = YTC->getNamedConcept();
  if (!NCX || !NCY || !isSameEntity(X: NCX, Y: NCY))
    return false;
  // Cheap structural checks on the written arguments before profiling.
  if (XTC->getConceptReference()->hasExplicitTemplateArgs() !=
      YTC->getConceptReference()->hasExplicitTemplateArgs())
    return false;
  if (XTC->getConceptReference()->hasExplicitTemplateArgs())
    if (XTC->getConceptReference()
            ->getTemplateArgsAsWritten()
            ->NumTemplateArgs !=
        YTC->getConceptReference()->getTemplateArgsAsWritten()->NumTemplateArgs)
      return false;

  // Compare slowly by profiling.
  //
  // We couldn't compare the profiling result for the template
  // args here. Consider the following example in different modules:
  //
  // template <__integer_like _Tp, C<_Tp> Sentinel>
  // constexpr _Tp operator()(_Tp &&__t, Sentinel &&last) const {
  //   return __t;
  // }
  //
  // When we compare the profiling result for `C<_Tp>` in different
  // modules, it will compare the type of `_Tp` in different modules.
  // However, the type of `_Tp` in different modules refer to different
  // types here naturally. So we couldn't compare the profiling result
  // for the template args directly.
  return isSameConstraintExpr(XCE: XTC->getImmediatelyDeclaredConstraint(),
                              YCE: YTC->getImmediatelyDeclaredConstraint());
}
7305
/// Determine whether two template parameters are equivalent: same kind,
/// same pack-ness, and matching constraints/types/parameter lists as
/// appropriate for the kind.
bool ASTContext::isSameTemplateParameter(const NamedDecl *X,
                                         const NamedDecl *Y) const {
  if (X->getKind() != Y->getKind())
    return false;

  // Type parameter: pack-ness and type-constraint must match.
  if (auto *TX = dyn_cast<TemplateTypeParmDecl>(Val: X)) {
    auto *TY = cast<TemplateTypeParmDecl>(Val: Y);
    if (TX->isParameterPack() != TY->isParameterPack())
      return false;
    if (TX->hasTypeConstraint() != TY->hasTypeConstraint())
      return false;
    return isSameTypeConstraint(XTC: TX->getTypeConstraint(),
                                YTC: TY->getTypeConstraint());
  }

  // Non-type parameter: pack-ness, type, and placeholder constraint.
  if (auto *TX = dyn_cast<NonTypeTemplateParmDecl>(Val: X)) {
    auto *TY = cast<NonTypeTemplateParmDecl>(Val: Y);
    return TX->isParameterPack() == TY->isParameterPack() &&
           TX->getASTContext().hasSameType(T1: TX->getType(), T2: TY->getType()) &&
           isSameConstraintExpr(XCE: TX->getPlaceholderTypeConstraint(),
                                YCE: TY->getPlaceholderTypeConstraint());
  }

  // Template template parameter: pack-ness and nested parameter list.
  auto *TX = cast<TemplateTemplateParmDecl>(Val: X);
  auto *TY = cast<TemplateTemplateParmDecl>(Val: Y);
  return TX->isParameterPack() == TY->isParameterPack() &&
         isSameTemplateParameterList(X: TX->getTemplateParameters(),
                                     Y: TY->getTemplateParameters());
}
7335
7336bool ASTContext::isSameTemplateParameterList(
7337 const TemplateParameterList *X, const TemplateParameterList *Y) const {
7338 if (X->size() != Y->size())
7339 return false;
7340
7341 for (unsigned I = 0, N = X->size(); I != N; ++I)
7342 if (!isSameTemplateParameter(X: X->getParam(Idx: I), Y: Y->getParam(Idx: I)))
7343 return false;
7344
7345 return isSameConstraintExpr(XCE: X->getRequiresClause(), YCE: Y->getRequiresClause());
7346}
7347
/// Determine whether two template parameters declare equivalent default
/// arguments. Returns false if the parameters themselves differ or if
/// either side lacks a default.
bool ASTContext::isSameDefaultTemplateArgument(const NamedDecl *X,
                                               const NamedDecl *Y) const {
  // If the type parameter isn't the same already, we don't need to check the
  // default argument further.
  if (!isSameTemplateParameter(X, Y))
    return false;

  // Type parameter: compare the default types directly.
  if (auto *TTPX = dyn_cast<TemplateTypeParmDecl>(Val: X)) {
    auto *TTPY = cast<TemplateTypeParmDecl>(Val: Y);
    if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument())
      return false;

    return hasSameType(T1: TTPX->getDefaultArgument().getArgument().getAsType(),
                       T2: TTPY->getDefaultArgument().getArgument().getAsType());
  }

  // Non-type parameter: compare canonical profiles of the default
  // expressions, ignoring implicit casts.
  if (auto *NTTPX = dyn_cast<NonTypeTemplateParmDecl>(Val: X)) {
    auto *NTTPY = cast<NonTypeTemplateParmDecl>(Val: Y);
    if (!NTTPX->hasDefaultArgument() || !NTTPY->hasDefaultArgument())
      return false;

    Expr *DefaultArgumentX =
        NTTPX->getDefaultArgument().getArgument().getAsExpr()->IgnoreImpCasts();
    Expr *DefaultArgumentY =
        NTTPY->getDefaultArgument().getArgument().getAsExpr()->IgnoreImpCasts();
    llvm::FoldingSetNodeID XID, YID;
    DefaultArgumentX->Profile(ID&: XID, Context: *this, /*Canonical=*/true);
    DefaultArgumentY->Profile(ID&: YID, Context: *this, /*Canonical=*/true);
    return XID == YID;
  }

  // Template template parameter: compare the default template names.
  auto *TTPX = cast<TemplateTemplateParmDecl>(Val: X);
  auto *TTPY = cast<TemplateTemplateParmDecl>(Val: Y);

  if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument())
    return false;

  const TemplateArgument &TAX = TTPX->getDefaultArgument().getArgument();
  const TemplateArgument &TAY = TTPY->getDefaultArgument().getArgument();
  return hasSameTemplateName(X: TAX.getAsTemplate(), Y: TAY.getAsTemplate());
}
7389
7390static NamespaceDecl *getNamespace(const NestedNameSpecifier *X) {
7391 if (auto *NS = X->getAsNamespace())
7392 return NS;
7393 if (auto *NAS = X->getAsNamespaceAlias())
7394 return NAS->getNamespace();
7395 return nullptr;
7396}
7397
7398static bool isSameQualifier(const NestedNameSpecifier *X,
7399 const NestedNameSpecifier *Y) {
7400 if (auto *NSX = getNamespace(X)) {
7401 auto *NSY = getNamespace(X: Y);
7402 if (!NSY || NSX->getCanonicalDecl() != NSY->getCanonicalDecl())
7403 return false;
7404 } else if (X->getKind() != Y->getKind())
7405 return false;
7406
7407 // FIXME: For namespaces and types, we're permitted to check that the entity
7408 // is named via the same tokens. We should probably do so.
7409 switch (X->getKind()) {
7410 case NestedNameSpecifier::Identifier:
7411 if (X->getAsIdentifier() != Y->getAsIdentifier())
7412 return false;
7413 break;
7414 case NestedNameSpecifier::Namespace:
7415 case NestedNameSpecifier::NamespaceAlias:
7416 // We've already checked that we named the same namespace.
7417 break;
7418 case NestedNameSpecifier::TypeSpec:
7419 if (X->getAsType()->getCanonicalTypeInternal() !=
7420 Y->getAsType()->getCanonicalTypeInternal())
7421 return false;
7422 break;
7423 case NestedNameSpecifier::Global:
7424 case NestedNameSpecifier::Super:
7425 return true;
7426 }
7427
7428 // Recurse into earlier portion of NNS, if any.
7429 auto *PX = X->getPrefix();
7430 auto *PY = Y->getPrefix();
7431 if (PX && PY)
7432 return isSameQualifier(X: PX, Y: PY);
7433 return !PX && !PY;
7434}
7435
7436static bool hasSameCudaAttrs(const FunctionDecl *A, const FunctionDecl *B) {
7437 if (!A->getASTContext().getLangOpts().CUDA)
7438 return true; // Target attributes are overloadable in CUDA compilation only.
7439 if (A->hasAttr<CUDADeviceAttr>() != B->hasAttr<CUDADeviceAttr>())
7440 return false;
7441 if (A->hasAttr<CUDADeviceAttr>() && B->hasAttr<CUDADeviceAttr>())
7442 return A->hasAttr<CUDAHostAttr>() == B->hasAttr<CUDAHostAttr>();
7443 return true; // unattributed and __host__ functions are the same.
7444}
7445
/// Determine whether the attributes we can overload on are identical for A and
/// B. Will ignore any overloadable attrs represented in the type of A and B.
static bool hasSameOverloadableAttrs(const FunctionDecl *A,
                                     const FunctionDecl *B) {
  // Note that pass_object_size attributes are represented in the function's
  // ExtParameterInfo, so we don't need to check them here.

  llvm::FoldingSetNodeID Cand1ID, Cand2ID;
  auto AEnableIfAttrs = A->specific_attrs<EnableIfAttr>();
  auto BEnableIfAttrs = B->specific_attrs<EnableIfAttr>();

  // Walk both enable_if attribute lists in lock-step; zip_longest yields
  // std::nullopt for the exhausted side once one list runs out.
  for (auto Pair : zip_longest(t&: AEnableIfAttrs, u&: BEnableIfAttrs)) {
    std::optional<EnableIfAttr *> Cand1A = std::get<0>(t&: Pair);
    std::optional<EnableIfAttr *> Cand2A = std::get<1>(t&: Pair);

    // Return false if the number of enable_if attributes is different.
    if (!Cand1A || !Cand2A)
      return false;

    // The node IDs are reused across iterations; drop stale state first.
    Cand1ID.clear();
    Cand2ID.clear();

    (*Cand1A)->getCond()->Profile(ID&: Cand1ID, Context: A->getASTContext(), Canonical: true);
    (*Cand2A)->getCond()->Profile(ID&: Cand2ID, Context: B->getASTContext(), Canonical: true);

    // Return false if any of the enable_if expressions of A and B are
    // different.
    if (Cand1ID != Cand2ID)
      return false;
  }
  // Finally, CUDA target attributes must also match (no-op outside CUDA).
  return hasSameCudaAttrs(A, B);
}
7478
bool ASTContext::isSameEntity(const NamedDecl *X, const NamedDecl *Y) const {
  // Caution: this function is called by the AST reader during deserialization,
  // so it cannot rely on AST invariants being met. Non-trivial accessors
  // should be avoided, along with any traversal of redeclaration chains.

  // Fast path: the very same declaration.
  if (X == Y)
    return true;

  // Different names can never denote the same entity.
  if (X->getDeclName() != Y->getDeclName())
    return false;

  // Must be in the same context.
  //
  // Note that we can't use DeclContext::Equals here, because the DeclContexts
  // could be two different declarations of the same function. (We will fix the
  // semantic DC to refer to the primary definition after merging.)
  if (!declaresSameEntity(D1: cast<Decl>(Val: X->getDeclContext()->getRedeclContext()),
                          D2: cast<Decl>(Val: Y->getDeclContext()->getRedeclContext())))
    return false;

  // If either X or Y are local to the owning module, they are only possible to
  // be the same entity if they are in the same module.
  if (X->isModuleLocal() || Y->isModuleLocal())
    if (!isInSameModule(M1: X->getOwningModule(), M2: Y->getOwningModule()))
      return false;

  // Two typedefs refer to the same entity if they have the same underlying
  // type.
  if (const auto *TypedefX = dyn_cast<TypedefNameDecl>(Val: X))
    if (const auto *TypedefY = dyn_cast<TypedefNameDecl>(Val: Y))
      return hasSameType(T1: TypedefX->getUnderlyingType(),
                         T2: TypedefY->getUnderlyingType());

  // Must have the same kind.
  if (X->getKind() != Y->getKind())
    return false;

  // Objective-C classes and protocols with the same name always match.
  if (isa<ObjCInterfaceDecl>(Val: X) || isa<ObjCProtocolDecl>(Val: X))
    return true;

  if (isa<ClassTemplateSpecializationDecl>(Val: X)) {
    // No need to handle these here: we merge them when adding them to the
    // template.
    return false;
  }

  // Compatible tags match. struct/class/__interface are freely
  // interchangeable tag kinds; enum vs. struct (etc.) are not.
  if (const auto *TagX = dyn_cast<TagDecl>(Val: X)) {
    const auto *TagY = cast<TagDecl>(Val: Y);
    return (TagX->getTagKind() == TagY->getTagKind()) ||
      ((TagX->getTagKind() == TagTypeKind::Struct ||
        TagX->getTagKind() == TagTypeKind::Class ||
        TagX->getTagKind() == TagTypeKind::Interface) &&
       (TagY->getTagKind() == TagTypeKind::Struct ||
        TagY->getTagKind() == TagTypeKind::Class ||
        TagY->getTagKind() == TagTypeKind::Interface));
  }

  // Functions with the same type and linkage match.
  // FIXME: This needs to cope with merging of prototyped/non-prototyped
  // functions, etc.
  if (const auto *FuncX = dyn_cast<FunctionDecl>(Val: X)) {
    const auto *FuncY = cast<FunctionDecl>(Val: Y);
    // Inheriting constructors must inherit from the same constructor.
    if (const auto *CtorX = dyn_cast<CXXConstructorDecl>(Val: X)) {
      const auto *CtorY = cast<CXXConstructorDecl>(Val: Y);
      if (CtorX->getInheritedConstructor() &&
          !isSameEntity(X: CtorX->getInheritedConstructor().getConstructor(),
                        Y: CtorY->getInheritedConstructor().getConstructor()))
        return false;
    }

    if (FuncX->isMultiVersion() != FuncY->isMultiVersion())
      return false;

    // Multiversioned functions with different feature strings are represented
    // as separate declarations.
    if (FuncX->isMultiVersion()) {
      const auto *TAX = FuncX->getAttr<TargetAttr>();
      const auto *TAY = FuncY->getAttr<TargetAttr>();
      assert(TAX && TAY && "Multiversion Function without target attribute");

      if (TAX->getFeaturesStr() != TAY->getFeaturesStr())
        return false;
    }

    // Per C++20 [temp.over.link]/4, friends in different classes are sometimes
    // not the same entity if they are constrained.
    if ((FuncX->isMemberLikeConstrainedFriend() ||
         FuncY->isMemberLikeConstrainedFriend()) &&
        !FuncX->getLexicalDeclContext()->Equals(
            DC: FuncY->getLexicalDeclContext())) {
      return false;
    }

    // Trailing requires-clauses must be equivalent.
    if (!isSameAssociatedConstraint(ACX: FuncX->getTrailingRequiresClause(),
                                    ACY: FuncY->getTrailingRequiresClause()))
      return false;

    auto GetTypeAsWritten = [](const FunctionDecl *FD) {
      // Map to the first declaration that we've already merged into this one.
      // The TSI of redeclarations might not match (due to calling conventions
      // being inherited onto the type but not the TSI), but the TSI type of
      // the first declaration of the function should match across modules.
      FD = FD->getCanonicalDecl();
      return FD->getTypeSourceInfo() ? FD->getTypeSourceInfo()->getType()
                                     : FD->getType();
    };
    QualType XT = GetTypeAsWritten(FuncX), YT = GetTypeAsWritten(FuncY);
    if (!hasSameType(T1: XT, T2: YT)) {
      // We can get functions with different types on the redecl chain in C++17
      // if they have differing exception specifications and at least one of
      // the exception specs is unresolved.
      auto *XFPT = XT->getAs<FunctionProtoType>();
      auto *YFPT = YT->getAs<FunctionProtoType>();
      if (getLangOpts().CPlusPlus17 && XFPT && YFPT &&
          (isUnresolvedExceptionSpec(ESpecType: XFPT->getExceptionSpecType()) ||
           isUnresolvedExceptionSpec(ESpecType: YFPT->getExceptionSpecType())) &&
          hasSameFunctionTypeIgnoringExceptionSpec(T: XT, U: YT))
        return true;
      return false;
    }

    return FuncX->getLinkageInternal() == FuncY->getLinkageInternal() &&
           hasSameOverloadableAttrs(A: FuncX, B: FuncY);
  }

  // Variables with the same type and linkage match.
  if (const auto *VarX = dyn_cast<VarDecl>(Val: X)) {
    const auto *VarY = cast<VarDecl>(Val: Y);
    if (VarX->getLinkageInternal() == VarY->getLinkageInternal()) {
      // During deserialization, we might compare variables before we load
      // their types. Assume the types will end up being the same.
      if (VarX->getType().isNull() || VarY->getType().isNull())
        return true;

      if (hasSameType(T1: VarX->getType(), T2: VarY->getType()))
        return true;

      // We can get decls with different types on the redecl chain. Eg.
      // template <typename T> struct S { static T Var[]; }; // #1
      // template <typename T> T S<T>::Var[sizeof(T)]; // #2
      // Only? happens when completing an incomplete array type. In this case
      // when comparing #1 and #2 we should go through their element type.
      const ArrayType *VarXTy = getAsArrayType(T: VarX->getType());
      const ArrayType *VarYTy = getAsArrayType(T: VarY->getType());
      if (!VarXTy || !VarYTy)
        return false;
      if (VarXTy->isIncompleteArrayType() || VarYTy->isIncompleteArrayType())
        return hasSameType(T1: VarXTy->getElementType(), T2: VarYTy->getElementType());
    }
    return false;
  }

  // Namespaces with the same name and inlinedness match.
  if (const auto *NamespaceX = dyn_cast<NamespaceDecl>(Val: X)) {
    const auto *NamespaceY = cast<NamespaceDecl>(Val: Y);
    return NamespaceX->isInline() == NamespaceY->isInline();
  }

  // Identical template names and kinds match if their template parameter lists
  // and patterns match.
  if (const auto *TemplateX = dyn_cast<TemplateDecl>(Val: X)) {
    const auto *TemplateY = cast<TemplateDecl>(Val: Y);

    // ConceptDecl wouldn't be the same if their constraint expression differs.
    if (const auto *ConceptX = dyn_cast<ConceptDecl>(Val: X)) {
      const auto *ConceptY = cast<ConceptDecl>(Val: Y);
      if (!isSameConstraintExpr(XCE: ConceptX->getConstraintExpr(),
                                YCE: ConceptY->getConstraintExpr()))
        return false;
    }

    return isSameEntity(X: TemplateX->getTemplatedDecl(),
                        Y: TemplateY->getTemplatedDecl()) &&
           isSameTemplateParameterList(X: TemplateX->getTemplateParameters(),
                                       Y: TemplateY->getTemplateParameters());
  }

  // Fields with the same name and the same type match.
  if (const auto *FDX = dyn_cast<FieldDecl>(Val: X)) {
    const auto *FDY = cast<FieldDecl>(Val: Y);
    // FIXME: Also check the bitwidth is odr-equivalent, if any.
    return hasSameType(T1: FDX->getType(), T2: FDY->getType());
  }

  // Indirect fields with the same target field match.
  if (const auto *IFDX = dyn_cast<IndirectFieldDecl>(Val: X)) {
    const auto *IFDY = cast<IndirectFieldDecl>(Val: Y);
    return IFDX->getAnonField()->getCanonicalDecl() ==
           IFDY->getAnonField()->getCanonicalDecl();
  }

  // Enumerators with the same name match.
  if (isa<EnumConstantDecl>(Val: X))
    // FIXME: Also check the value is odr-equivalent.
    return true;

  // Using shadow declarations with the same target match.
  if (const auto *USX = dyn_cast<UsingShadowDecl>(Val: X)) {
    const auto *USY = cast<UsingShadowDecl>(Val: Y);
    return declaresSameEntity(D1: USX->getTargetDecl(), D2: USY->getTargetDecl());
  }

  // Using declarations with the same qualifier match. (We already know that
  // the name matches.)
  if (const auto *UX = dyn_cast<UsingDecl>(Val: X)) {
    const auto *UY = cast<UsingDecl>(Val: Y);
    return isSameQualifier(X: UX->getQualifier(), Y: UY->getQualifier()) &&
           UX->hasTypename() == UY->hasTypename() &&
           UX->isAccessDeclaration() == UY->isAccessDeclaration();
  }
  if (const auto *UX = dyn_cast<UnresolvedUsingValueDecl>(Val: X)) {
    const auto *UY = cast<UnresolvedUsingValueDecl>(Val: Y);
    return isSameQualifier(X: UX->getQualifier(), Y: UY->getQualifier()) &&
           UX->isAccessDeclaration() == UY->isAccessDeclaration();
  }
  if (const auto *UX = dyn_cast<UnresolvedUsingTypenameDecl>(Val: X)) {
    return isSameQualifier(
        X: UX->getQualifier(),
        Y: cast<UnresolvedUsingTypenameDecl>(Val: Y)->getQualifier());
  }

  // Using-pack declarations are only created by instantiation, and match if
  // they're instantiated from matching UnresolvedUsing...Decls.
  if (const auto *UX = dyn_cast<UsingPackDecl>(Val: X)) {
    return declaresSameEntity(
        D1: UX->getInstantiatedFromUsingDecl(),
        D2: cast<UsingPackDecl>(Val: Y)->getInstantiatedFromUsingDecl());
  }

  // Namespace alias definitions with the same target match.
  if (const auto *NAX = dyn_cast<NamespaceAliasDecl>(Val: X)) {
    const auto *NAY = cast<NamespaceAliasDecl>(Val: Y);
    return NAX->getNamespace()->Equals(DC: NAY->getNamespace());
  }

  // Any other kind of declaration is never merged here.
  return false;
}
7718
// Produce the canonical form of a template argument: canonicalize the
// contained type/declaration/template-name/expression per argument kind,
// preserving the is-defaulted flag.
TemplateArgument
ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
  switch (Arg.getKind()) {
  case TemplateArgument::Null:
    // A null argument is trivially canonical.
    return Arg;

  case TemplateArgument::Expression:
    // Rebuild the argument with the expression marked canonical.
    return TemplateArgument(Arg.getAsExpr(), /*IsCanonical=*/true,
                            Arg.getIsDefaulted());

  case TemplateArgument::Declaration: {
    // Canonicalize both the declaration and the type it was matched against.
    auto *D = cast<ValueDecl>(Val: Arg.getAsDecl()->getCanonicalDecl());
    return TemplateArgument(D, getCanonicalType(T: Arg.getParamTypeForDecl()),
                            Arg.getIsDefaulted());
  }

  case TemplateArgument::NullPtr:
    return TemplateArgument(getCanonicalType(T: Arg.getNullPtrType()),
                            /*isNullPtr*/ true, Arg.getIsDefaulted());

  case TemplateArgument::Template:
    return TemplateArgument(getCanonicalTemplateName(Name: Arg.getAsTemplate()),
                            Arg.getIsDefaulted());

  case TemplateArgument::TemplateExpansion:
    // Canonicalize the pattern; keep the expansion count.
    return TemplateArgument(
        getCanonicalTemplateName(Name: Arg.getAsTemplateOrTemplatePattern()),
        Arg.getNumTemplateExpansions(), Arg.getIsDefaulted());

  case TemplateArgument::Integral:
    // Keep the value, canonicalize its type.
    return TemplateArgument(Arg, getCanonicalType(T: Arg.getIntegralType()));

  case TemplateArgument::StructuralValue:
    return TemplateArgument(*this,
                            getCanonicalType(T: Arg.getStructuralValueType()),
                            Arg.getAsStructuralValue(), Arg.getIsDefaulted());

  case TemplateArgument::Type:
    return TemplateArgument(getCanonicalType(T: Arg.getAsType()),
                            /*isNullPtr*/ false, Arg.getIsDefaulted());

  case TemplateArgument::Pack: {
    // Canonicalize each element; if they were all canonical already, the
    // pack itself can be reused unchanged.
    bool AnyNonCanonArgs = false;
    auto CanonArgs = ::getCanonicalTemplateArguments(
        C: *this, Args: Arg.pack_elements(), AnyNonCanonArgs);
    if (!AnyNonCanonArgs)
      return Arg;
    auto NewArg = TemplateArgument::CreatePackCopy(
        Context&: const_cast<ASTContext &>(*this), Args: CanonArgs);
    NewArg.setIsDefaulted(Arg.getIsDefaulted());
    return NewArg;
  }
  }

  // Silence GCC warning
  llvm_unreachable("Unhandled template argument kind");
}
7776
// Compare two template arguments for equivalence, per argument kind.
// Arguments of different kinds never compare equal.
bool ASTContext::isSameTemplateArgument(const TemplateArgument &Arg1,
                                        const TemplateArgument &Arg2) const {
  if (Arg1.getKind() != Arg2.getKind())
    return false;

  switch (Arg1.getKind()) {
  case TemplateArgument::Null:
    llvm_unreachable("Comparing NULL template argument");

  case TemplateArgument::Type:
    return hasSameType(T1: Arg1.getAsType(), T2: Arg2.getAsType());

  case TemplateArgument::Declaration:
    // Compare the canonical declarations, looking through using-decls.
    return Arg1.getAsDecl()->getUnderlyingDecl()->getCanonicalDecl() ==
           Arg2.getAsDecl()->getUnderlyingDecl()->getCanonicalDecl();

  case TemplateArgument::NullPtr:
    return hasSameType(T1: Arg1.getNullPtrType(), T2: Arg2.getNullPtrType());

  case TemplateArgument::Template:
  case TemplateArgument::TemplateExpansion:
    // Compare the canonical template names (patterns, for expansions).
    return getCanonicalTemplateName(Name: Arg1.getAsTemplateOrTemplatePattern()) ==
           getCanonicalTemplateName(Name: Arg2.getAsTemplateOrTemplatePattern());

  case TemplateArgument::Integral:
    return llvm::APSInt::isSameValue(I1: Arg1.getAsIntegral(),
                                     I2: Arg2.getAsIntegral());

  case TemplateArgument::StructuralValue:
    return Arg1.structurallyEquals(Other: Arg2);

  case TemplateArgument::Expression: {
    // Compare expressions structurally via canonical profiling.
    llvm::FoldingSetNodeID ID1, ID2;
    Arg1.getAsExpr()->Profile(ID&: ID1, Context: *this, /*Canonical=*/true);
    Arg2.getAsExpr()->Profile(ID&: ID2, Context: *this, /*Canonical=*/true);
    return ID1 == ID2;
  }

  case TemplateArgument::Pack:
    // Packs match element-wise (equal lengths required by llvm::equal).
    return llvm::equal(
        LRange: Arg1.getPackAsArray(), RRange: Arg2.getPackAsArray(),
        P: [&](const TemplateArgument &Arg1, const TemplateArgument &Arg2) {
          return isSameTemplateArgument(Arg1, Arg2);
        });
  }

  llvm_unreachable("Unhandled template argument kind");
}
7825
// Compute the canonical form of a nested-name-specifier: namespaces lose
// their prefixes and aliases, and type specifiers are canonicalized
// (dependent-name types are re-split into prefix + identifier).
NestedNameSpecifier *
ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const {
  if (!NNS)
    return nullptr;

  switch (NNS->getKind()) {
  case NestedNameSpecifier::Identifier:
    // Canonicalize the prefix but keep the identifier the same.
    return NestedNameSpecifier::Create(Context: *this,
                                       Prefix: getCanonicalNestedNameSpecifier(NNS: NNS->getPrefix()),
                                       II: NNS->getAsIdentifier());

  case NestedNameSpecifier::Namespace:
    // A namespace is canonical; build a nested-name-specifier with
    // this namespace and no prefix.
    return NestedNameSpecifier::Create(Context: *this, Prefix: nullptr,
                                 NS: NNS->getAsNamespace()->getFirstDecl());

  case NestedNameSpecifier::NamespaceAlias:
    // A namespace is canonical; build a nested-name-specifier with
    // this namespace and no prefix.
    return NestedNameSpecifier::Create(
        Context: *this, Prefix: nullptr,
        NS: NNS->getAsNamespaceAlias()->getNamespace()->getFirstDecl());

  // The difference between TypeSpec and TypeSpecWithTemplate is that the
  // latter will have the 'template' keyword when printed.
  case NestedNameSpecifier::TypeSpec: {
    const Type *T = getCanonicalType(T: NNS->getAsType());

    // If we have some kind of dependent-named type (e.g., "typename T::type"),
    // break it apart into its prefix and identifier, then reconstitute those
    // as the canonical nested-name-specifier. This is required to canonicalize
    // a dependent nested-name-specifier involving typedefs of dependent-name
    // types, e.g.,
    //   typedef typename T::type T1;
    //   typedef typename T1::type T2;
    if (const auto *DNT = T->getAs<DependentNameType>())
      return NestedNameSpecifier::Create(Context: *this, Prefix: DNT->getQualifier(),
                                         II: DNT->getIdentifier());
    if (const auto *DTST = T->getAs<DependentTemplateSpecializationType>()) {
      // Rebuild the dependent template specialization with a canonical
      // (keyword-free) form, reusing the dependent name's qualifier as
      // the prefix when it has one.
      const DependentTemplateStorage &DTN = DTST->getDependentTemplateName();
      QualType NewT = getDependentTemplateSpecializationType(
          Keyword: ElaboratedTypeKeyword::None,
          Name: {/*NNS=*/nullptr, DTN.getName(), /*HasTemplateKeyword=*/true},
          Args: DTST->template_arguments(), /*IsCanonical=*/true);
      assert(NewT.isCanonical());
      NestedNameSpecifier *Prefix = DTN.getQualifier();
      if (!Prefix)
        Prefix = getCanonicalNestedNameSpecifier(NNS: NNS->getPrefix());
      return NestedNameSpecifier::Create(Context: *this, Prefix, T: NewT.getTypePtr());
    }
    return NestedNameSpecifier::Create(Context: *this, Prefix: nullptr, T);
  }

  case NestedNameSpecifier::Global:
  case NestedNameSpecifier::Super:
    // The global specifier and __super specifier are canonical and unique.
    return NNS;
  }

  llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
}
7889
// Return T as an ArrayType, desugaring as needed and propagating any
// top-level qualifiers into the element type (C99 6.7.3p8); returns null
// if T is not an array type.
const ArrayType *ASTContext::getAsArrayType(QualType T) const {
  // Handle the non-qualified case efficiently.
  if (!T.hasLocalQualifiers()) {
    // Handle the common positive case fast.
    if (const auto *AT = dyn_cast<ArrayType>(Val&: T))
      return AT;
  }

  // Handle the common negative case fast.
  if (!isa<ArrayType>(Val: T.getCanonicalType()))
    return nullptr;

  // Apply any qualifiers from the array type to the element type. This
  // implements C99 6.7.3p8: "If the specification of an array type includes
  // any type qualifiers, the element type is so qualified, not the array type."

  // If we get here, we either have type qualifiers on the type, or we have
  // sugar such as a typedef in the way. If we have type qualifiers on the type
  // we must propagate them down into the element type.

  SplitQualType split = T.getSplitDesugaredType();
  Qualifiers qs = split.Quals;

  // If we have a simple case, just return now.
  const auto *ATy = dyn_cast<ArrayType>(Val: split.Ty);
  if (!ATy || qs.empty())
    return ATy;

  // Otherwise, we have an array and we have qualifiers on it. Push the
  // qualifiers into the array element type and return a new array type.
  QualType NewEltTy = getQualifiedType(T: ATy->getElementType(), Qs: qs);

  // Rebuild the array type with the newly-qualified element type,
  // preserving the original array's size/modifier/index qualifiers.
  if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: ATy))
    return cast<ArrayType>(Val: getConstantArrayType(EltTy: NewEltTy, ArySizeIn: CAT->getSize(),
                                                SizeExpr: CAT->getSizeExpr(),
                                                ASM: CAT->getSizeModifier(),
                                           IndexTypeQuals: CAT->getIndexTypeCVRQualifiers()));
  if (const auto *IAT = dyn_cast<IncompleteArrayType>(Val: ATy))
    return cast<ArrayType>(Val: getIncompleteArrayType(elementType: NewEltTy,
                                                  ASM: IAT->getSizeModifier(),
                                           elementTypeQuals: IAT->getIndexTypeCVRQualifiers()));

  if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(Val: ATy))
    return cast<ArrayType>(Val: getDependentSizedArrayType(
        elementType: NewEltTy, numElements: DSAT->getSizeExpr(), ASM: DSAT->getSizeModifier(),
        elementTypeQuals: DSAT->getIndexTypeCVRQualifiers()));

  // Only variable arrays remain at this point.
  const auto *VAT = cast<VariableArrayType>(Val: ATy);
  return cast<ArrayType>(
      Val: getVariableArrayType(EltTy: NewEltTy, NumElts: VAT->getSizeExpr(), ASM: VAT->getSizeModifier(),
                           IndexTypeQuals: VAT->getIndexTypeCVRQualifiers()));
}
7942
7943QualType ASTContext::getAdjustedParameterType(QualType T) const {
7944 if (getLangOpts().HLSL && T->isConstantArrayType())
7945 return getArrayParameterType(Ty: T);
7946 if (T->isArrayType() || T->isFunctionType())
7947 return getDecayedType(T);
7948 return T;
7949}
7950
7951QualType ASTContext::getSignatureParameterType(QualType T) const {
7952 T = getVariableArrayDecayedType(type: T);
7953 T = getAdjustedParameterType(T);
7954 return T.getUnqualifiedType();
7955}
7956
7957QualType ASTContext::getExceptionObjectType(QualType T) const {
7958 // C++ [except.throw]p3:
7959 // A throw-expression initializes a temporary object, called the exception
7960 // object, the type of which is determined by removing any top-level
7961 // cv-qualifiers from the static type of the operand of throw and adjusting
7962 // the type from "array of T" or "function returning T" to "pointer to T"
7963 // or "pointer to function returning T", [...]
7964 T = getVariableArrayDecayedType(type: T);
7965 if (T->isArrayType() || T->isFunctionType())
7966 T = getDecayedType(T);
7967 return T.getUnqualifiedType();
7968}
7969
7970/// getArrayDecayedType - Return the properly qualified result of decaying the
7971/// specified array type to a pointer. This operation is non-trivial when
7972/// handling typedefs etc. The canonical type of "T" must be an array type,
7973/// this returns a pointer to a properly qualified element of the array.
7974///
7975/// See C99 6.7.5.3p7 and C99 6.3.2.1p3.
7976QualType ASTContext::getArrayDecayedType(QualType Ty) const {
7977 // Get the element type with 'getAsArrayType' so that we don't lose any
7978 // typedefs in the element type of the array. This also handles propagation
7979 // of type qualifiers from the array type into the element type if present
7980 // (C99 6.7.3p8).
7981 const ArrayType *PrettyArrayType = getAsArrayType(T: Ty);
7982 assert(PrettyArrayType && "Not an array type!");
7983
7984 QualType PtrTy = getPointerType(T: PrettyArrayType->getElementType());
7985
7986 // int x[restrict 4] -> int *restrict
7987 QualType Result = getQualifiedType(T: PtrTy,
7988 Qs: PrettyArrayType->getIndexTypeQualifiers());
7989
7990 // int x[_Nullable] -> int * _Nullable
7991 if (auto Nullability = Ty->getNullability()) {
7992 Result = const_cast<ASTContext *>(this)->getAttributedType(nullability: *Nullability,
7993 modifiedType: Result, equivalentType: Result);
7994 }
7995 return Result;
7996}
7997
7998QualType ASTContext::getBaseElementType(const ArrayType *array) const {
7999 return getBaseElementType(QT: array->getElementType());
8000}
8001
8002QualType ASTContext::getBaseElementType(QualType type) const {
8003 Qualifiers qs;
8004 while (true) {
8005 SplitQualType split = type.getSplitDesugaredType();
8006 const ArrayType *array = split.Ty->getAsArrayTypeUnsafe();
8007 if (!array) break;
8008
8009 type = array->getElementType();
8010 qs.addConsistentQualifiers(qs: split.Quals);
8011 }
8012
8013 return getQualifiedType(T: type, Qs: qs);
8014}
8015
8016/// getConstantArrayElementCount - Returns number of constant array elements.
8017uint64_t
8018ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const {
8019 uint64_t ElementCount = 1;
8020 do {
8021 ElementCount *= CA->getZExtSize();
8022 CA = dyn_cast_or_null<ConstantArrayType>(
8023 Val: CA->getElementType()->getAsArrayTypeUnsafe());
8024 } while (CA);
8025 return ElementCount;
8026}
8027
8028uint64_t ASTContext::getArrayInitLoopExprElementCount(
8029 const ArrayInitLoopExpr *AILE) const {
8030 if (!AILE)
8031 return 0;
8032
8033 uint64_t ElementCount = 1;
8034
8035 do {
8036 ElementCount *= AILE->getArraySize().getZExtValue();
8037 AILE = dyn_cast<ArrayInitLoopExpr>(Val: AILE->getSubExpr());
8038 } while (AILE);
8039
8040 return ElementCount;
8041}
8042
8043/// getFloatingRank - Return a relative rank for floating point types.
8044/// This routine will assert if passed a built-in type that isn't a float.
8045static FloatingRank getFloatingRank(QualType T) {
8046 if (const auto *CT = T->getAs<ComplexType>())
8047 return getFloatingRank(T: CT->getElementType());
8048
8049 switch (T->castAs<BuiltinType>()->getKind()) {
8050 default: llvm_unreachable("getFloatingRank(): not a floating type");
8051 case BuiltinType::Float16: return Float16Rank;
8052 case BuiltinType::Half: return HalfRank;
8053 case BuiltinType::Float: return FloatRank;
8054 case BuiltinType::Double: return DoubleRank;
8055 case BuiltinType::LongDouble: return LongDoubleRank;
8056 case BuiltinType::Float128: return Float128Rank;
8057 case BuiltinType::BFloat16: return BFloat16Rank;
8058 case BuiltinType::Ibm128: return Ibm128Rank;
8059 }
8060}
8061
8062/// getFloatingTypeOrder - Compare the rank of the two specified floating
8063/// point types, ignoring the domain of the type (i.e. 'double' ==
8064/// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If
8065/// LHS < RHS, return -1.
8066int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const {
8067 FloatingRank LHSR = getFloatingRank(T: LHS);
8068 FloatingRank RHSR = getFloatingRank(T: RHS);
8069
8070 if (LHSR == RHSR)
8071 return 0;
8072 if (LHSR > RHSR)
8073 return 1;
8074 return -1;
8075}
8076
8077int ASTContext::getFloatingTypeSemanticOrder(QualType LHS, QualType RHS) const {
8078 if (&getFloatTypeSemantics(T: LHS) == &getFloatTypeSemantics(T: RHS))
8079 return 0;
8080 return getFloatingTypeOrder(LHS, RHS);
8081}
8082
/// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This
/// routine will assert if passed a built-in type that isn't an integer or enum,
/// or if it is not canonicalized.
unsigned ASTContext::getIntegerRank(const Type *T) const {
  assert(T->isCanonicalUnqualified() && "T should be canonicalized");

  // Results in this 'losing' to any type of the same size, but winning if
  // larger.
  // The encoding below is (small tiebreaker) + (bit width << 3); the width
  // dominates, the low bits order same-width types.
  if (const auto *EIT = dyn_cast<BitIntType>(Val: T))
    return 0 + (EIT->getNumBits() << 3);

  switch (cast<BuiltinType>(Val: T)->getKind()) {
  default: llvm_unreachable("getIntegerRank(): not a built-in integer");
  case BuiltinType::Bool:
    return 1 + (getIntWidth(T: BoolTy) << 3);
  case BuiltinType::Char_S:
  case BuiltinType::Char_U:
  case BuiltinType::SChar:
  case BuiltinType::UChar:
    return 2 + (getIntWidth(T: CharTy) << 3);
  case BuiltinType::Short:
  case BuiltinType::UShort:
    return 3 + (getIntWidth(T: ShortTy) << 3);
  case BuiltinType::Int:
  case BuiltinType::UInt:
    return 4 + (getIntWidth(T: IntTy) << 3);
  case BuiltinType::Long:
  case BuiltinType::ULong:
    return 5 + (getIntWidth(T: LongTy) << 3);
  case BuiltinType::LongLong:
  case BuiltinType::ULongLong:
    return 6 + (getIntWidth(T: LongLongTy) << 3);
  case BuiltinType::Int128:
  case BuiltinType::UInt128:
    return 7 + (getIntWidth(T: Int128Ty) << 3);

  // "The ranks of char8_t, char16_t, char32_t, and wchar_t equal the ranks of
  // their underlying types" [c++20 conv.rank]
  case BuiltinType::Char8:
    return getIntegerRank(T: UnsignedCharTy.getTypePtr());
  case BuiltinType::Char16:
    return getIntegerRank(
        T: getFromTargetType(Type: Target->getChar16Type()).getTypePtr());
  case BuiltinType::Char32:
    return getIntegerRank(
        T: getFromTargetType(Type: Target->getChar32Type()).getTypePtr());
  case BuiltinType::WChar_S:
  case BuiltinType::WChar_U:
    return getIntegerRank(
        T: getFromTargetType(Type: Target->getWCharType()).getTypePtr());
  }
}
8135
8136/// Whether this is a promotable bitfield reference according
8137/// to C99 6.3.1.1p2, bullet 2 (and GCC extensions).
8138///
8139/// \returns the type this bit-field will promote to, or NULL if no
8140/// promotion occurs.
8141QualType ASTContext::isPromotableBitField(Expr *E) const {
8142 if (E->isTypeDependent() || E->isValueDependent())
8143 return {};
8144
8145 // C++ [conv.prom]p5:
8146 // If the bit-field has an enumerated type, it is treated as any other
8147 // value of that type for promotion purposes.
8148 if (getLangOpts().CPlusPlus && E->getType()->isEnumeralType())
8149 return {};
8150
8151 // FIXME: We should not do this unless E->refersToBitField() is true. This
8152 // matters in C where getSourceBitField() will find bit-fields for various
8153 // cases where the source expression is not a bit-field designator.
8154
8155 FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields?
8156 if (!Field)
8157 return {};
8158
8159 QualType FT = Field->getType();
8160
8161 uint64_t BitWidth = Field->getBitWidthValue();
8162 uint64_t IntSize = getTypeSize(T: IntTy);
8163 // C++ [conv.prom]p5:
8164 // A prvalue for an integral bit-field can be converted to a prvalue of type
8165 // int if int can represent all the values of the bit-field; otherwise, it
8166 // can be converted to unsigned int if unsigned int can represent all the
8167 // values of the bit-field. If the bit-field is larger yet, no integral
8168 // promotion applies to it.
8169 // C11 6.3.1.1/2:
8170 // [For a bit-field of type _Bool, int, signed int, or unsigned int:]
8171 // If an int can represent all values of the original type (as restricted by
8172 // the width, for a bit-field), the value is converted to an int; otherwise,
8173 // it is converted to an unsigned int.
8174 //
8175 // FIXME: C does not permit promotion of a 'long : 3' bitfield to int.
8176 // We perform that promotion here to match GCC and C++.
8177 // FIXME: C does not permit promotion of an enum bit-field whose rank is
8178 // greater than that of 'int'. We perform that promotion to match GCC.
8179 //
8180 // C23 6.3.1.1p2:
8181 // The value from a bit-field of a bit-precise integer type is converted to
8182 // the corresponding bit-precise integer type. (The rest is the same as in
8183 // C11.)
8184 if (QualType QT = Field->getType(); QT->isBitIntType())
8185 return QT;
8186
8187 if (BitWidth < IntSize)
8188 return IntTy;
8189
8190 if (BitWidth == IntSize)
8191 return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy;
8192
8193 // Bit-fields wider than int are not subject to promotions, and therefore act
8194 // like the base type. GCC has some weird bugs in this area that we
8195 // deliberately do not follow (GCC follows a pre-standard resolution to
8196 // C's DR315 which treats bit-width as being part of the type, and this leaks
8197 // into their semantics in some cases).
8198 return {};
8199}
8200
8201/// getPromotedIntegerType - Returns the type that Promotable will
8202/// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable
8203/// integer type.
8204QualType ASTContext::getPromotedIntegerType(QualType Promotable) const {
8205 assert(!Promotable.isNull());
8206 assert(isPromotableIntegerType(Promotable));
8207 if (const auto *ET = Promotable->getAs<EnumType>())
8208 return ET->getDecl()->getPromotionType();
8209
8210 if (const auto *BT = Promotable->getAs<BuiltinType>()) {
8211 // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t
8212 // (3.9.1) can be converted to a prvalue of the first of the following
8213 // types that can represent all the values of its underlying type:
8214 // int, unsigned int, long int, unsigned long int, long long int, or
8215 // unsigned long long int [...]
8216 // FIXME: Is there some better way to compute this?
8217 if (BT->getKind() == BuiltinType::WChar_S ||
8218 BT->getKind() == BuiltinType::WChar_U ||
8219 BT->getKind() == BuiltinType::Char8 ||
8220 BT->getKind() == BuiltinType::Char16 ||
8221 BT->getKind() == BuiltinType::Char32) {
8222 bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S;
8223 uint64_t FromSize = getTypeSize(T: BT);
8224 QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy,
8225 LongLongTy, UnsignedLongLongTy };
8226 for (const auto &PT : PromoteTypes) {
8227 uint64_t ToSize = getTypeSize(T: PT);
8228 if (FromSize < ToSize ||
8229 (FromSize == ToSize && FromIsSigned == PT->isSignedIntegerType()))
8230 return PT;
8231 }
8232 llvm_unreachable("char type should fit into long long");
8233 }
8234 }
8235
8236 // At this point, we should have a signed or unsigned integer type.
8237 if (Promotable->isSignedIntegerType())
8238 return IntTy;
8239 uint64_t PromotableSize = getIntWidth(T: Promotable);
8240 uint64_t IntSize = getIntWidth(T: IntTy);
8241 assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize);
8242 return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy;
8243}
8244
8245/// Recurses in pointer/array types until it finds an objc retainable
8246/// type and returns its ownership.
8247Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const {
8248 while (!T.isNull()) {
8249 if (T.getObjCLifetime() != Qualifiers::OCL_None)
8250 return T.getObjCLifetime();
8251 if (T->isArrayType())
8252 T = getBaseElementType(type: T);
8253 else if (const auto *PT = T->getAs<PointerType>())
8254 T = PT->getPointeeType();
8255 else if (const auto *RT = T->getAs<ReferenceType>())
8256 T = RT->getPointeeType();
8257 else
8258 break;
8259 }
8260
8261 return Qualifiers::OCL_None;
8262}
8263
8264static const Type *getIntegerTypeForEnum(const EnumType *ET) {
8265 // Incomplete enum types are not treated as integer types.
8266 // FIXME: In C++, enum types are never integer types.
8267 if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped())
8268 return ET->getDecl()->getIntegerType().getTypePtr();
8269 return nullptr;
8270}
8271
8272/// getIntegerTypeOrder - Returns the highest ranked integer type:
8273/// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If
8274/// LHS < RHS, return -1.
8275int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const {
8276 const Type *LHSC = getCanonicalType(T: LHS).getTypePtr();
8277 const Type *RHSC = getCanonicalType(T: RHS).getTypePtr();
8278
8279 // Unwrap enums to their underlying type.
8280 if (const auto *ET = dyn_cast<EnumType>(Val: LHSC))
8281 LHSC = getIntegerTypeForEnum(ET);
8282 if (const auto *ET = dyn_cast<EnumType>(Val: RHSC))
8283 RHSC = getIntegerTypeForEnum(ET);
8284
8285 if (LHSC == RHSC) return 0;
8286
8287 bool LHSUnsigned = LHSC->isUnsignedIntegerType();
8288 bool RHSUnsigned = RHSC->isUnsignedIntegerType();
8289
8290 unsigned LHSRank = getIntegerRank(T: LHSC);
8291 unsigned RHSRank = getIntegerRank(T: RHSC);
8292
8293 if (LHSUnsigned == RHSUnsigned) { // Both signed or both unsigned.
8294 if (LHSRank == RHSRank) return 0;
8295 return LHSRank > RHSRank ? 1 : -1;
8296 }
8297
8298 // Otherwise, the LHS is signed and the RHS is unsigned or visa versa.
8299 if (LHSUnsigned) {
8300 // If the unsigned [LHS] type is larger, return it.
8301 if (LHSRank >= RHSRank)
8302 return 1;
8303
8304 // If the signed type can represent all values of the unsigned type, it
8305 // wins. Because we are dealing with 2's complement and types that are
8306 // powers of two larger than each other, this is always safe.
8307 return -1;
8308 }
8309
8310 // If the unsigned [RHS] type is larger, return it.
8311 if (RHSRank >= LHSRank)
8312 return -1;
8313
8314 // If the signed type can represent all values of the unsigned type, it
8315 // wins. Because we are dealing with 2's complement and types that are
8316 // powers of two larger than each other, this is always safe.
8317 return 1;
8318}
8319
8320TypedefDecl *ASTContext::getCFConstantStringDecl() const {
8321 if (CFConstantStringTypeDecl)
8322 return CFConstantStringTypeDecl;
8323
8324 assert(!CFConstantStringTagDecl &&
8325 "tag and typedef should be initialized together");
8326 CFConstantStringTagDecl = buildImplicitRecord(Name: "__NSConstantString_tag");
8327 CFConstantStringTagDecl->startDefinition();
8328
8329 struct {
8330 QualType Type;
8331 const char *Name;
8332 } Fields[5];
8333 unsigned Count = 0;
8334
8335 /// Objective-C ABI
8336 ///
8337 /// typedef struct __NSConstantString_tag {
8338 /// const int *isa;
8339 /// int flags;
8340 /// const char *str;
8341 /// long length;
8342 /// } __NSConstantString;
8343 ///
8344 /// Swift ABI (4.1, 4.2)
8345 ///
8346 /// typedef struct __NSConstantString_tag {
8347 /// uintptr_t _cfisa;
8348 /// uintptr_t _swift_rc;
8349 /// _Atomic(uint64_t) _cfinfoa;
8350 /// const char *_ptr;
8351 /// uint32_t _length;
8352 /// } __NSConstantString;
8353 ///
8354 /// Swift ABI (5.0)
8355 ///
8356 /// typedef struct __NSConstantString_tag {
8357 /// uintptr_t _cfisa;
8358 /// uintptr_t _swift_rc;
8359 /// _Atomic(uint64_t) _cfinfoa;
8360 /// const char *_ptr;
8361 /// uintptr_t _length;
8362 /// } __NSConstantString;
8363
8364 const auto CFRuntime = getLangOpts().CFRuntime;
8365 if (static_cast<unsigned>(CFRuntime) <
8366 static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift)) {
8367 Fields[Count++] = { .Type: getPointerType(T: IntTy.withConst()), .Name: "isa" };
8368 Fields[Count++] = { .Type: IntTy, .Name: "flags" };
8369 Fields[Count++] = { .Type: getPointerType(T: CharTy.withConst()), .Name: "str" };
8370 Fields[Count++] = { .Type: LongTy, .Name: "length" };
8371 } else {
8372 Fields[Count++] = { .Type: getUIntPtrType(), .Name: "_cfisa" };
8373 Fields[Count++] = { .Type: getUIntPtrType(), .Name: "_swift_rc" };
8374 Fields[Count++] = { .Type: getFromTargetType(Type: Target->getUInt64Type()), .Name: "_swift_rc" };
8375 Fields[Count++] = { .Type: getPointerType(T: CharTy.withConst()), .Name: "_ptr" };
8376 if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 ||
8377 CFRuntime == LangOptions::CoreFoundationABI::Swift4_2)
8378 Fields[Count++] = { .Type: IntTy, .Name: "_ptr" };
8379 else
8380 Fields[Count++] = { .Type: getUIntPtrType(), .Name: "_ptr" };
8381 }
8382
8383 // Create fields
8384 for (unsigned i = 0; i < Count; ++i) {
8385 FieldDecl *Field =
8386 FieldDecl::Create(C: *this, DC: CFConstantStringTagDecl, StartLoc: SourceLocation(),
8387 IdLoc: SourceLocation(), Id: &Idents.get(Name: Fields[i].Name),
8388 T: Fields[i].Type, /*TInfo=*/nullptr,
8389 /*BitWidth=*/BW: nullptr, /*Mutable=*/false, InitStyle: ICIS_NoInit);
8390 Field->setAccess(AS_public);
8391 CFConstantStringTagDecl->addDecl(D: Field);
8392 }
8393
8394 CFConstantStringTagDecl->completeDefinition();
8395 // This type is designed to be compatible with NSConstantString, but cannot
8396 // use the same name, since NSConstantString is an interface.
8397 auto tagType = getTagDeclType(Decl: CFConstantStringTagDecl);
8398 CFConstantStringTypeDecl =
8399 buildImplicitTypedef(T: tagType, Name: "__NSConstantString");
8400
8401 return CFConstantStringTypeDecl;
8402}
8403
8404RecordDecl *ASTContext::getCFConstantStringTagDecl() const {
8405 if (!CFConstantStringTagDecl)
8406 getCFConstantStringDecl(); // Build the tag and the typedef.
8407 return CFConstantStringTagDecl;
8408}
8409
8410// getCFConstantStringType - Return the type used for constant CFStrings.
8411QualType ASTContext::getCFConstantStringType() const {
8412 return getTypedefType(Decl: getCFConstantStringDecl());
8413}
8414
8415QualType ASTContext::getObjCSuperType() const {
8416 if (ObjCSuperType.isNull()) {
8417 RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord(Name: "objc_super");
8418 getTranslationUnitDecl()->addDecl(D: ObjCSuperTypeDecl);
8419 ObjCSuperType = getTagDeclType(Decl: ObjCSuperTypeDecl);
8420 }
8421 return ObjCSuperType;
8422}
8423
8424void ASTContext::setCFConstantStringType(QualType T) {
8425 const auto *TD = T->castAs<TypedefType>();
8426 CFConstantStringTypeDecl = cast<TypedefDecl>(Val: TD->getDecl());
8427 const auto *TagType = TD->castAs<RecordType>();
8428 CFConstantStringTagDecl = TagType->getDecl();
8429}
8430
8431QualType ASTContext::getBlockDescriptorType() const {
8432 if (BlockDescriptorType)
8433 return getTagDeclType(Decl: BlockDescriptorType);
8434
8435 RecordDecl *RD;
8436 // FIXME: Needs the FlagAppleBlock bit.
8437 RD = buildImplicitRecord(Name: "__block_descriptor");
8438 RD->startDefinition();
8439
8440 QualType FieldTypes[] = {
8441 UnsignedLongTy,
8442 UnsignedLongTy,
8443 };
8444
8445 static const char *const FieldNames[] = {
8446 "reserved",
8447 "Size"
8448 };
8449
8450 for (size_t i = 0; i < 2; ++i) {
8451 FieldDecl *Field = FieldDecl::Create(
8452 C: *this, DC: RD, StartLoc: SourceLocation(), IdLoc: SourceLocation(),
8453 Id: &Idents.get(Name: FieldNames[i]), T: FieldTypes[i], /*TInfo=*/nullptr,
8454 /*BitWidth=*/BW: nullptr, /*Mutable=*/false, InitStyle: ICIS_NoInit);
8455 Field->setAccess(AS_public);
8456 RD->addDecl(D: Field);
8457 }
8458
8459 RD->completeDefinition();
8460
8461 BlockDescriptorType = RD;
8462
8463 return getTagDeclType(Decl: BlockDescriptorType);
8464}
8465
8466QualType ASTContext::getBlockDescriptorExtendedType() const {
8467 if (BlockDescriptorExtendedType)
8468 return getTagDeclType(Decl: BlockDescriptorExtendedType);
8469
8470 RecordDecl *RD;
8471 // FIXME: Needs the FlagAppleBlock bit.
8472 RD = buildImplicitRecord(Name: "__block_descriptor_withcopydispose");
8473 RD->startDefinition();
8474
8475 QualType FieldTypes[] = {
8476 UnsignedLongTy,
8477 UnsignedLongTy,
8478 getPointerType(T: VoidPtrTy),
8479 getPointerType(T: VoidPtrTy)
8480 };
8481
8482 static const char *const FieldNames[] = {
8483 "reserved",
8484 "Size",
8485 "CopyFuncPtr",
8486 "DestroyFuncPtr"
8487 };
8488
8489 for (size_t i = 0; i < 4; ++i) {
8490 FieldDecl *Field = FieldDecl::Create(
8491 C: *this, DC: RD, StartLoc: SourceLocation(), IdLoc: SourceLocation(),
8492 Id: &Idents.get(Name: FieldNames[i]), T: FieldTypes[i], /*TInfo=*/nullptr,
8493 /*BitWidth=*/BW: nullptr,
8494 /*Mutable=*/false, InitStyle: ICIS_NoInit);
8495 Field->setAccess(AS_public);
8496 RD->addDecl(D: Field);
8497 }
8498
8499 RD->completeDefinition();
8500
8501 BlockDescriptorExtendedType = RD;
8502 return getTagDeclType(Decl: BlockDescriptorExtendedType);
8503}
8504
/// Classify a type into its OpenCL type kind (image, pipe, sampler, etc.),
/// returning OCLTK_Default for anything that is not a special OpenCL type.
OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const {
  const auto *BT = dyn_cast<BuiltinType>(Val: T);

  // Pipes are the only non-builtin special OpenCL type.
  if (!BT) {
    if (isa<PipeType>(Val: T))
      return OCLTK_Pipe;

    return OCLTK_Default;
  }

  switch (BT->getKind()) {
  // All image builtin kinds (expanded from the .def file) map to OCLTK_Image.
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
  case BuiltinType::Id: \
    return OCLTK_Image;
#include "clang/Basic/OpenCLImageTypes.def"

  case BuiltinType::OCLClkEvent:
    return OCLTK_ClkEvent;

  case BuiltinType::OCLEvent:
    return OCLTK_Event;

  case BuiltinType::OCLQueue:
    return OCLTK_Queue;

  case BuiltinType::OCLReserveID:
    return OCLTK_ReserveID;

  case BuiltinType::OCLSampler:
    return OCLTK_Sampler;

  default:
    return OCLTK_Default;
  }
}
8540
8541LangAS ASTContext::getOpenCLTypeAddrSpace(const Type *T) const {
8542 return Target->getOpenCLTypeAddrSpace(TK: getOpenCLTypeKind(T));
8543}
8544
/// BlockRequiresCopying - Returns true if byref variable "D" of type "Ty"
/// requires copy/dispose. Note that this must match the logic
/// in buildByrefHelpers.
///
/// The checks are ordered: C++ records first, then pointer-auth, then
/// non-trivial C types, and finally ObjC retainable types.
bool ASTContext::BlockRequiresCopying(QualType Ty,
                                      const VarDecl *D) {
  // C++ records need helpers unless they have no copy expression and a
  // trivial destructor.
  if (const CXXRecordDecl *record = Ty->getAsCXXRecordDecl()) {
    const Expr *copyExpr = getBlockVarCopyInit(VD: D).getCopyExpr();
    if (!copyExpr && record->hasTrivialDestructor()) return false;

    return true;
  }

  // Address-discriminated pointer authentication requires re-signing on copy.
  if (Ty.hasAddressDiscriminatedPointerAuth())
    return true;

  // The block needs copy/destroy helpers if Ty is non-trivial to destructively
  // move or destroy.
  if (Ty.isNonTrivialToPrimitiveDestructiveMove() || Ty.isDestructedType())
    return true;

  if (!Ty->isObjCRetainableType()) return false;

  Qualifiers qs = Ty.getQualifiers();

  // If we have lifetime, that dominates.
  if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) {
    switch (lifetime) {
    case Qualifiers::OCL_None: llvm_unreachable("impossible");

    // These are just bits as far as the runtime is concerned.
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      return false;

    // These cases should have been taken care of when checking the type's
    // non-triviality.
    case Qualifiers::OCL_Weak:
    case Qualifiers::OCL_Strong:
      llvm_unreachable("impossible");
    }
    llvm_unreachable("fell out of lifetime switch!");
  }
  // No lifetime qualifier: copy helpers are needed for block pointers,
  // NSObject-attributed types, and ObjC object pointers.
  return (Ty->isBlockPointerType() || isObjCNSObjectType(Ty) ||
          Ty->isObjCObjectPointerType());
}
8590
8591bool ASTContext::getByrefLifetime(QualType Ty,
8592 Qualifiers::ObjCLifetime &LifeTime,
8593 bool &HasByrefExtendedLayout) const {
8594 if (!getLangOpts().ObjC ||
8595 getLangOpts().getGC() != LangOptions::NonGC)
8596 return false;
8597
8598 HasByrefExtendedLayout = false;
8599 if (Ty->isRecordType()) {
8600 HasByrefExtendedLayout = true;
8601 LifeTime = Qualifiers::OCL_None;
8602 } else if ((LifeTime = Ty.getObjCLifetime())) {
8603 // Honor the ARC qualifiers.
8604 } else if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) {
8605 // The MRR rule.
8606 LifeTime = Qualifiers::OCL_ExplicitNone;
8607 } else {
8608 LifeTime = Qualifiers::OCL_None;
8609 }
8610 return true;
8611}
8612
8613CanQualType ASTContext::getNSUIntegerType() const {
8614 assert(Target && "Expected target to be initialized");
8615 const llvm::Triple &T = Target->getTriple();
8616 // Windows is LLP64 rather than LP64
8617 if (T.isOSWindows() && T.isArch64Bit())
8618 return UnsignedLongLongTy;
8619 return UnsignedLongTy;
8620}
8621
8622CanQualType ASTContext::getNSIntegerType() const {
8623 assert(Target && "Expected target to be initialized");
8624 const llvm::Triple &T = Target->getTriple();
8625 // Windows is LLP64 rather than LP64
8626 if (T.isOSWindows() && T.isArch64Bit())
8627 return LongLongTy;
8628 return LongTy;
8629}
8630
8631TypedefDecl *ASTContext::getObjCInstanceTypeDecl() {
8632 if (!ObjCInstanceTypeDecl)
8633 ObjCInstanceTypeDecl =
8634 buildImplicitTypedef(T: getObjCIdType(), Name: "instancetype");
8635 return ObjCInstanceTypeDecl;
8636}
8637
8638// This returns true if a type has been typedefed to BOOL:
8639// typedef <type> BOOL;
8640static bool isTypeTypedefedAsBOOL(QualType T) {
8641 if (const auto *TT = dyn_cast<TypedefType>(Val&: T))
8642 if (IdentifierInfo *II = TT->getDecl()->getIdentifier())
8643 return II->isStr(Str: "BOOL");
8644
8645 return false;
8646}
8647
8648/// getObjCEncodingTypeSize returns size of type for objective-c encoding
8649/// purpose.
8650CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const {
8651 if (!type->isIncompleteArrayType() && type->isIncompleteType())
8652 return CharUnits::Zero();
8653
8654 CharUnits sz = getTypeSizeInChars(T: type);
8655
8656 // Make all integer and enum types at least as large as an int
8657 if (sz.isPositive() && type->isIntegralOrEnumerationType())
8658 sz = std::max(a: sz, b: getTypeSizeInChars(T: IntTy));
8659 // Treat arrays as pointers, since that's how they're passed in.
8660 else if (type->isArrayType())
8661 sz = getTypeSizeInChars(T: VoidPtrTy);
8662 return sz;
8663}
8664
8665bool ASTContext::isMSStaticDataMemberInlineDefinition(const VarDecl *VD) const {
8666 return getTargetInfo().getCXXABI().isMicrosoft() &&
8667 VD->isStaticDataMember() &&
8668 VD->getType()->isIntegralOrEnumerationType() &&
8669 !VD->getFirstDecl()->isOutOfLine() && VD->getFirstDecl()->hasInit();
8670}
8671
8672ASTContext::InlineVariableDefinitionKind
8673ASTContext::getInlineVariableDefinitionKind(const VarDecl *VD) const {
8674 if (!VD->isInline())
8675 return InlineVariableDefinitionKind::None;
8676
8677 // In almost all cases, it's a weak definition.
8678 auto *First = VD->getFirstDecl();
8679 if (First->isInlineSpecified() || !First->isStaticDataMember())
8680 return InlineVariableDefinitionKind::Weak;
8681
8682 // If there's a file-context declaration in this translation unit, it's a
8683 // non-discardable definition.
8684 for (auto *D : VD->redecls())
8685 if (D->getLexicalDeclContext()->isFileContext() &&
8686 !D->isInlineSpecified() && (D->isConstexpr() || First->isConstexpr()))
8687 return InlineVariableDefinitionKind::Strong;
8688
8689 // If we've not seen one yet, we don't know.
8690 return InlineVariableDefinitionKind::WeakUnknown;
8691}
8692
8693static std::string charUnitsToString(const CharUnits &CU) {
8694 return llvm::itostr(X: CU.getQuantity());
8695}
8696
/// getObjCEncodingForBlock - Return the encoded type for this block
/// declaration.
///
/// The result is: <return-type-encoding> <frame-size> "@?0"
/// followed by each parameter's encoding and its byte offset in the frame.
std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const {
  std::string S;

  const BlockDecl *Decl = Expr->getBlockDecl();
  QualType BlockTy =
      Expr->getType()->castAs<BlockPointerType>()->getPointeeType();
  QualType BlockReturnTy = BlockTy->castAs<FunctionType>()->getReturnType();
  // Encode result type.
  if (getLangOpts().EncodeExtendedBlockSig)
    getObjCEncodingForMethodParameter(QT: Decl::OBJC_TQ_None, T: BlockReturnTy, S,
                                      Extended: true /*Extended*/);
  else
    getObjCEncodingForType(T: BlockReturnTy, S);
  // Compute size of all parameters.
  // Start with computing size of a pointer in number of bytes.
  // FIXME: There might(should) be a better way of doing this computation!
  CharUnits PtrSize = getTypeSizeInChars(T: VoidPtrTy);
  CharUnits ParmOffset = PtrSize;
  // Zero-size (incomplete) parameters are skipped here and below so the two
  // loops stay in sync.
  for (auto *PI : Decl->parameters()) {
    QualType PType = PI->getType();
    CharUnits sz = getObjCEncodingTypeSize(type: PType);
    if (sz.isZero())
      continue;
    assert(sz.isPositive() && "BlockExpr - Incomplete param type");
    ParmOffset += sz;
  }
  // Size of the argument frame
  S += charUnitsToString(CU: ParmOffset);
  // Block pointer and offset.
  S += "@?0";

  // Argument types.
  ParmOffset = PtrSize;
  for (auto *PVDecl : Decl->parameters()) {
    QualType PType = PVDecl->getOriginalType();
    if (const auto *AT =
            dyn_cast<ArrayType>(Val: PType->getCanonicalTypeInternal())) {
      // Use array's original type only if it has known number of
      // elements.
      if (!isa<ConstantArrayType>(Val: AT))
        PType = PVDecl->getType();
    } else if (PType->isFunctionType())
      // Functions decay to pointers; encode the decayed type.
      PType = PVDecl->getType();
    if (getLangOpts().EncodeExtendedBlockSig)
      getObjCEncodingForMethodParameter(QT: Decl::OBJC_TQ_None, T: PType,
                                      S, Extended: true /*Extended*/);
    else
      getObjCEncodingForType(T: PType, S);
    S += charUnitsToString(CU: ParmOffset);
    ParmOffset += getObjCEncodingTypeSize(type: PType);
  }

  return S;
}
8753
8754std::string
8755ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const {
8756 std::string S;
8757 // Encode result type.
8758 getObjCEncodingForType(T: Decl->getReturnType(), S);
8759 CharUnits ParmOffset;
8760 // Compute size of all parameters.
8761 for (auto *PI : Decl->parameters()) {
8762 QualType PType = PI->getType();
8763 CharUnits sz = getObjCEncodingTypeSize(type: PType);
8764 if (sz.isZero())
8765 continue;
8766
8767 assert(sz.isPositive() &&
8768 "getObjCEncodingForFunctionDecl - Incomplete param type");
8769 ParmOffset += sz;
8770 }
8771 S += charUnitsToString(CU: ParmOffset);
8772 ParmOffset = CharUnits::Zero();
8773
8774 // Argument types.
8775 for (auto *PVDecl : Decl->parameters()) {
8776 QualType PType = PVDecl->getOriginalType();
8777 if (const auto *AT =
8778 dyn_cast<ArrayType>(Val: PType->getCanonicalTypeInternal())) {
8779 // Use array's original type only if it has known number of
8780 // elements.
8781 if (!isa<ConstantArrayType>(Val: AT))
8782 PType = PVDecl->getType();
8783 } else if (PType->isFunctionType())
8784 PType = PVDecl->getType();
8785 getObjCEncodingForType(T: PType, S);
8786 S += charUnitsToString(CU: ParmOffset);
8787 ParmOffset += getObjCEncodingTypeSize(type: PType);
8788 }
8789
8790 return S;
8791}
8792
8793/// getObjCEncodingForMethodParameter - Return the encoded type for a single
8794/// method parameter or return type. If Extended, include class names and
8795/// block object types.
8796void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT,
8797 QualType T, std::string& S,
8798 bool Extended) const {
8799 // Encode type qualifier, 'in', 'inout', etc. for the parameter.
8800 getObjCEncodingForTypeQualifier(QT, S);
8801 // Encode parameter type.
8802 ObjCEncOptions Options = ObjCEncOptions()
8803 .setExpandPointedToStructures()
8804 .setExpandStructures()
8805 .setIsOutermostType();
8806 if (Extended)
8807 Options.setEncodeBlockParameters().setEncodeClassNames();
8808 getObjCEncodingForTypeImpl(t: T, S, Options, /*Field=*/nullptr);
8809}
8810
/// getObjCEncodingForMethodDecl - Return the encoded type for this method
/// declaration.
///
/// Layout: <return-encoding> <frame-size> "@0:" <ptr-size> then each
/// parameter's encoding followed by its byte offset in the argument frame
/// (self and _cmd occupy the first two pointer-sized slots).
std::string ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl,
                                                     bool Extended) const {
  // FIXME: This is not very efficient.
  // Encode return type.
  std::string S;
  getObjCEncodingForMethodParameter(QT: Decl->getObjCDeclQualifier(),
                                    T: Decl->getReturnType(), S, Extended);
  // Compute size of all parameters.
  // Start with computing size of a pointer in number of bytes.
  // FIXME: There might(should) be a better way of doing this computation!
  CharUnits PtrSize = getTypeSizeInChars(T: VoidPtrTy);
  // The first two arguments (self and _cmd) are pointers; account for
  // their size.
  CharUnits ParmOffset = 2 * PtrSize;
  // Zero-size (incomplete) parameters are skipped here and below so both
  // loops stay in sync.
  for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(),
       E = Decl->sel_param_end(); PI != E; ++PI) {
    QualType PType = (*PI)->getType();
    CharUnits sz = getObjCEncodingTypeSize(type: PType);
    if (sz.isZero())
      continue;

    assert(sz.isPositive() &&
           "getObjCEncodingForMethodDecl - Incomplete param type");
    ParmOffset += sz;
  }
  S += charUnitsToString(CU: ParmOffset);
  // self at offset 0, then _cmd at the next pointer-sized slot.
  S += "@0:";
  S += charUnitsToString(CU: PtrSize);

  // Argument types.
  ParmOffset = 2 * PtrSize;
  for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(),
       E = Decl->sel_param_end(); PI != E; ++PI) {
    const ParmVarDecl *PVDecl = *PI;
    QualType PType = PVDecl->getOriginalType();
    if (const auto *AT =
            dyn_cast<ArrayType>(Val: PType->getCanonicalTypeInternal())) {
      // Use array's original type only if it has known number of
      // elements.
      if (!isa<ConstantArrayType>(Val: AT))
        PType = PVDecl->getType();
    } else if (PType->isFunctionType())
      // Functions decay to pointers; encode the decayed type.
      PType = PVDecl->getType();
    getObjCEncodingForMethodParameter(QT: PVDecl->getObjCDeclQualifier(),
                                      T: PType, S, Extended);
    S += charUnitsToString(CU: ParmOffset);
    ParmOffset += getObjCEncodingTypeSize(type: PType);
  }

  return S;
}
8864
8865ObjCPropertyImplDecl *
8866ASTContext::getObjCPropertyImplDeclForPropertyDecl(
8867 const ObjCPropertyDecl *PD,
8868 const Decl *Container) const {
8869 if (!Container)
8870 return nullptr;
8871 if (const auto *CID = dyn_cast<ObjCCategoryImplDecl>(Val: Container)) {
8872 for (auto *PID : CID->property_impls())
8873 if (PID->getPropertyDecl() == PD)
8874 return PID;
8875 } else {
8876 const auto *OID = cast<ObjCImplementationDecl>(Val: Container);
8877 for (auto *PID : OID->property_impls())
8878 if (PID->getPropertyDecl() == PD)
8879 return PID;
8880 }
8881 return nullptr;
8882}
8883
8884/// getObjCEncodingForPropertyDecl - Return the encoded type for this
8885/// property declaration. If non-NULL, Container must be either an
8886/// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be
8887/// NULL when getting encodings for protocol properties.
8888/// Property attributes are stored as a comma-delimited C string. The simple
8889/// attributes readonly and bycopy are encoded as single characters. The
8890/// parametrized attributes, getter=name, setter=name, and ivar=name, are
8891/// encoded as single characters, followed by an identifier. Property types
8892/// are also encoded as a parametrized attribute. The characters used to encode
8893/// these attributes are defined by the following enumeration:
8894/// @code
8895/// enum PropertyAttributes {
8896/// kPropertyReadOnly = 'R', // property is read-only.
8897/// kPropertyBycopy = 'C', // property is a copy of the value last assigned
8898/// kPropertyByref = '&', // property is a reference to the value last assigned
8899/// kPropertyDynamic = 'D', // property is dynamic
8900/// kPropertyGetter = 'G', // followed by getter selector name
8901/// kPropertySetter = 'S', // followed by setter selector name
8902/// kPropertyInstanceVariable = 'V' // followed by instance variable name
8903/// kPropertyType = 'T' // followed by old-style type encoding.
8904/// kPropertyWeak = 'W' // 'weak' property
8905/// kPropertyStrong = 'P' // property GC'able
8906/// kPropertyNonAtomic = 'N' // property non-atomic
8907/// kPropertyOptional = '?' // property optional
8908/// };
8909/// @endcode
std::string
ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
                                           const Decl *Container) const {
  // Collect information from the property implementation decl(s).
  bool Dynamic = false;
  ObjCPropertyImplDecl *SynthesizePID = nullptr;

  if (ObjCPropertyImplDecl *PropertyImpDecl =
      getObjCPropertyImplDeclForPropertyDecl(PD, Container)) {
    if (PropertyImpDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
      Dynamic = true;
    else
      SynthesizePID = PropertyImpDecl;
  }

  // FIXME: This is not very efficient.
  // The encoding always starts with 'T' followed by the property type.
  std::string S = "T";

  // Encode result type.
  // GCC has some special rules regarding encoding of properties which
  // closely resembles encoding of ivars.
  getObjCEncodingForPropertyType(T: PD->getType(), S);

  // '?' marks an @optional protocol property.
  if (PD->isOptional())
    S += ",?";

  // Setter-semantics attributes: 'R' read-only, 'C' copy, '&' retain,
  // 'W' weak (see the attribute-character table above).
  if (PD->isReadOnly()) {
    S += ",R";
    if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_copy)
      S += ",C";
    if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_retain)
      S += ",&";
    if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak)
      S += ",W";
  } else {
    switch (PD->getSetterKind()) {
    case ObjCPropertyDecl::Assign: break;
    case ObjCPropertyDecl::Copy:   S += ",C"; break;
    case ObjCPropertyDecl::Retain: S += ",&"; break;
    case ObjCPropertyDecl::Weak:   S += ",W"; break;
    }
  }

  // It really isn't clear at all what this means, since properties
  // are "dynamic by default".
  if (Dynamic)
    S += ",D";

  if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_nonatomic)
    S += ",N";

  // Custom getter/setter selectors are encoded by name.
  if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_getter) {
    S += ",G";
    S += PD->getGetterName().getAsString();
  }

  if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_setter) {
    S += ",S";
    S += PD->getSetterName().getAsString();
  }

  // For @synthesize'd properties, record the backing ivar's name.
  if (SynthesizePID) {
    const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl();
    S += ",V";
    S += OID->getNameAsString();
  }

  // FIXME: OBJCGC: weak & strong
  return S;
}
8980
8981/// getLegacyIntegralTypeEncoding -
8982/// Another legacy compatibility encoding: 32-bit longs are encoded as
8983/// 'l' or 'L' , but not always. For typedefs, we need to use
8984/// 'i' or 'I' instead if encoding a struct field, or a pointer!
8985void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const {
8986 if (PointeeTy->getAs<TypedefType>()) {
8987 if (const auto *BT = PointeeTy->getAs<BuiltinType>()) {
8988 if (BT->getKind() == BuiltinType::ULong && getIntWidth(T: PointeeTy) == 32)
8989 PointeeTy = UnsignedIntTy;
8990 else
8991 if (BT->getKind() == BuiltinType::Long && getIntWidth(T: PointeeTy) == 32)
8992 PointeeTy = IntTy;
8993 }
8994 }
8995}
8996
8997void ASTContext::getObjCEncodingForType(QualType T, std::string& S,
8998 const FieldDecl *Field,
8999 QualType *NotEncodedT) const {
9000 // We follow the behavior of gcc, expanding structures which are
9001 // directly pointed to, and expanding embedded structures. Note that
9002 // these rules are sufficient to prevent recursive encoding of the
9003 // same type.
9004 getObjCEncodingForTypeImpl(t: T, S,
9005 Options: ObjCEncOptions()
9006 .setExpandPointedToStructures()
9007 .setExpandStructures()
9008 .setIsOutermostType(),
9009 Field, NotEncodedT);
9010}
9011
9012void ASTContext::getObjCEncodingForPropertyType(QualType T,
9013 std::string& S) const {
9014 // Encode result type.
9015 // GCC has some special rules regarding encoding of properties which
9016 // closely resembles encoding of ivars.
9017 getObjCEncodingForTypeImpl(t: T, S,
9018 Options: ObjCEncOptions()
9019 .setExpandPointedToStructures()
9020 .setExpandStructures()
9021 .setIsOutermostType()
9022 .setEncodingProperty(),
9023 /*Field=*/nullptr);
9024}
9025
/// Map a builtin type to the single character used for it in an Objective-C
/// @encode string. Returns ' ' for builtin types that currently have no
/// defined encoding (fixed-point types, half/128-bit floats); for target
/// extension vector types (SVE/RVV/WASM/AMDGPU) an error diagnostic is
/// emitted in addition to returning ' '.
static char getObjCEncodingForPrimitiveType(const ASTContext *C,
                                            const BuiltinType *BT) {
  BuiltinType::Kind kind = BT->getKind();
  switch (kind) {
  case BuiltinType::Void: return 'v';
  case BuiltinType::Bool: return 'B';
  case BuiltinType::Char8:
  case BuiltinType::Char_U:
  case BuiltinType::UChar: return 'C';
  case BuiltinType::Char16:
  case BuiltinType::UShort: return 'S';
  case BuiltinType::Char32:
  case BuiltinType::UInt: return 'I';
  // 'long' depends on the target: a 32-bit long keeps the legacy 'l'/'L'
  // letters, while a 64-bit long encodes like 'long long' ('q'/'Q').
  case BuiltinType::ULong:
    return C->getTargetInfo().getLongWidth() == 32 ? 'L' : 'Q';
  case BuiltinType::UInt128: return 'T';
  case BuiltinType::ULongLong: return 'Q';
  case BuiltinType::Char_S:
  case BuiltinType::SChar: return 'c';
  case BuiltinType::Short: return 's';
  case BuiltinType::WChar_S:
  case BuiltinType::WChar_U:
  case BuiltinType::Int: return 'i';
  case BuiltinType::Long:
    return C->getTargetInfo().getLongWidth() == 32 ? 'l' : 'q';
  case BuiltinType::LongLong: return 'q';
  case BuiltinType::Int128: return 't';
  case BuiltinType::Float: return 'f';
  case BuiltinType::Double: return 'd';
  case BuiltinType::LongDouble: return 'D';
  case BuiltinType::NullPtr: return '*'; // like char*

  case BuiltinType::BFloat16:
  case BuiltinType::Float16:
  case BuiltinType::Float128:
  case BuiltinType::Ibm128:
  case BuiltinType::Half:
  case BuiltinType::ShortAccum:
  case BuiltinType::Accum:
  case BuiltinType::LongAccum:
  case BuiltinType::UShortAccum:
  case BuiltinType::UAccum:
  case BuiltinType::ULongAccum:
  case BuiltinType::ShortFract:
  case BuiltinType::Fract:
  case BuiltinType::LongFract:
  case BuiltinType::UShortFract:
  case BuiltinType::UFract:
  case BuiltinType::ULongFract:
  case BuiltinType::SatShortAccum:
  case BuiltinType::SatAccum:
  case BuiltinType::SatLongAccum:
  case BuiltinType::SatUShortAccum:
  case BuiltinType::SatUAccum:
  case BuiltinType::SatULongAccum:
  case BuiltinType::SatShortFract:
  case BuiltinType::SatFract:
  case BuiltinType::SatLongFract:
  case BuiltinType::SatUShortFract:
  case BuiltinType::SatUFract:
  case BuiltinType::SatULongFract:
    // FIXME: potentially need @encodes for these!
    return ' ';

// Target extension types (scalable/opaque vectors) cannot be encoded at all;
// report an error rather than silently emitting a bogus string.
#define SVE_TYPE(Name, Id, SingletonId) \
  case BuiltinType::Id:
#include "clang/Basic/AArch64ACLETypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) case BuiltinType::Id:
#include "clang/Basic/AMDGPUTypes.def"
  {
    DiagnosticsEngine &Diags = C->getDiagnostics();
    unsigned DiagID = Diags.getCustomDiagID(L: DiagnosticsEngine::Error,
                                            FormatString: "cannot yet @encode type %0");
    Diags.Report(DiagID) << BT->getName(Policy: C->getPrintingPolicy());
    return ' ';
  }

  // The ObjC builtin primitives are encoded at a higher level (as '@', '#',
  // ':'); reaching here with one of them is a caller bug.
  case BuiltinType::ObjCId:
  case BuiltinType::ObjCClass:
  case BuiltinType::ObjCSel:
    llvm_unreachable("@encoding ObjC primitive type");

  // OpenCL and placeholder types don't need @encodings.
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
  case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
  case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
  case BuiltinType::OCLEvent:
  case BuiltinType::OCLClkEvent:
  case BuiltinType::OCLQueue:
  case BuiltinType::OCLReserveID:
  case BuiltinType::OCLSampler:
  case BuiltinType::Dependent:
#define PPC_VECTOR_TYPE(Name, Id, Size) \
  case BuiltinType::Id:
#include "clang/Basic/PPCTypes.def"
#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/HLSLIntangibleTypes.def"
#define BUILTIN_TYPE(KIND, ID)
#define PLACEHOLDER_TYPE(KIND, ID) \
  case BuiltinType::KIND:
#include "clang/AST/BuiltinTypes.def"
    llvm_unreachable("invalid builtin type for @encode");
  }
  llvm_unreachable("invalid BuiltinType::Kind value");
}
9138
9139static char ObjCEncodingForEnumType(const ASTContext *C, const EnumType *ET) {
9140 EnumDecl *Enum = ET->getDecl();
9141
9142 // The encoding of an non-fixed enum type is always 'i', regardless of size.
9143 if (!Enum->isFixed())
9144 return 'i';
9145
9146 // The encoding of a fixed enum type matches its fixed underlying type.
9147 const auto *BT = Enum->getIntegerType()->castAs<BuiltinType>();
9148 return getObjCEncodingForPrimitiveType(C, BT);
9149}
9150
9151static void EncodeBitField(const ASTContext *Ctx, std::string& S,
9152 QualType T, const FieldDecl *FD) {
9153 assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl");
9154 S += 'b';
9155 // The NeXT runtime encodes bit fields as b followed by the number of bits.
9156 // The GNU runtime requires more information; bitfields are encoded as b,
9157 // then the offset (in bits) of the first element, then the type of the
9158 // bitfield, then the size in bits. For example, in this structure:
9159 //
9160 // struct
9161 // {
9162 // int integer;
9163 // int flags:2;
9164 // };
9165 // On a 32-bit system, the encoding for flags would be b2 for the NeXT
9166 // runtime, but b32i2 for the GNU runtime. The reason for this extra
9167 // information is not especially sensible, but we're stuck with it for
9168 // compatibility with GCC, although providing it breaks anything that
9169 // actually uses runtime introspection and wants to work on both runtimes...
9170 if (Ctx->getLangOpts().ObjCRuntime.isGNUFamily()) {
9171 uint64_t Offset;
9172
9173 if (const auto *IVD = dyn_cast<ObjCIvarDecl>(Val: FD)) {
9174 Offset = Ctx->lookupFieldBitOffset(OID: IVD->getContainingInterface(), Ivar: IVD);
9175 } else {
9176 const RecordDecl *RD = FD->getParent();
9177 const ASTRecordLayout &RL = Ctx->getASTRecordLayout(D: RD);
9178 Offset = RL.getFieldOffset(FieldNo: FD->getFieldIndex());
9179 }
9180
9181 S += llvm::utostr(X: Offset);
9182
9183 if (const auto *ET = T->getAs<EnumType>())
9184 S += ObjCEncodingForEnumType(C: Ctx, ET);
9185 else {
9186 const auto *BT = T->castAs<BuiltinType>();
9187 S += getObjCEncodingForPrimitiveType(C: Ctx, BT);
9188 }
9189 }
9190 S += llvm::utostr(X: FD->getBitWidthValue());
9191}
9192
9193// Helper function for determining whether the encoded type string would include
9194// a template specialization type.
9195static bool hasTemplateSpecializationInEncodedString(const Type *T,
9196 bool VisitBasesAndFields) {
9197 T = T->getBaseElementTypeUnsafe();
9198
9199 if (auto *PT = T->getAs<PointerType>())
9200 return hasTemplateSpecializationInEncodedString(
9201 T: PT->getPointeeType().getTypePtr(), VisitBasesAndFields: false);
9202
9203 auto *CXXRD = T->getAsCXXRecordDecl();
9204
9205 if (!CXXRD)
9206 return false;
9207
9208 if (isa<ClassTemplateSpecializationDecl>(Val: CXXRD))
9209 return true;
9210
9211 if (!CXXRD->hasDefinition() || !VisitBasesAndFields)
9212 return false;
9213
9214 for (const auto &B : CXXRD->bases())
9215 if (hasTemplateSpecializationInEncodedString(T: B.getType().getTypePtr(),
9216 VisitBasesAndFields: true))
9217 return true;
9218
9219 for (auto *FD : CXXRD->fields())
9220 if (hasTemplateSpecializationInEncodedString(T: FD->getType().getTypePtr(),
9221 VisitBasesAndFields: true))
9222 return true;
9223
9224 return false;
9225}
9226
// FIXME: Use SmallString for accumulating string.
/// Workhorse for Objective-C @encode: append the encoding of \p T to \p S.
///
/// \param T the type to encode; canonicalized here before dispatch.
/// \param S the accumulating encoding string, appended to in place.
/// \param Options flags controlling structure expansion, outermost-type
///        qualifier handling, and property/block-specific behavior.
/// \param FD the field being encoded when encoding an ivar or struct field;
///        enables bit-field handling and embedding of quoted field names.
/// \param NotEncodedT if non-null, receives a type for which no encoding is
///        defined (vector, matrix, member pointer, _BitInt) so the caller
///        can diagnose it.
void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
                                            const ObjCEncOptions Options,
                                            const FieldDecl *FD,
                                            QualType *NotEncodedT) const {
  CanQualType CT = getCanonicalType(T);
  switch (CT->getTypeClass()) {
  case Type::Builtin:
  case Type::Enum:
    if (FD && FD->isBitField())
      return EncodeBitField(Ctx: this, S, T, FD);
    if (const auto *BT = dyn_cast<BuiltinType>(Val&: CT))
      S += getObjCEncodingForPrimitiveType(C: this, BT);
    else
      S += ObjCEncodingForEnumType(C: this, ET: cast<EnumType>(Val&: CT));
    return;

  case Type::Complex:
    S += 'j';
    getObjCEncodingForTypeImpl(T: T->castAs<ComplexType>()->getElementType(), S,
                               Options: ObjCEncOptions(),
                               /*Field=*/FD: nullptr);
    return;

  case Type::Atomic:
    S += 'A';
    getObjCEncodingForTypeImpl(T: T->castAs<AtomicType>()->getValueType(), S,
                               Options: ObjCEncOptions(),
                               /*Field=*/FD: nullptr);
    return;

  // encoding for pointer or reference types.
  case Type::Pointer:
  case Type::LValueReference:
  case Type::RValueReference: {
    QualType PointeeTy;
    if (isa<PointerType>(Val: CT)) {
      const auto *PT = T->castAs<PointerType>();
      // SEL is a pointer type at this level but has its own one-letter code.
      if (PT->isObjCSelType()) {
        S += ':';
        return;
      }
      PointeeTy = PT->getPointeeType();
    } else {
      // References encode the same way pointers do.
      PointeeTy = T->castAs<ReferenceType>()->getPointeeType();
    }

    bool isReadOnly = false;
    // For historical/compatibility reasons, the read-only qualifier of the
    // pointee gets emitted _before_ the '^'. The read-only qualifier of
    // the pointer itself gets ignored, _unless_ we are looking at a typedef!
    // Also, do not emit the 'r' for anything but the outermost type!
    if (T->getAs<TypedefType>()) {
      if (Options.IsOutermostType() && T.isConstQualified()) {
        isReadOnly = true;
        S += 'r';
      }
    } else if (Options.IsOutermostType()) {
      QualType P = PointeeTy;
      while (auto PT = P->getAs<PointerType>())
        P = PT->getPointeeType();
      if (P.isConstQualified()) {
        isReadOnly = true;
        S += 'r';
      }
    }
    if (isReadOnly) {
      // Another legacy compatibility encoding. Some ObjC qualifier and type
      // combinations need to be rearranged.
      // Rewrite "in const" from "nr" to "rn"
      if (StringRef(S).ends_with(Suffix: "nr"))
        S.replace(i1: S.end()-2, i2: S.end(), s: "rn");
    }

    if (PointeeTy->isCharType()) {
      // char pointer types should be encoded as '*' unless it is a
      // type that has been typedef'd to 'BOOL'.
      if (!isTypeTypedefedAsBOOL(T: PointeeTy)) {
        S += '*';
        return;
      }
    } else if (const auto *RTy = PointeeTy->getAs<RecordType>()) {
      // GCC binary compat: Need to convert "struct objc_class *" to "#".
      if (RTy->getDecl()->getIdentifier() == &Idents.get(Name: "objc_class")) {
        S += '#';
        return;
      }
      // GCC binary compat: Need to convert "struct objc_object *" to "@".
      if (RTy->getDecl()->getIdentifier() == &Idents.get(Name: "objc_object")) {
        S += '@';
        return;
      }
      // If the encoded string for the class includes template names, just emit
      // "^v" for pointers to the class.
      if (getLangOpts().CPlusPlus &&
          (!getLangOpts().EncodeCXXClassTemplateSpec &&
           hasTemplateSpecializationInEncodedString(
               T: RTy, VisitBasesAndFields: Options.ExpandPointedToStructures()))) {
        S += "^v";
        return;
      }
      // fall through...
    }
    S += '^';
    // Legacy rule: typedef'd 32-bit longs in the pointee encode as 'i'/'I'.
    getLegacyIntegralTypeEncoding(PointeeTy);

    ObjCEncOptions NewOptions;
    if (Options.ExpandPointedToStructures())
      NewOptions.setExpandStructures();
    getObjCEncodingForTypeImpl(T: PointeeTy, S, Options: NewOptions,
                               /*Field=*/FD: nullptr, NotEncodedT);
    return;
  }

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray: {
    const auto *AT = cast<ArrayType>(Val&: CT);

    if (isa<IncompleteArrayType>(Val: AT) && !Options.IsStructField()) {
      // Incomplete arrays are encoded as a pointer to the array element.
      S += '^';

      getObjCEncodingForTypeImpl(
          T: AT->getElementType(), S,
          Options: Options.keepingOnly(Mask: ObjCEncOptions().setExpandStructures()), FD);
    } else {
      S += '[';

      if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: AT))
        S += llvm::utostr(X: CAT->getZExtSize());
      else {
        //Variable length arrays are encoded as a regular array with 0 elements.
        assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) &&
               "Unknown array type!");
        S += '0';
      }

      getObjCEncodingForTypeImpl(
          T: AT->getElementType(), S,
          Options: Options.keepingOnly(Mask: ObjCEncOptions().setExpandStructures()), FD,
          NotEncodedT);
      S += ']';
    }
    return;
  }

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    S += '?';
    return;

  case Type::Record: {
    RecordDecl *RDecl = cast<RecordType>(Val&: CT)->getDecl();
    S += RDecl->isUnion() ? '(' : '{';
    // Anonymous structures print as '?'
    if (const IdentifierInfo *II = RDecl->getIdentifier()) {
      S += II->getName();
      if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(Val: RDecl)) {
        const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
        llvm::raw_string_ostream OS(S);
        printTemplateArgumentList(OS, Args: TemplateArgs.asArray(),
                                  Policy: getPrintingPolicy());
      }
    } else {
      S += '?';
    }
    if (Options.ExpandStructures()) {
      S += '=';
      if (!RDecl->isUnion()) {
        getObjCEncodingForStructureImpl(RD: RDecl, S, Field: FD, includeVBases: true, NotEncodedT);
      } else {
        // Unions are expanded member by member here, in declaration order.
        for (const auto *Field : RDecl->fields()) {
          if (FD) {
            S += '"';
            S += Field->getNameAsString();
            S += '"';
          }

          // Special case bit-fields.
          if (Field->isBitField()) {
            getObjCEncodingForTypeImpl(T: Field->getType(), S,
                                       Options: ObjCEncOptions().setExpandStructures(),
                                       FD: Field);
          } else {
            QualType qt = Field->getType();
            getLegacyIntegralTypeEncoding(PointeeTy&: qt);
            getObjCEncodingForTypeImpl(
                T: qt, S,
                Options: ObjCEncOptions().setExpandStructures().setIsStructField(), FD,
                NotEncodedT);
          }
        }
      }
    }
    S += RDecl->isUnion() ? ')' : '}';
    return;
  }

  case Type::BlockPointer: {
    const auto *BT = T->castAs<BlockPointerType>();
    S += "@?"; // Unlike a pointer-to-function, which is "^?".
    if (Options.EncodeBlockParameters()) {
      const auto *FT = BT->getPointeeType()->castAs<FunctionType>();

      S += '<';
      // Block return type
      getObjCEncodingForTypeImpl(T: FT->getReturnType(), S,
                                 Options: Options.forComponentType(), FD, NotEncodedT);
      // Block self
      S += "@?";
      // Block parameters
      if (const auto *FPT = dyn_cast<FunctionProtoType>(Val: FT)) {
        for (const auto &I : FPT->param_types())
          getObjCEncodingForTypeImpl(T: I, S, Options: Options.forComponentType(), FD,
                                     NotEncodedT);
      }
      S += '>';
    }
    return;
  }

  case Type::ObjCObject: {
    // hack to match legacy encoding of *id and *Class
    QualType Ty = getObjCObjectPointerType(ObjectT: CT);
    if (Ty->isObjCIdType()) {
      S += "{objc_object=}";
      return;
    }
    else if (Ty->isObjCClassType()) {
      S += "{objc_class=}";
      return;
    }
    // TODO: Double check to make sure this intentionally falls through.
    [[fallthrough]];
  }

  case Type::ObjCInterface: {
    // Ignore protocol qualifiers when mangling at this level.
    // @encode(class_name)
    ObjCInterfaceDecl *OI = T->castAs<ObjCObjectType>()->getInterface();
    S += '{';
    S += OI->getObjCRuntimeNameAsString();
    if (Options.ExpandStructures()) {
      S += '=';
      SmallVector<const ObjCIvarDecl*, 32> Ivars;
      DeepCollectObjCIvars(OI, leafClass: true, Ivars);
      for (unsigned i = 0, e = Ivars.size(); i != e; ++i) {
        const FieldDecl *Field = Ivars[i];
        if (Field->isBitField())
          getObjCEncodingForTypeImpl(T: Field->getType(), S,
                                     Options: ObjCEncOptions().setExpandStructures(),
                                     FD: Field);
        else
          getObjCEncodingForTypeImpl(T: Field->getType(), S,
                                     Options: ObjCEncOptions().setExpandStructures(), FD,
                                     NotEncodedT);
      }
    }
    S += '}';
    return;
  }

  case Type::ObjCObjectPointer: {
    const auto *OPT = T->castAs<ObjCObjectPointerType>();
    if (OPT->isObjCIdType()) {
      S += '@';
      return;
    }

    if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) {
      // FIXME: Consider if we need to output qualifiers for 'Class<p>'.
      // Since this is a binary compatibility issue, need to consult with
      // runtime folks. Fortunately, this is a *very* obscure construct.
      S += '#';
      return;
    }

    if (OPT->isObjCQualifiedIdType()) {
      getObjCEncodingForTypeImpl(
          T: getObjCIdType(), S,
          Options: Options.keepingOnly(Mask: ObjCEncOptions()
                                   .setExpandPointedToStructures()
                                   .setExpandStructures()),
          FD);
      if (FD || Options.EncodingProperty() || Options.EncodeClassNames()) {
        // Note that we do extended encoding of protocol qualifier list
        // Only when doing ivar or property encoding.
        S += '"';
        for (const auto *I : OPT->quals()) {
          S += '<';
          S += I->getObjCRuntimeNameAsString();
          S += '>';
        }
        S += '"';
      }
      return;
    }

    S += '@';
    if (OPT->getInterfaceDecl() &&
        (FD || Options.EncodingProperty() || Options.EncodeClassNames())) {
      S += '"';
      S += OPT->getInterfaceDecl()->getObjCRuntimeNameAsString();
      for (const auto *I : OPT->quals()) {
        S += '<';
        S += I->getObjCRuntimeNameAsString();
        S += '>';
      }
      S += '"';
    }
    return;
  }

  // gcc just blithely ignores member pointers.
  // FIXME: we should do better than that. 'M' is available.
  case Type::MemberPointer:
  // This matches gcc's encoding, even though technically it is insufficient.
  //FIXME. We should do a better job than gcc.
  case Type::Vector:
  case Type::ExtVector:
    // Until we have a coherent encoding of these three types, issue warning.
    if (NotEncodedT)
      *NotEncodedT = T;
    return;

  case Type::ConstantMatrix:
    if (NotEncodedT)
      *NotEncodedT = T;
    return;

  case Type::BitInt:
    if (NotEncodedT)
      *NotEncodedT = T;
    return;

  // We could see an undeduced auto type here during error recovery.
  // Just ignore it.
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    return;

  case Type::HLSLAttributedResource:
  case Type::HLSLInlineSpirv:
    llvm_unreachable("unexpected type");

  case Type::ArrayParameter:
  case Type::Pipe:
#define ABSTRACT_TYPE(KIND, BASE)
#define TYPE(KIND, BASE)
#define DEPENDENT_TYPE(KIND, BASE) \
  case Type::KIND:
#define NON_CANONICAL_TYPE(KIND, BASE) \
  case Type::KIND:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(KIND, BASE) \
  case Type::KIND:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("@encode for dependent type!");
  }
  llvm_unreachable("bad type kind!");
}
9588
/// Append the layout-based @encode string for the members of \p RDecl
/// (a struct or class; unions are handled by the caller) to \p S.
/// Non-virtual bases and fields are emitted in bit-offset order; virtual
/// bases follow when \p includeVBases is set.
///
/// \param FD when non-null, each member's encoding is preceded by its
///        quoted name ("name"), matching ivar-encoding behavior.
/// \param includeVBases false when encoding a base-class subobject; its
///        virtual bases are laid out with the most-derived object.
/// \param NotEncodedT out-parameter reporting a member type that has no
///        defined encoding.
void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl,
                                                 std::string &S,
                                                 const FieldDecl *FD,
                                                 bool includeVBases,
                                                 QualType *NotEncodedT) const {
  assert(RDecl && "Expected non-null RecordDecl");
  assert(!RDecl->isUnion() && "Should not be called for unions");
  // Forward declarations and invalid records have no layout to encode.
  if (!RDecl->getDefinition() || RDecl->getDefinition()->isInvalidDecl())
    return;

  const auto *CXXRec = dyn_cast<CXXRecordDecl>(Val: RDecl);
  // Members keyed by their bit offset. Inserting at upper_bound keeps
  // entries sharing an offset in insertion order.
  std::multimap<uint64_t, NamedDecl *> FieldOrBaseOffsets;
  const ASTRecordLayout &layout = getASTRecordLayout(D: RDecl);

  if (CXXRec) {
    // Collect the non-empty, non-virtual bases at their class offsets.
    for (const auto &BI : CXXRec->bases()) {
      if (!BI.isVirtual()) {
        CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl();
        if (base->isEmpty())
          continue;
        uint64_t offs = toBits(CharSize: layout.getBaseClassOffset(Base: base));
        FieldOrBaseOffsets.insert(position: FieldOrBaseOffsets.upper_bound(x: offs),
                                  x: std::make_pair(x&: offs, y&: base));
      }
    }
  }

  for (FieldDecl *Field : RDecl->fields()) {
    if (!Field->isZeroLengthBitField() && Field->isZeroSize(Ctx: *this))
      continue;
    uint64_t offs = layout.getFieldOffset(FieldNo: Field->getFieldIndex());
    FieldOrBaseOffsets.insert(position: FieldOrBaseOffsets.upper_bound(x: offs),
                              x: std::make_pair(x&: offs, y&: Field));
  }

  if (CXXRec && includeVBases) {
    // Virtual bases go after the non-virtual part, skipping any that would
    // collide with an already-recorded offset.
    for (const auto &BI : CXXRec->vbases()) {
      CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl();
      if (base->isEmpty())
        continue;
      uint64_t offs = toBits(CharSize: layout.getVBaseClassOffset(VBase: base));
      if (offs >= uint64_t(toBits(CharSize: layout.getNonVirtualSize())) &&
          FieldOrBaseOffsets.find(x: offs) == FieldOrBaseOffsets.end())
        FieldOrBaseOffsets.insert(position: FieldOrBaseOffsets.end(),
                                  x: std::make_pair(x&: offs, y&: base));
    }
  }

  CharUnits size;
  if (CXXRec) {
    size = includeVBases ? layout.getSize() : layout.getNonVirtualSize();
  } else {
    size = layout.getSize();
  }

#ifndef NDEBUG
  // Debug-only running bit offset, used to sanity-check the layout walk.
  uint64_t CurOffs = 0;
#endif
  std::multimap<uint64_t, NamedDecl *>::iterator
    CurLayObj = FieldOrBaseOffsets.begin();

  // A dynamic class with nothing recorded at offset 0 carries its vtable
  // pointer there; encode that slot as "^^?".
  if (CXXRec && CXXRec->isDynamicClass() &&
      (CurLayObj == FieldOrBaseOffsets.end() || CurLayObj->first != 0)) {
    if (FD) {
      S += "\"_vptr$";
      std::string recname = CXXRec->getNameAsString();
      if (recname.empty()) recname = "?";
      S += recname;
      S += '"';
    }
    S += "^^?";
#ifndef NDEBUG
    CurOffs += getTypeSize(VoidPtrTy);
#endif
  }

  if (!RDecl->hasFlexibleArrayMember()) {
    // Mark the end of the structure.
    uint64_t offs = toBits(CharSize: size);
    FieldOrBaseOffsets.insert(position: FieldOrBaseOffsets.upper_bound(x: offs),
                              x: std::make_pair(x&: offs, y: nullptr));
  }

  for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) {
#ifndef NDEBUG
    assert(CurOffs <= CurLayObj->first);
    if (CurOffs < CurLayObj->first) {
      uint64_t padding = CurLayObj->first - CurOffs;
      // FIXME: There doesn't seem to be a way to indicate in the encoding that
      // packing/alignment of members is different that normal, in which case
      // the encoding will be out-of-sync with the real layout.
      // If the runtime switches to just consider the size of types without
      // taking into account alignment, we could make padding explicit in the
      // encoding (e.g. using arrays of chars). The encoding strings would be
      // longer then though.
      CurOffs += padding;
    }
#endif

    NamedDecl *dcl = CurLayObj->second;
    if (!dcl)
      break; // reached end of structure.

    if (auto *base = dyn_cast<CXXRecordDecl>(Val: dcl)) {
      // We expand the bases without their virtual bases since those are going
      // in the initial structure. Note that this differs from gcc which
      // expands virtual bases each time one is encountered in the hierarchy,
      // making the encoding type bigger than it really is.
      getObjCEncodingForStructureImpl(RDecl: base, S, FD, /*includeVBases*/false,
                                      NotEncodedT);
      assert(!base->isEmpty());
#ifndef NDEBUG
      CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize());
#endif
    } else {
      const auto *field = cast<FieldDecl>(Val: dcl);
      if (FD) {
        S += '"';
        S += field->getNameAsString();
        S += '"';
      }

      if (field->isBitField()) {
        EncodeBitField(Ctx: this, S, T: field->getType(), FD: field);
#ifndef NDEBUG
        CurOffs += field->getBitWidthValue();
#endif
      } else {
        QualType qt = field->getType();
        // Legacy rule: typedef'd 32-bit longs encode as 'i'/'I' in fields.
        getLegacyIntegralTypeEncoding(PointeeTy&: qt);
        getObjCEncodingForTypeImpl(
            T: qt, S, Options: ObjCEncOptions().setExpandStructures().setIsStructField(),
            FD, NotEncodedT);
#ifndef NDEBUG
        CurOffs += getTypeSize(field->getType());
#endif
      }
    }
  }
}
9729
9730void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT,
9731 std::string& S) const {
9732 if (QT & Decl::OBJC_TQ_In)
9733 S += 'n';
9734 if (QT & Decl::OBJC_TQ_Inout)
9735 S += 'N';
9736 if (QT & Decl::OBJC_TQ_Out)
9737 S += 'o';
9738 if (QT & Decl::OBJC_TQ_Bycopy)
9739 S += 'O';
9740 if (QT & Decl::OBJC_TQ_Byref)
9741 S += 'R';
9742 if (QT & Decl::OBJC_TQ_Oneway)
9743 S += 'V';
9744}
9745
9746TypedefDecl *ASTContext::getObjCIdDecl() const {
9747 if (!ObjCIdDecl) {
9748 QualType T = getObjCObjectType(BaseType: ObjCBuiltinIdTy, Protocols: {}, NumProtocols: {});
9749 T = getObjCObjectPointerType(ObjectT: T);
9750 ObjCIdDecl = buildImplicitTypedef(T, Name: "id");
9751 }
9752 return ObjCIdDecl;
9753}
9754
9755TypedefDecl *ASTContext::getObjCSelDecl() const {
9756 if (!ObjCSelDecl) {
9757 QualType T = getPointerType(T: ObjCBuiltinSelTy);
9758 ObjCSelDecl = buildImplicitTypedef(T, Name: "SEL");
9759 }
9760 return ObjCSelDecl;
9761}
9762
9763TypedefDecl *ASTContext::getObjCClassDecl() const {
9764 if (!ObjCClassDecl) {
9765 QualType T = getObjCObjectType(BaseType: ObjCBuiltinClassTy, Protocols: {}, NumProtocols: {});
9766 T = getObjCObjectPointerType(ObjectT: T);
9767 ObjCClassDecl = buildImplicitTypedef(T, Name: "Class");
9768 }
9769 return ObjCClassDecl;
9770}
9771
9772ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const {
9773 if (!ObjCProtocolClassDecl) {
9774 ObjCProtocolClassDecl
9775 = ObjCInterfaceDecl::Create(C: *this, DC: getTranslationUnitDecl(),
9776 atLoc: SourceLocation(),
9777 Id: &Idents.get(Name: "Protocol"),
9778 /*typeParamList=*/nullptr,
9779 /*PrevDecl=*/nullptr,
9780 ClassLoc: SourceLocation(), isInternal: true);
9781 }
9782
9783 return ObjCProtocolClassDecl;
9784}
9785
9786//===----------------------------------------------------------------------===//
9787// __builtin_va_list Construction Functions
9788//===----------------------------------------------------------------------===//
9789
9790static TypedefDecl *CreateCharPtrNamedVaListDecl(const ASTContext *Context,
9791 StringRef Name) {
9792 // typedef char* __builtin[_ms]_va_list;
9793 QualType T = Context->getPointerType(T: Context->CharTy);
9794 return Context->buildImplicitTypedef(T, Name);
9795}
9796
9797static TypedefDecl *CreateMSVaListDecl(const ASTContext *Context) {
9798 return CreateCharPtrNamedVaListDecl(Context, Name: "__builtin_ms_va_list");
9799}
9800
9801static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) {
9802 return CreateCharPtrNamedVaListDecl(Context, Name: "__builtin_va_list");
9803}
9804
9805static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) {
9806 // typedef void* __builtin_va_list;
9807 QualType T = Context->getPointerType(T: Context->VoidTy);
9808 return Context->buildImplicitTypedef(T, Name: "__builtin_va_list");
9809}
9810
9811static TypedefDecl *
9812CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) {
9813 // struct __va_list
9814 RecordDecl *VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list");
9815 if (Context->getLangOpts().CPlusPlus) {
9816 // namespace std { struct __va_list {
9817 auto *NS = NamespaceDecl::Create(
9818 C&: const_cast<ASTContext &>(*Context), DC: Context->getTranslationUnitDecl(),
9819 /*Inline=*/false, StartLoc: SourceLocation(), IdLoc: SourceLocation(),
9820 Id: &Context->Idents.get(Name: "std"),
9821 /*PrevDecl=*/nullptr, /*Nested=*/false);
9822 NS->setImplicit();
9823 VaListTagDecl->setDeclContext(NS);
9824 }
9825
9826 VaListTagDecl->startDefinition();
9827
9828 const size_t NumFields = 5;
9829 QualType FieldTypes[NumFields];
9830 const char *FieldNames[NumFields];
9831
9832 // void *__stack;
9833 FieldTypes[0] = Context->getPointerType(T: Context->VoidTy);
9834 FieldNames[0] = "__stack";
9835
9836 // void *__gr_top;
9837 FieldTypes[1] = Context->getPointerType(T: Context->VoidTy);
9838 FieldNames[1] = "__gr_top";
9839
9840 // void *__vr_top;
9841 FieldTypes[2] = Context->getPointerType(T: Context->VoidTy);
9842 FieldNames[2] = "__vr_top";
9843
9844 // int __gr_offs;
9845 FieldTypes[3] = Context->IntTy;
9846 FieldNames[3] = "__gr_offs";
9847
9848 // int __vr_offs;
9849 FieldTypes[4] = Context->IntTy;
9850 FieldNames[4] = "__vr_offs";
9851
9852 // Create fields
9853 for (unsigned i = 0; i < NumFields; ++i) {
9854 FieldDecl *Field = FieldDecl::Create(C: const_cast<ASTContext &>(*Context),
9855 DC: VaListTagDecl,
9856 StartLoc: SourceLocation(),
9857 IdLoc: SourceLocation(),
9858 Id: &Context->Idents.get(Name: FieldNames[i]),
9859 T: FieldTypes[i], /*TInfo=*/nullptr,
9860 /*BitWidth=*/BW: nullptr,
9861 /*Mutable=*/false,
9862 InitStyle: ICIS_NoInit);
9863 Field->setAccess(AS_public);
9864 VaListTagDecl->addDecl(D: Field);
9865 }
9866 VaListTagDecl->completeDefinition();
9867 Context->VaListTagDecl = VaListTagDecl;
9868 QualType VaListTagType = Context->getRecordType(Decl: VaListTagDecl);
9869
9870 // } __builtin_va_list;
9871 return Context->buildImplicitTypedef(T: VaListTagType, Name: "__builtin_va_list");
9872}
9873
9874static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) {
9875 // typedef struct __va_list_tag {
9876 RecordDecl *VaListTagDecl;
9877
9878 VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list_tag");
9879 VaListTagDecl->startDefinition();
9880
9881 const size_t NumFields = 5;
9882 QualType FieldTypes[NumFields];
9883 const char *FieldNames[NumFields];
9884
9885 // unsigned char gpr;
9886 FieldTypes[0] = Context->UnsignedCharTy;
9887 FieldNames[0] = "gpr";
9888
9889 // unsigned char fpr;
9890 FieldTypes[1] = Context->UnsignedCharTy;
9891 FieldNames[1] = "fpr";
9892
9893 // unsigned short reserved;
9894 FieldTypes[2] = Context->UnsignedShortTy;
9895 FieldNames[2] = "reserved";
9896
9897 // void* overflow_arg_area;
9898 FieldTypes[3] = Context->getPointerType(T: Context->VoidTy);
9899 FieldNames[3] = "overflow_arg_area";
9900
9901 // void* reg_save_area;
9902 FieldTypes[4] = Context->getPointerType(T: Context->VoidTy);
9903 FieldNames[4] = "reg_save_area";
9904
9905 // Create fields
9906 for (unsigned i = 0; i < NumFields; ++i) {
9907 FieldDecl *Field = FieldDecl::Create(C: *Context, DC: VaListTagDecl,
9908 StartLoc: SourceLocation(),
9909 IdLoc: SourceLocation(),
9910 Id: &Context->Idents.get(Name: FieldNames[i]),
9911 T: FieldTypes[i], /*TInfo=*/nullptr,
9912 /*BitWidth=*/BW: nullptr,
9913 /*Mutable=*/false,
9914 InitStyle: ICIS_NoInit);
9915 Field->setAccess(AS_public);
9916 VaListTagDecl->addDecl(D: Field);
9917 }
9918 VaListTagDecl->completeDefinition();
9919 Context->VaListTagDecl = VaListTagDecl;
9920 QualType VaListTagType = Context->getRecordType(Decl: VaListTagDecl);
9921
9922 // } __va_list_tag;
9923 TypedefDecl *VaListTagTypedefDecl =
9924 Context->buildImplicitTypedef(T: VaListTagType, Name: "__va_list_tag");
9925
9926 QualType VaListTagTypedefType =
9927 Context->getTypedefType(Decl: VaListTagTypedefDecl);
9928
9929 // typedef __va_list_tag __builtin_va_list[1];
9930 llvm::APInt Size(Context->getTypeSize(T: Context->getSizeType()), 1);
9931 QualType VaListTagArrayType = Context->getConstantArrayType(
9932 EltTy: VaListTagTypedefType, ArySizeIn: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
9933 return Context->buildImplicitTypedef(T: VaListTagArrayType, Name: "__builtin_va_list");
9934}
9935
9936static TypedefDecl *
9937CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) {
9938 // struct __va_list_tag {
9939 RecordDecl *VaListTagDecl;
9940 VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list_tag");
9941 VaListTagDecl->startDefinition();
9942
9943 const size_t NumFields = 4;
9944 QualType FieldTypes[NumFields];
9945 const char *FieldNames[NumFields];
9946
9947 // unsigned gp_offset;
9948 FieldTypes[0] = Context->UnsignedIntTy;
9949 FieldNames[0] = "gp_offset";
9950
9951 // unsigned fp_offset;
9952 FieldTypes[1] = Context->UnsignedIntTy;
9953 FieldNames[1] = "fp_offset";
9954
9955 // void* overflow_arg_area;
9956 FieldTypes[2] = Context->getPointerType(T: Context->VoidTy);
9957 FieldNames[2] = "overflow_arg_area";
9958
9959 // void* reg_save_area;
9960 FieldTypes[3] = Context->getPointerType(T: Context->VoidTy);
9961 FieldNames[3] = "reg_save_area";
9962
9963 // Create fields
9964 for (unsigned i = 0; i < NumFields; ++i) {
9965 FieldDecl *Field = FieldDecl::Create(C: const_cast<ASTContext &>(*Context),
9966 DC: VaListTagDecl,
9967 StartLoc: SourceLocation(),
9968 IdLoc: SourceLocation(),
9969 Id: &Context->Idents.get(Name: FieldNames[i]),
9970 T: FieldTypes[i], /*TInfo=*/nullptr,
9971 /*BitWidth=*/BW: nullptr,
9972 /*Mutable=*/false,
9973 InitStyle: ICIS_NoInit);
9974 Field->setAccess(AS_public);
9975 VaListTagDecl->addDecl(D: Field);
9976 }
9977 VaListTagDecl->completeDefinition();
9978 Context->VaListTagDecl = VaListTagDecl;
9979 QualType VaListTagType = Context->getRecordType(Decl: VaListTagDecl);
9980
9981 // };
9982
9983 // typedef struct __va_list_tag __builtin_va_list[1];
9984 llvm::APInt Size(Context->getTypeSize(T: Context->getSizeType()), 1);
9985 QualType VaListTagArrayType = Context->getConstantArrayType(
9986 EltTy: VaListTagType, ArySizeIn: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
9987 return Context->buildImplicitTypedef(T: VaListTagArrayType, Name: "__builtin_va_list");
9988}
9989
9990static TypedefDecl *CreatePNaClABIBuiltinVaListDecl(const ASTContext *Context) {
9991 // typedef int __builtin_va_list[4];
9992 llvm::APInt Size(Context->getTypeSize(T: Context->getSizeType()), 4);
9993 QualType IntArrayType = Context->getConstantArrayType(
9994 EltTy: Context->IntTy, ArySizeIn: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
9995 return Context->buildImplicitTypedef(T: IntArrayType, Name: "__builtin_va_list");
9996}
9997
9998static TypedefDecl *
9999CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) {
10000 // struct __va_list
10001 RecordDecl *VaListDecl = Context->buildImplicitRecord(Name: "__va_list");
10002 if (Context->getLangOpts().CPlusPlus) {
10003 // namespace std { struct __va_list {
10004 NamespaceDecl *NS;
10005 NS = NamespaceDecl::Create(C&: const_cast<ASTContext &>(*Context),
10006 DC: Context->getTranslationUnitDecl(),
10007 /*Inline=*/false, StartLoc: SourceLocation(),
10008 IdLoc: SourceLocation(), Id: &Context->Idents.get(Name: "std"),
10009 /*PrevDecl=*/nullptr, /*Nested=*/false);
10010 NS->setImplicit();
10011 VaListDecl->setDeclContext(NS);
10012 }
10013
10014 VaListDecl->startDefinition();
10015
10016 // void * __ap;
10017 FieldDecl *Field = FieldDecl::Create(C: const_cast<ASTContext &>(*Context),
10018 DC: VaListDecl,
10019 StartLoc: SourceLocation(),
10020 IdLoc: SourceLocation(),
10021 Id: &Context->Idents.get(Name: "__ap"),
10022 T: Context->getPointerType(T: Context->VoidTy),
10023 /*TInfo=*/nullptr,
10024 /*BitWidth=*/BW: nullptr,
10025 /*Mutable=*/false,
10026 InitStyle: ICIS_NoInit);
10027 Field->setAccess(AS_public);
10028 VaListDecl->addDecl(D: Field);
10029
10030 // };
10031 VaListDecl->completeDefinition();
10032 Context->VaListTagDecl = VaListDecl;
10033
10034 // typedef struct __va_list __builtin_va_list;
10035 QualType T = Context->getRecordType(Decl: VaListDecl);
10036 return Context->buildImplicitTypedef(T, Name: "__builtin_va_list");
10037}
10038
10039static TypedefDecl *
10040CreateSystemZBuiltinVaListDecl(const ASTContext *Context) {
10041 // struct __va_list_tag {
10042 RecordDecl *VaListTagDecl;
10043 VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list_tag");
10044 VaListTagDecl->startDefinition();
10045
10046 const size_t NumFields = 4;
10047 QualType FieldTypes[NumFields];
10048 const char *FieldNames[NumFields];
10049
10050 // long __gpr;
10051 FieldTypes[0] = Context->LongTy;
10052 FieldNames[0] = "__gpr";
10053
10054 // long __fpr;
10055 FieldTypes[1] = Context->LongTy;
10056 FieldNames[1] = "__fpr";
10057
10058 // void *__overflow_arg_area;
10059 FieldTypes[2] = Context->getPointerType(T: Context->VoidTy);
10060 FieldNames[2] = "__overflow_arg_area";
10061
10062 // void *__reg_save_area;
10063 FieldTypes[3] = Context->getPointerType(T: Context->VoidTy);
10064 FieldNames[3] = "__reg_save_area";
10065
10066 // Create fields
10067 for (unsigned i = 0; i < NumFields; ++i) {
10068 FieldDecl *Field = FieldDecl::Create(C: const_cast<ASTContext &>(*Context),
10069 DC: VaListTagDecl,
10070 StartLoc: SourceLocation(),
10071 IdLoc: SourceLocation(),
10072 Id: &Context->Idents.get(Name: FieldNames[i]),
10073 T: FieldTypes[i], /*TInfo=*/nullptr,
10074 /*BitWidth=*/BW: nullptr,
10075 /*Mutable=*/false,
10076 InitStyle: ICIS_NoInit);
10077 Field->setAccess(AS_public);
10078 VaListTagDecl->addDecl(D: Field);
10079 }
10080 VaListTagDecl->completeDefinition();
10081 Context->VaListTagDecl = VaListTagDecl;
10082 QualType VaListTagType = Context->getRecordType(Decl: VaListTagDecl);
10083
10084 // };
10085
10086 // typedef __va_list_tag __builtin_va_list[1];
10087 llvm::APInt Size(Context->getTypeSize(T: Context->getSizeType()), 1);
10088 QualType VaListTagArrayType = Context->getConstantArrayType(
10089 EltTy: VaListTagType, ArySizeIn: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
10090
10091 return Context->buildImplicitTypedef(T: VaListTagArrayType, Name: "__builtin_va_list");
10092}
10093
10094static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) {
10095 // typedef struct __va_list_tag {
10096 RecordDecl *VaListTagDecl;
10097 VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list_tag");
10098 VaListTagDecl->startDefinition();
10099
10100 const size_t NumFields = 3;
10101 QualType FieldTypes[NumFields];
10102 const char *FieldNames[NumFields];
10103
10104 // void *CurrentSavedRegisterArea;
10105 FieldTypes[0] = Context->getPointerType(T: Context->VoidTy);
10106 FieldNames[0] = "__current_saved_reg_area_pointer";
10107
10108 // void *SavedRegAreaEnd;
10109 FieldTypes[1] = Context->getPointerType(T: Context->VoidTy);
10110 FieldNames[1] = "__saved_reg_area_end_pointer";
10111
10112 // void *OverflowArea;
10113 FieldTypes[2] = Context->getPointerType(T: Context->VoidTy);
10114 FieldNames[2] = "__overflow_area_pointer";
10115
10116 // Create fields
10117 for (unsigned i = 0; i < NumFields; ++i) {
10118 FieldDecl *Field = FieldDecl::Create(
10119 C: const_cast<ASTContext &>(*Context), DC: VaListTagDecl, StartLoc: SourceLocation(),
10120 IdLoc: SourceLocation(), Id: &Context->Idents.get(Name: FieldNames[i]), T: FieldTypes[i],
10121 /*TInfo=*/nullptr,
10122 /*BitWidth=*/BW: nullptr,
10123 /*Mutable=*/false, InitStyle: ICIS_NoInit);
10124 Field->setAccess(AS_public);
10125 VaListTagDecl->addDecl(D: Field);
10126 }
10127 VaListTagDecl->completeDefinition();
10128 Context->VaListTagDecl = VaListTagDecl;
10129 QualType VaListTagType = Context->getRecordType(Decl: VaListTagDecl);
10130
10131 // } __va_list_tag;
10132 TypedefDecl *VaListTagTypedefDecl =
10133 Context->buildImplicitTypedef(T: VaListTagType, Name: "__va_list_tag");
10134
10135 QualType VaListTagTypedefType = Context->getTypedefType(Decl: VaListTagTypedefDecl);
10136
10137 // typedef __va_list_tag __builtin_va_list[1];
10138 llvm::APInt Size(Context->getTypeSize(T: Context->getSizeType()), 1);
10139 QualType VaListTagArrayType = Context->getConstantArrayType(
10140 EltTy: VaListTagTypedefType, ArySizeIn: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
10141
10142 return Context->buildImplicitTypedef(T: VaListTagArrayType, Name: "__builtin_va_list");
10143}
10144
10145static TypedefDecl *
10146CreateXtensaABIBuiltinVaListDecl(const ASTContext *Context) {
10147 // typedef struct __va_list_tag {
10148 RecordDecl *VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list_tag");
10149
10150 VaListTagDecl->startDefinition();
10151
10152 // int* __va_stk;
10153 // int* __va_reg;
10154 // int __va_ndx;
10155 constexpr size_t NumFields = 3;
10156 QualType FieldTypes[NumFields] = {Context->getPointerType(T: Context->IntTy),
10157 Context->getPointerType(T: Context->IntTy),
10158 Context->IntTy};
10159 const char *FieldNames[NumFields] = {"__va_stk", "__va_reg", "__va_ndx"};
10160
10161 // Create fields
10162 for (unsigned i = 0; i < NumFields; ++i) {
10163 FieldDecl *Field = FieldDecl::Create(
10164 C: *Context, DC: VaListTagDecl, StartLoc: SourceLocation(), IdLoc: SourceLocation(),
10165 Id: &Context->Idents.get(Name: FieldNames[i]), T: FieldTypes[i], /*TInfo=*/nullptr,
10166 /*BitWidth=*/BW: nullptr,
10167 /*Mutable=*/false, InitStyle: ICIS_NoInit);
10168 Field->setAccess(AS_public);
10169 VaListTagDecl->addDecl(D: Field);
10170 }
10171 VaListTagDecl->completeDefinition();
10172 Context->VaListTagDecl = VaListTagDecl;
10173 QualType VaListTagType = Context->getRecordType(Decl: VaListTagDecl);
10174
10175 // } __va_list_tag;
10176 TypedefDecl *VaListTagTypedefDecl =
10177 Context->buildImplicitTypedef(T: VaListTagType, Name: "__builtin_va_list");
10178
10179 return VaListTagTypedefDecl;
10180}
10181
/// Create the __builtin_va_list typedef appropriate for the target's
/// va_list kind, dispatching to the per-ABI factory functions above.
/// The record-based factories in this file also record the tag decl in
/// Context->VaListTagDecl as a side effect.
static TypedefDecl *CreateVaListDecl(const ASTContext *Context,
                                     TargetInfo::BuiltinVaListKind Kind) {
  switch (Kind) {
  case TargetInfo::CharPtrBuiltinVaList:
    return CreateCharPtrBuiltinVaListDecl(Context);
  case TargetInfo::VoidPtrBuiltinVaList:
    return CreateVoidPtrBuiltinVaListDecl(Context);
  case TargetInfo::AArch64ABIBuiltinVaList:
    return CreateAArch64ABIBuiltinVaListDecl(Context);
  case TargetInfo::PowerABIBuiltinVaList:
    return CreatePowerABIBuiltinVaListDecl(Context);
  case TargetInfo::X86_64ABIBuiltinVaList:
    return CreateX86_64ABIBuiltinVaListDecl(Context);
  case TargetInfo::PNaClABIBuiltinVaList:
    return CreatePNaClABIBuiltinVaListDecl(Context);
  case TargetInfo::AAPCSABIBuiltinVaList:
    return CreateAAPCSABIBuiltinVaListDecl(Context);
  case TargetInfo::SystemZBuiltinVaList:
    return CreateSystemZBuiltinVaListDecl(Context);
  case TargetInfo::HexagonBuiltinVaList:
    return CreateHexagonBuiltinVaListDecl(Context);
  case TargetInfo::XtensaABIBuiltinVaList:
    return CreateXtensaABIBuiltinVaListDecl(Context);
  }

  llvm_unreachable("Unhandled __builtin_va_list type kind");
}
10209
10210TypedefDecl *ASTContext::getBuiltinVaListDecl() const {
10211 if (!BuiltinVaListDecl) {
10212 BuiltinVaListDecl = CreateVaListDecl(Context: this, Kind: Target->getBuiltinVaListKind());
10213 assert(BuiltinVaListDecl->isImplicit());
10214 }
10215
10216 return BuiltinVaListDecl;
10217}
10218
10219Decl *ASTContext::getVaListTagDecl() const {
10220 // Force the creation of VaListTagDecl by building the __builtin_va_list
10221 // declaration.
10222 if (!VaListTagDecl)
10223 (void)getBuiltinVaListDecl();
10224
10225 return VaListTagDecl;
10226}
10227
10228TypedefDecl *ASTContext::getBuiltinMSVaListDecl() const {
10229 if (!BuiltinMSVaListDecl)
10230 BuiltinMSVaListDecl = CreateMSVaListDecl(Context: this);
10231
10232 return BuiltinMSVaListDecl;
10233}
10234
10235bool ASTContext::canBuiltinBeRedeclared(const FunctionDecl *FD) const {
10236 // Allow redecl custom type checking builtin for HLSL.
10237 if (LangOpts.HLSL && FD->getBuiltinID() != Builtin::NotBuiltin &&
10238 BuiltinInfo.hasCustomTypechecking(ID: FD->getBuiltinID()))
10239 return true;
10240 // Allow redecl custom type checking builtin for SPIR-V.
10241 if (getTargetInfo().getTriple().isSPIROrSPIRV() &&
10242 BuiltinInfo.isTSBuiltin(ID: FD->getBuiltinID()) &&
10243 BuiltinInfo.hasCustomTypechecking(ID: FD->getBuiltinID()))
10244 return true;
10245 return BuiltinInfo.canBeRedeclared(ID: FD->getBuiltinID());
10246}
10247
/// Record the Objective-C interface (e.g. NSConstantString, per the assert
/// message) whose type is used for constant string literals.  May be called
/// at most once per ASTContext.
void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) {
  assert(ObjCConstantStringType.isNull() &&
         "'NSConstantString' type already set!");

  ObjCConstantStringType = getObjCInterfaceType(Decl);
}
10254
10255/// Retrieve the template name that corresponds to a non-empty
10256/// lookup.
10257TemplateName
10258ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin,
10259 UnresolvedSetIterator End) const {
10260 unsigned size = End - Begin;
10261 assert(size > 1 && "set is not overloaded!");
10262
10263 void *memory = Allocate(Size: sizeof(OverloadedTemplateStorage) +
10264 size * sizeof(FunctionTemplateDecl*));
10265 auto *OT = new (memory) OverloadedTemplateStorage(size);
10266
10267 NamedDecl **Storage = OT->getStorage();
10268 for (UnresolvedSetIterator I = Begin; I != End; ++I) {
10269 NamedDecl *D = *I;
10270 assert(isa<FunctionTemplateDecl>(D) ||
10271 isa<UnresolvedUsingValueDecl>(D) ||
10272 (isa<UsingShadowDecl>(D) &&
10273 isa<FunctionTemplateDecl>(D->getUnderlyingDecl())));
10274 *Storage++ = D;
10275 }
10276
10277 return TemplateName(OT);
10278}
10279
10280/// Retrieve a template name representing an unqualified-id that has been
10281/// assumed to name a template for ADL purposes.
10282TemplateName ASTContext::getAssumedTemplateName(DeclarationName Name) const {
10283 auto *OT = new (*this) AssumedTemplateStorage(Name);
10284 return TemplateName(OT);
10285}
10286
10287/// Retrieve the template name that represents a qualified
10288/// template name such as \c std::vector.
10289TemplateName ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS,
10290 bool TemplateKeyword,
10291 TemplateName Template) const {
10292 assert(Template.getKind() == TemplateName::Template ||
10293 Template.getKind() == TemplateName::UsingTemplate);
10294
10295 // FIXME: Canonicalization?
10296 llvm::FoldingSetNodeID ID;
10297 QualifiedTemplateName::Profile(ID, NNS, TemplateKeyword, TN: Template);
10298
10299 void *InsertPos = nullptr;
10300 QualifiedTemplateName *QTN =
10301 QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
10302 if (!QTN) {
10303 QTN = new (*this, alignof(QualifiedTemplateName))
10304 QualifiedTemplateName(NNS, TemplateKeyword, Template);
10305 QualifiedTemplateNames.InsertNode(N: QTN, InsertPos);
10306 }
10307
10308 return TemplateName(QTN);
10309}
10310
10311/// Retrieve the template name that represents a dependent
10312/// template name such as \c MetaFun::template operator+.
10313TemplateName
10314ASTContext::getDependentTemplateName(const DependentTemplateStorage &S) const {
10315 llvm::FoldingSetNodeID ID;
10316 S.Profile(ID);
10317
10318 void *InsertPos = nullptr;
10319 if (DependentTemplateName *QTN =
10320 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos))
10321 return TemplateName(QTN);
10322
10323 DependentTemplateName *QTN =
10324 new (*this, alignof(DependentTemplateName)) DependentTemplateName(S);
10325 DependentTemplateNames.InsertNode(N: QTN, InsertPos);
10326 return TemplateName(QTN);
10327}
10328
10329TemplateName ASTContext::getSubstTemplateTemplateParm(TemplateName Replacement,
10330 Decl *AssociatedDecl,
10331 unsigned Index,
10332 UnsignedOrNone PackIndex,
10333 bool Final) const {
10334 llvm::FoldingSetNodeID ID;
10335 SubstTemplateTemplateParmStorage::Profile(ID, Replacement, AssociatedDecl,
10336 Index, PackIndex, Final);
10337
10338 void *insertPos = nullptr;
10339 SubstTemplateTemplateParmStorage *subst
10340 = SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos&: insertPos);
10341
10342 if (!subst) {
10343 subst = new (*this) SubstTemplateTemplateParmStorage(
10344 Replacement, AssociatedDecl, Index, PackIndex, Final);
10345 SubstTemplateTemplateParms.InsertNode(N: subst, InsertPos: insertPos);
10346 }
10347
10348 return TemplateName(subst);
10349}
10350
10351TemplateName
10352ASTContext::getSubstTemplateTemplateParmPack(const TemplateArgument &ArgPack,
10353 Decl *AssociatedDecl,
10354 unsigned Index, bool Final) const {
10355 auto &Self = const_cast<ASTContext &>(*this);
10356 llvm::FoldingSetNodeID ID;
10357 SubstTemplateTemplateParmPackStorage::Profile(ID, Context&: Self, ArgPack,
10358 AssociatedDecl, Index, Final);
10359
10360 void *InsertPos = nullptr;
10361 SubstTemplateTemplateParmPackStorage *Subst
10362 = SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos);
10363
10364 if (!Subst) {
10365 Subst = new (*this) SubstTemplateTemplateParmPackStorage(
10366 ArgPack.pack_elements(), AssociatedDecl, Index, Final);
10367 SubstTemplateTemplateParmPacks.InsertNode(N: Subst, InsertPos);
10368 }
10369
10370 return TemplateName(Subst);
10371}
10372
10373/// Retrieve the template name that represents a template name
10374/// deduced from a specialization.
10375TemplateName
10376ASTContext::getDeducedTemplateName(TemplateName Underlying,
10377 DefaultArguments DefaultArgs) const {
10378 if (!DefaultArgs)
10379 return Underlying;
10380
10381 llvm::FoldingSetNodeID ID;
10382 DeducedTemplateStorage::Profile(ID, Context: *this, Underlying, DefArgs: DefaultArgs);
10383
10384 void *InsertPos = nullptr;
10385 DeducedTemplateStorage *DTS =
10386 DeducedTemplates.FindNodeOrInsertPos(ID, InsertPos);
10387 if (!DTS) {
10388 void *Mem = Allocate(Size: sizeof(DeducedTemplateStorage) +
10389 sizeof(TemplateArgument) * DefaultArgs.Args.size(),
10390 Align: alignof(DeducedTemplateStorage));
10391 DTS = new (Mem) DeducedTemplateStorage(Underlying, DefaultArgs);
10392 DeducedTemplates.InsertNode(N: DTS, InsertPos);
10393 }
10394 return TemplateName(DTS);
10395}
10396
/// getFromTargetType - Given one of the integer types provided by
/// TargetInfo, produce the corresponding type. The unsigned @p Type
/// is actually a value of type @c TargetInfo::IntType.
/// TargetInfo::NoInt maps to a null CanQualType.
CanQualType ASTContext::getFromTargetType(unsigned Type) const {
  switch (Type) {
  case TargetInfo::NoInt: return {};
  case TargetInfo::SignedChar: return SignedCharTy;
  case TargetInfo::UnsignedChar: return UnsignedCharTy;
  case TargetInfo::SignedShort: return ShortTy;
  case TargetInfo::UnsignedShort: return UnsignedShortTy;
  case TargetInfo::SignedInt: return IntTy;
  case TargetInfo::UnsignedInt: return UnsignedIntTy;
  case TargetInfo::SignedLong: return LongTy;
  case TargetInfo::UnsignedLong: return UnsignedLongTy;
  case TargetInfo::SignedLongLong: return LongLongTy;
  case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy;
  }

  llvm_unreachable("Unhandled TargetInfo::IntType value");
}
10417
10418//===----------------------------------------------------------------------===//
10419// Type Predicates.
10420//===----------------------------------------------------------------------===//
10421
/// getObjCGCAttr - Returns one of GCNone, Weak or Strong objc's
/// garbage collection attribute.
///
Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const {
  // GC attributes are only meaningful when compiling with ObjC GC enabled.
  if (getLangOpts().getGC() == LangOptions::NonGC)
    return Qualifiers::GCNone;

  assert(getLangOpts().ObjC);
  Qualifiers::GC GCAttrs = Ty.getObjCGCAttr();

  // Default behaviour under objective-C's gc is for ObjC pointers
  // (or pointers to them) be treated as though they were declared
  // as __strong.
  if (GCAttrs == Qualifiers::GCNone) {
    if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType())
      return Qualifiers::Strong;
    else if (Ty->isPointerType())
      // Recurse through the pointee, so e.g. a pointer to an ObjC object
      // pointer is also treated as __strong.
      return getObjCGCAttrKind(Ty: Ty->castAs<PointerType>()->getPointeeType());
  } else {
    // It's not valid to set GC attributes on anything that isn't a
    // pointer.
#ifndef NDEBUG
    // Debug-only sanity check: after stripping array types, the canonical
    // type must be some kind of pointer or block pointer.
    QualType CT = Ty->getCanonicalTypeInternal();
    while (const auto *AT = dyn_cast<ArrayType>(CT))
      CT = AT->getElementType();
    assert(CT->isAnyPointerType() || CT->isBlockPointerType());
#endif
  }
  return GCAttrs;
}
10452
10453//===----------------------------------------------------------------------===//
10454// Type Compatibility Testing
10455//===----------------------------------------------------------------------===//
10456
10457/// areCompatVectorTypes - Return true if the two specified vector types are
10458/// compatible.
10459static bool areCompatVectorTypes(const VectorType *LHS,
10460 const VectorType *RHS) {
10461 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified());
10462 return LHS->getElementType() == RHS->getElementType() &&
10463 LHS->getNumElements() == RHS->getNumElements();
10464}
10465
10466/// areCompatMatrixTypes - Return true if the two specified matrix types are
10467/// compatible.
10468static bool areCompatMatrixTypes(const ConstantMatrixType *LHS,
10469 const ConstantMatrixType *RHS) {
10470 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified());
10471 return LHS->getElementType() == RHS->getElementType() &&
10472 LHS->getNumRows() == RHS->getNumRows() &&
10473 LHS->getNumColumns() == RHS->getNumColumns();
10474}
10475
10476bool ASTContext::areCompatibleVectorTypes(QualType FirstVec,
10477 QualType SecondVec) {
10478 assert(FirstVec->isVectorType() && "FirstVec should be a vector type");
10479 assert(SecondVec->isVectorType() && "SecondVec should be a vector type");
10480
10481 if (hasSameUnqualifiedType(T1: FirstVec, T2: SecondVec))
10482 return true;
10483
10484 // Treat Neon vector types and most AltiVec vector types as if they are the
10485 // equivalent GCC vector types.
10486 const auto *First = FirstVec->castAs<VectorType>();
10487 const auto *Second = SecondVec->castAs<VectorType>();
10488 if (First->getNumElements() == Second->getNumElements() &&
10489 hasSameType(T1: First->getElementType(), T2: Second->getElementType()) &&
10490 First->getVectorKind() != VectorKind::AltiVecPixel &&
10491 First->getVectorKind() != VectorKind::AltiVecBool &&
10492 Second->getVectorKind() != VectorKind::AltiVecPixel &&
10493 Second->getVectorKind() != VectorKind::AltiVecBool &&
10494 First->getVectorKind() != VectorKind::SveFixedLengthData &&
10495 First->getVectorKind() != VectorKind::SveFixedLengthPredicate &&
10496 Second->getVectorKind() != VectorKind::SveFixedLengthData &&
10497 Second->getVectorKind() != VectorKind::SveFixedLengthPredicate &&
10498 First->getVectorKind() != VectorKind::RVVFixedLengthData &&
10499 Second->getVectorKind() != VectorKind::RVVFixedLengthData &&
10500 First->getVectorKind() != VectorKind::RVVFixedLengthMask &&
10501 Second->getVectorKind() != VectorKind::RVVFixedLengthMask &&
10502 First->getVectorKind() != VectorKind::RVVFixedLengthMask_1 &&
10503 Second->getVectorKind() != VectorKind::RVVFixedLengthMask_1 &&
10504 First->getVectorKind() != VectorKind::RVVFixedLengthMask_2 &&
10505 Second->getVectorKind() != VectorKind::RVVFixedLengthMask_2 &&
10506 First->getVectorKind() != VectorKind::RVVFixedLengthMask_4 &&
10507 Second->getVectorKind() != VectorKind::RVVFixedLengthMask_4)
10508 return true;
10509
10510 return false;
10511}
10512
10513/// getRVVTypeSize - Return RVV vector register size.
10514static uint64_t getRVVTypeSize(ASTContext &Context, const BuiltinType *Ty) {
10515 assert(Ty->isRVVVLSBuiltinType() && "Invalid RVV Type");
10516 auto VScale = Context.getTargetInfo().getVScaleRange(
10517 LangOpts: Context.getLangOpts(), Mode: TargetInfo::ArmStreamingKind::NotStreaming);
10518 if (!VScale)
10519 return 0;
10520
10521 ASTContext::BuiltinVectorTypeInfo Info = Context.getBuiltinVectorTypeInfo(Ty);
10522
10523 uint64_t EltSize = Context.getTypeSize(T: Info.ElementType);
10524 if (Info.ElementType == Context.BoolTy)
10525 EltSize = 1;
10526
10527 uint64_t MinElts = Info.EC.getKnownMinValue();
10528 return VScale->first * MinElts * EltSize;
10529}
10530
10531bool ASTContext::areCompatibleRVVTypes(QualType FirstType,
10532 QualType SecondType) {
10533 assert(
10534 ((FirstType->isRVVSizelessBuiltinType() && SecondType->isVectorType()) ||
10535 (FirstType->isVectorType() && SecondType->isRVVSizelessBuiltinType())) &&
10536 "Expected RVV builtin type and vector type!");
10537
10538 auto IsValidCast = [this](QualType FirstType, QualType SecondType) {
10539 if (const auto *BT = FirstType->getAs<BuiltinType>()) {
10540 if (const auto *VT = SecondType->getAs<VectorType>()) {
10541 if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask) {
10542 BuiltinVectorTypeInfo Info = getBuiltinVectorTypeInfo(Ty: BT);
10543 return FirstType->isRVVVLSBuiltinType() &&
10544 Info.ElementType == BoolTy &&
10545 getTypeSize(T: SecondType) == ((getRVVTypeSize(Context&: *this, Ty: BT)));
10546 }
10547 if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask_1) {
10548 BuiltinVectorTypeInfo Info = getBuiltinVectorTypeInfo(Ty: BT);
10549 return FirstType->isRVVVLSBuiltinType() &&
10550 Info.ElementType == BoolTy &&
10551 getTypeSize(T: SecondType) == ((getRVVTypeSize(Context&: *this, Ty: BT) * 8));
10552 }
10553 if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask_2) {
10554 BuiltinVectorTypeInfo Info = getBuiltinVectorTypeInfo(Ty: BT);
10555 return FirstType->isRVVVLSBuiltinType() &&
10556 Info.ElementType == BoolTy &&
10557 getTypeSize(T: SecondType) == ((getRVVTypeSize(Context&: *this, Ty: BT)) * 4);
10558 }
10559 if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask_4) {
10560 BuiltinVectorTypeInfo Info = getBuiltinVectorTypeInfo(Ty: BT);
10561 return FirstType->isRVVVLSBuiltinType() &&
10562 Info.ElementType == BoolTy &&
10563 getTypeSize(T: SecondType) == ((getRVVTypeSize(Context&: *this, Ty: BT)) * 2);
10564 }
10565 if (VT->getVectorKind() == VectorKind::RVVFixedLengthData ||
10566 VT->getVectorKind() == VectorKind::Generic)
10567 return FirstType->isRVVVLSBuiltinType() &&
10568 getTypeSize(T: SecondType) == getRVVTypeSize(Context&: *this, Ty: BT) &&
10569 hasSameType(T1: VT->getElementType(),
10570 T2: getBuiltinVectorTypeInfo(Ty: BT).ElementType);
10571 }
10572 }
10573 return false;
10574 };
10575
10576 return IsValidCast(FirstType, SecondType) ||
10577 IsValidCast(SecondType, FirstType);
10578}
10579
10580bool ASTContext::areLaxCompatibleRVVTypes(QualType FirstType,
10581 QualType SecondType) {
10582 assert(
10583 ((FirstType->isRVVSizelessBuiltinType() && SecondType->isVectorType()) ||
10584 (FirstType->isVectorType() && SecondType->isRVVSizelessBuiltinType())) &&
10585 "Expected RVV builtin type and vector type!");
10586
10587 auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) {
10588 const auto *BT = FirstType->getAs<BuiltinType>();
10589 if (!BT)
10590 return false;
10591
10592 if (!BT->isRVVVLSBuiltinType())
10593 return false;
10594
10595 const auto *VecTy = SecondType->getAs<VectorType>();
10596 if (VecTy && VecTy->getVectorKind() == VectorKind::Generic) {
10597 const LangOptions::LaxVectorConversionKind LVCKind =
10598 getLangOpts().getLaxVectorConversions();
10599
10600 // If __riscv_v_fixed_vlen != N do not allow vector lax conversion.
10601 if (getTypeSize(T: SecondType) != getRVVTypeSize(Context&: *this, Ty: BT))
10602 return false;
10603
10604 // If -flax-vector-conversions=all is specified, the types are
10605 // certainly compatible.
10606 if (LVCKind == LangOptions::LaxVectorConversionKind::All)
10607 return true;
10608
10609 // If -flax-vector-conversions=integer is specified, the types are
10610 // compatible if the elements are integer types.
10611 if (LVCKind == LangOptions::LaxVectorConversionKind::Integer)
10612 return VecTy->getElementType().getCanonicalType()->isIntegerType() &&
10613 FirstType->getRVVEltType(Ctx: *this)->isIntegerType();
10614 }
10615
10616 return false;
10617 };
10618
10619 return IsLaxCompatible(FirstType, SecondType) ||
10620 IsLaxCompatible(SecondType, FirstType);
10621}
10622
10623bool ASTContext::hasDirectOwnershipQualifier(QualType Ty) const {
10624 while (true) {
10625 // __strong id
10626 if (const AttributedType *Attr = dyn_cast<AttributedType>(Val&: Ty)) {
10627 if (Attr->getAttrKind() == attr::ObjCOwnership)
10628 return true;
10629
10630 Ty = Attr->getModifiedType();
10631
10632 // X *__strong (...)
10633 } else if (const ParenType *Paren = dyn_cast<ParenType>(Val&: Ty)) {
10634 Ty = Paren->getInnerType();
10635
10636 // We do not want to look through typedefs, typeof(expr),
10637 // typeof(type), or any other way that the type is somehow
10638 // abstracted.
10639 } else {
10640 return false;
10641 }
10642 }
10643}
10644
10645//===----------------------------------------------------------------------===//
10646// ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's.
10647//===----------------------------------------------------------------------===//
10648
10649/// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the
10650/// inheritance hierarchy of 'rProto'.
10651bool
10652ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto,
10653 ObjCProtocolDecl *rProto) const {
10654 if (declaresSameEntity(D1: lProto, D2: rProto))
10655 return true;
10656 for (auto *PI : rProto->protocols())
10657 if (ProtocolCompatibleWithProtocol(lProto, rProto: PI))
10658 return true;
10659 return false;
10660}
10661
10662/// ObjCQualifiedClassTypesAreCompatible - compare Class<pr,...> and
10663/// Class<pr1, ...>.
10664bool ASTContext::ObjCQualifiedClassTypesAreCompatible(
10665 const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs) {
10666 for (auto *lhsProto : lhs->quals()) {
10667 bool match = false;
10668 for (auto *rhsProto : rhs->quals()) {
10669 if (ProtocolCompatibleWithProtocol(lProto: lhsProto, rProto: rhsProto)) {
10670 match = true;
10671 break;
10672 }
10673 }
10674 if (!match)
10675 return false;
10676 }
10677 return true;
10678}
10679
/// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an
/// ObjCQualifiedIDType.
///
/// \param lhs left-hand pointer type of the (pseudo-)assignment.
/// \param rhs right-hand pointer type.
/// \param compare when true, protocol compatibility is additionally checked
///        in the reverse direction (rhs protocol satisfying lhs protocol),
///        making the check symmetric rather than assignment-directed.
/// \returns true if the two qualified-id pointer types are compatible.
bool ASTContext::ObjCQualifiedIdTypesAreCompatible(
    const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs,
    bool compare) {
  // Allow id<P..> and an 'id' in all cases.
  if (lhs->isObjCIdType() || rhs->isObjCIdType())
    return true;

  // Don't allow id<P..> to convert to Class or Class<P..> in either direction.
  if (lhs->isObjCClassType() || lhs->isObjCQualifiedClassType() ||
      rhs->isObjCClassType() || rhs->isObjCQualifiedClassType())
    return false;

  // Case 1: the LHS is the qualified id (id<P...>).
  if (lhs->isObjCQualifiedIdType()) {
    if (rhs->qual_empty()) {
      // If the RHS is a unqualified interface pointer "NSString*",
      // make sure we check the class hierarchy.
      if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) {
        for (auto *I : lhs->quals()) {
          // when comparing an id<P> on lhs with a static type on rhs,
          // see if static class implements all of id's protocols, directly or
          // through its super class and categories.
          if (!rhsID->ClassImplementsProtocol(lProto: I, lookupCategory: true))
            return false;
        }
      }
      // If there are no qualifiers and no interface, we have an 'id'.
      return true;
    }
    // Both the right and left sides have qualifiers.
    for (auto *lhsProto : lhs->quals()) {
      bool match = false;

      // when comparing an id<P> on lhs with a static type on rhs,
      // see if static class implements all of id's protocols, directly or
      // through its super class and categories.
      for (auto *rhsProto : rhs->quals()) {
        if (ProtocolCompatibleWithProtocol(lProto: lhsProto, rProto: rhsProto) ||
            (compare && ProtocolCompatibleWithProtocol(lProto: rhsProto, rProto: lhsProto))) {
          match = true;
          break;
        }
      }
      // If the RHS is a qualified interface pointer "NSString<P>*",
      // make sure we check the class hierarchy.
      // NOTE(review): this fallback re-scans all of lhs's qualifiers (not
      // just lhsProto) and accepts if the class implements any of them —
      // presumably intentional leniency, but confirm against upstream tests.
      if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) {
        for (auto *I : lhs->quals()) {
          // when comparing an id<P> on lhs with a static type on rhs,
          // see if static class implements all of id's protocols, directly or
          // through its super class and categories.
          if (rhsID->ClassImplementsProtocol(lProto: I, lookupCategory: true)) {
            match = true;
            break;
          }
        }
      }
      if (!match)
        return false;
    }

    return true;
  }

  // Case 2: the RHS must be the qualified id.
  assert(rhs->isObjCQualifiedIdType() && "One of the LHS/RHS should be id<x>");

  if (lhs->getInterfaceType()) {
    // If both the right and left sides have qualifiers.
    for (auto *lhsProto : lhs->quals()) {
      bool match = false;

      // when comparing an id<P> on rhs with a static type on lhs,
      // see if static class implements all of id's protocols, directly or
      // through its super class and categories.
      // First, lhs protocols in the qualifier list must be found, direct
      // or indirect in rhs's qualifier list or it is a mismatch.
      for (auto *rhsProto : rhs->quals()) {
        if (ProtocolCompatibleWithProtocol(lProto: lhsProto, rProto: rhsProto) ||
            (compare && ProtocolCompatibleWithProtocol(lProto: rhsProto, rProto: lhsProto))) {
          match = true;
          break;
        }
      }
      if (!match)
        return false;
    }

    // Static class's protocols, or its super class or category protocols
    // must be found, direct or indirect in rhs's qualifier list or it is a mismatch.
    if (ObjCInterfaceDecl *lhsID = lhs->getInterfaceDecl()) {
      llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols;
      CollectInheritedProtocols(CDecl: lhsID, Protocols&: LHSInheritedProtocols);
      // This is rather dubious but matches gcc's behavior. If lhs has
      // no type qualifier and its class has no static protocol(s)
      // assume that it is mismatch.
      if (LHSInheritedProtocols.empty() && lhs->qual_empty())
        return false;
      for (auto *lhsProto : LHSInheritedProtocols) {
        bool match = false;
        for (auto *rhsProto : rhs->quals()) {
          if (ProtocolCompatibleWithProtocol(lProto: lhsProto, rProto: rhsProto) ||
              (compare && ProtocolCompatibleWithProtocol(lProto: rhsProto, rProto: lhsProto))) {
            match = true;
            break;
          }
        }
        if (!match)
          return false;
      }
    }
    return true;
  }
  // LHS is neither a qualified id nor an interface pointer: no match.
  return false;
}
10794
10795/// canAssignObjCInterfaces - Return true if the two interface types are
10796/// compatible for assignment from RHS to LHS. This handles validation of any
10797/// protocol qualifiers on the LHS or RHS.
10798bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT,
10799 const ObjCObjectPointerType *RHSOPT) {
10800 const ObjCObjectType* LHS = LHSOPT->getObjectType();
10801 const ObjCObjectType* RHS = RHSOPT->getObjectType();
10802
10803 // If either type represents the built-in 'id' type, return true.
10804 if (LHS->isObjCUnqualifiedId() || RHS->isObjCUnqualifiedId())
10805 return true;
10806
10807 // Function object that propagates a successful result or handles
10808 // __kindof types.
10809 auto finish = [&](bool succeeded) -> bool {
10810 if (succeeded)
10811 return true;
10812
10813 if (!RHS->isKindOfType())
10814 return false;
10815
10816 // Strip off __kindof and protocol qualifiers, then check whether
10817 // we can assign the other way.
10818 return canAssignObjCInterfaces(LHSOPT: RHSOPT->stripObjCKindOfTypeAndQuals(ctx: *this),
10819 RHSOPT: LHSOPT->stripObjCKindOfTypeAndQuals(ctx: *this));
10820 };
10821
10822 // Casts from or to id<P> are allowed when the other side has compatible
10823 // protocols.
10824 if (LHS->isObjCQualifiedId() || RHS->isObjCQualifiedId()) {
10825 return finish(ObjCQualifiedIdTypesAreCompatible(lhs: LHSOPT, rhs: RHSOPT, compare: false));
10826 }
10827
10828 // Verify protocol compatibility for casts from Class<P1> to Class<P2>.
10829 if (LHS->isObjCQualifiedClass() && RHS->isObjCQualifiedClass()) {
10830 return finish(ObjCQualifiedClassTypesAreCompatible(lhs: LHSOPT, rhs: RHSOPT));
10831 }
10832
10833 // Casts from Class to Class<Foo>, or vice-versa, are allowed.
10834 if (LHS->isObjCClass() && RHS->isObjCClass()) {
10835 return true;
10836 }
10837
10838 // If we have 2 user-defined types, fall into that path.
10839 if (LHS->getInterface() && RHS->getInterface()) {
10840 return finish(canAssignObjCInterfaces(LHS, RHS));
10841 }
10842
10843 return false;
10844}
10845
/// canAssignObjCInterfacesInBlockPointer - This routine is specifically written
/// for providing type-safety for objective-c pointers used to pass/return
/// arguments in block literals. When passed as arguments, passing 'A*' where
/// 'id' is expected is not OK. Passing 'Sub *" where 'Super *" is expected is
/// not OK. For the return type, the opposite is not OK.
///
/// \param LHSOPT the declared (expected) pointer type.
/// \param RHSOPT the provided pointer type.
/// \param BlockReturnType true when checking the block's return type, which
///        flips the allowed direction of the class-hierarchy relationship.
bool ASTContext::canAssignObjCInterfacesInBlockPointer(
                                         const ObjCObjectPointerType *LHSOPT,
                                         const ObjCObjectPointerType *RHSOPT,
                                         bool BlockReturnType) {

  // Function object that propagates a successful result or handles
  // __kindof types.
  auto finish = [&](bool succeeded) -> bool {
    if (succeeded)
      return true;

    // Only retry when the "expected" side (which depends on whether we are
    // checking a parameter or the return type) is a __kindof type.
    const ObjCObjectPointerType *Expected = BlockReturnType ? RHSOPT : LHSOPT;
    if (!Expected->isKindOfType())
      return false;

    // Strip off __kindof and protocol qualifiers, then check whether
    // we can assign the other way.
    return canAssignObjCInterfacesInBlockPointer(
             LHSOPT: RHSOPT->stripObjCKindOfTypeAndQuals(ctx: *this),
             RHSOPT: LHSOPT->stripObjCKindOfTypeAndQuals(ctx: *this),
             BlockReturnType);
  };

  // A builtin RHS (id/Class) or an 'id' LHS is always acceptable.
  if (RHSOPT->isObjCBuiltinType() || LHSOPT->isObjCIdType())
    return true;

  // A builtin LHS only accepts a builtin or qualified-id RHS.
  if (LHSOPT->isObjCBuiltinType()) {
    return finish(RHSOPT->isObjCBuiltinType() ||
                  RHSOPT->isObjCQualifiedIdType());
  }

  // id<P...> on either side: fall back to qualified-id compatibility,
  // with the operand order depending on parameter vs. return position.
  if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType()) {
    if (getLangOpts().CompatibilityQualifiedIdBlockParamTypeChecking)
      // Use for block parameters previous type checking for compatibility.
      return finish(ObjCQualifiedIdTypesAreCompatible(lhs: LHSOPT, rhs: RHSOPT, compare: false) ||
                    // Or corrected type checking as in non-compat mode.
                    (!BlockReturnType &&
                     ObjCQualifiedIdTypesAreCompatible(lhs: RHSOPT, rhs: LHSOPT, compare: false)));
    else
      return finish(ObjCQualifiedIdTypesAreCompatible(
          lhs: (BlockReturnType ? LHSOPT : RHSOPT),
          rhs: (BlockReturnType ? RHSOPT : LHSOPT), compare: false));
  }

  // Two user-defined interface types: compare their class hierarchies.
  // For parameters the RHS must be a superclass of (or equal to) the LHS;
  // for the return type the relationship is reversed.
  const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType();
  const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType();
  if (LHS && RHS) { // We have 2 user-defined types.
    if (LHS != RHS) {
      if (LHS->getDecl()->isSuperClassOf(I: RHS->getDecl()))
        return finish(BlockReturnType);
      if (RHS->getDecl()->isSuperClassOf(I: LHS->getDecl()))
        return finish(!BlockReturnType);
    }
    else
      return true;
  }
  return false;
}
10909
10910/// Comparison routine for Objective-C protocols to be used with
10911/// llvm::array_pod_sort.
10912static int compareObjCProtocolsByName(ObjCProtocolDecl * const *lhs,
10913 ObjCProtocolDecl * const *rhs) {
10914 return (*lhs)->getName().compare(RHS: (*rhs)->getName());
10915}
10916
10917/// getIntersectionOfProtocols - This routine finds the intersection of set
10918/// of protocols inherited from two distinct objective-c pointer objects with
10919/// the given common base.
10920/// It is used to build composite qualifier list of the composite type of
10921/// the conditional expression involving two objective-c pointer objects.
10922static
10923void getIntersectionOfProtocols(ASTContext &Context,
10924 const ObjCInterfaceDecl *CommonBase,
10925 const ObjCObjectPointerType *LHSOPT,
10926 const ObjCObjectPointerType *RHSOPT,
10927 SmallVectorImpl<ObjCProtocolDecl *> &IntersectionSet) {
10928
10929 const ObjCObjectType* LHS = LHSOPT->getObjectType();
10930 const ObjCObjectType* RHS = RHSOPT->getObjectType();
10931 assert(LHS->getInterface() && "LHS must have an interface base");
10932 assert(RHS->getInterface() && "RHS must have an interface base");
10933
10934 // Add all of the protocols for the LHS.
10935 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSProtocolSet;
10936
10937 // Start with the protocol qualifiers.
10938 for (auto *proto : LHS->quals()) {
10939 Context.CollectInheritedProtocols(CDecl: proto, Protocols&: LHSProtocolSet);
10940 }
10941
10942 // Also add the protocols associated with the LHS interface.
10943 Context.CollectInheritedProtocols(CDecl: LHS->getInterface(), Protocols&: LHSProtocolSet);
10944
10945 // Add all of the protocols for the RHS.
10946 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSProtocolSet;
10947
10948 // Start with the protocol qualifiers.
10949 for (auto *proto : RHS->quals()) {
10950 Context.CollectInheritedProtocols(CDecl: proto, Protocols&: RHSProtocolSet);
10951 }
10952
10953 // Also add the protocols associated with the RHS interface.
10954 Context.CollectInheritedProtocols(CDecl: RHS->getInterface(), Protocols&: RHSProtocolSet);
10955
10956 // Compute the intersection of the collected protocol sets.
10957 for (auto *proto : LHSProtocolSet) {
10958 if (RHSProtocolSet.count(Ptr: proto))
10959 IntersectionSet.push_back(Elt: proto);
10960 }
10961
10962 // Compute the set of protocols that is implied by either the common type or
10963 // the protocols within the intersection.
10964 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> ImpliedProtocols;
10965 Context.CollectInheritedProtocols(CDecl: CommonBase, Protocols&: ImpliedProtocols);
10966
10967 // Remove any implied protocols from the list of inherited protocols.
10968 if (!ImpliedProtocols.empty()) {
10969 llvm::erase_if(C&: IntersectionSet, P: [&](ObjCProtocolDecl *proto) -> bool {
10970 return ImpliedProtocols.contains(Ptr: proto);
10971 });
10972 }
10973
10974 // Sort the remaining protocols by name.
10975 llvm::array_pod_sort(Start: IntersectionSet.begin(), End: IntersectionSet.end(),
10976 Compare: compareObjCProtocolsByName);
10977}
10978
10979/// Determine whether the first type is a subtype of the second.
10980static bool canAssignObjCObjectTypes(ASTContext &ctx, QualType lhs,
10981 QualType rhs) {
10982 // Common case: two object pointers.
10983 const auto *lhsOPT = lhs->getAs<ObjCObjectPointerType>();
10984 const auto *rhsOPT = rhs->getAs<ObjCObjectPointerType>();
10985 if (lhsOPT && rhsOPT)
10986 return ctx.canAssignObjCInterfaces(LHSOPT: lhsOPT, RHSOPT: rhsOPT);
10987
10988 // Two block pointers.
10989 const auto *lhsBlock = lhs->getAs<BlockPointerType>();
10990 const auto *rhsBlock = rhs->getAs<BlockPointerType>();
10991 if (lhsBlock && rhsBlock)
10992 return ctx.typesAreBlockPointerCompatible(lhs, rhs);
10993
10994 // If either is an unqualified 'id' and the other is a block, it's
10995 // acceptable.
10996 if ((lhsOPT && lhsOPT->isObjCIdType() && rhsBlock) ||
10997 (rhsOPT && rhsOPT->isObjCIdType() && lhsBlock))
10998 return true;
10999
11000 return false;
11001}
11002
11003// Check that the given Objective-C type argument lists are equivalent.
11004static bool sameObjCTypeArgs(ASTContext &ctx,
11005 const ObjCInterfaceDecl *iface,
11006 ArrayRef<QualType> lhsArgs,
11007 ArrayRef<QualType> rhsArgs,
11008 bool stripKindOf) {
11009 if (lhsArgs.size() != rhsArgs.size())
11010 return false;
11011
11012 ObjCTypeParamList *typeParams = iface->getTypeParamList();
11013 if (!typeParams)
11014 return false;
11015
11016 for (unsigned i = 0, n = lhsArgs.size(); i != n; ++i) {
11017 if (ctx.hasSameType(T1: lhsArgs[i], T2: rhsArgs[i]))
11018 continue;
11019
11020 switch (typeParams->begin()[i]->getVariance()) {
11021 case ObjCTypeParamVariance::Invariant:
11022 if (!stripKindOf ||
11023 !ctx.hasSameType(T1: lhsArgs[i].stripObjCKindOfType(ctx),
11024 T2: rhsArgs[i].stripObjCKindOfType(ctx))) {
11025 return false;
11026 }
11027 break;
11028
11029 case ObjCTypeParamVariance::Covariant:
11030 if (!canAssignObjCObjectTypes(ctx, lhs: lhsArgs[i], rhs: rhsArgs[i]))
11031 return false;
11032 break;
11033
11034 case ObjCTypeParamVariance::Contravariant:
11035 if (!canAssignObjCObjectTypes(ctx, lhs: rhsArgs[i], rhs: lhsArgs[i]))
11036 return false;
11037 break;
11038 }
11039 }
11040
11041 return true;
11042}
11043
/// Compute the common base type of two Objective-C object pointer types,
/// used to type the result of a conditional expression. Walks the LHS class
/// hierarchy looking for the RHS's class, then walks the RHS hierarchy
/// against the recorded LHS ancestors. Returns a pointer to the common base
/// (with merged type arguments, intersected protocols, and __kindof
/// propagated), or a null QualType if no common base exists.
QualType ASTContext::areCommonBaseCompatible(
           const ObjCObjectPointerType *Lptr,
           const ObjCObjectPointerType *Rptr) {
  const ObjCObjectType *LHS = Lptr->getObjectType();
  const ObjCObjectType *RHS = Rptr->getObjectType();
  const ObjCInterfaceDecl* LDecl = LHS->getInterface();
  const ObjCInterfaceDecl* RDecl = RHS->getInterface();

  // Both sides must name an interface; otherwise there is no common base.
  if (!LDecl || !RDecl)
    return {};

  // When either LHS or RHS is a kindof type, we should return a kindof type.
  // For example, for common base of kindof(ASub1) and kindof(ASub2), we return
  // kindof(A).
  bool anyKindOf = LHS->isKindOfType() || RHS->isKindOfType();

  // Follow the left-hand side up the class hierarchy until we either hit a
  // root or find the RHS. Record the ancestors in case we don't find it.
  llvm::SmallDenseMap<const ObjCInterfaceDecl *, const ObjCObjectType *, 4>
    LHSAncestors;
  while (true) {
    // Record this ancestor. We'll need this if the common type isn't in the
    // path from the LHS to the root.
    LHSAncestors[LHS->getInterface()->getCanonicalDecl()] = LHS;

    if (declaresSameEntity(D1: LHS->getInterface(), D2: RDecl)) {
      // Get the type arguments.
      ArrayRef<QualType> LHSTypeArgs = LHS->getTypeArgsAsWritten();
      bool anyChanges = false;
      if (LHS->isSpecialized() && RHS->isSpecialized()) {
        // Both have type arguments, compare them.
        if (!sameObjCTypeArgs(ctx&: *this, iface: LHS->getInterface(),
                              lhsArgs: LHS->getTypeArgs(), rhsArgs: RHS->getTypeArgs(),
                              /*stripKindOf=*/true))
          return {};
      } else if (LHS->isSpecialized() != RHS->isSpecialized()) {
        // If only one has type arguments, the result will not have type
        // arguments.
        LHSTypeArgs = {};
        anyChanges = true;
      }

      // Compute the intersection of protocols.
      SmallVector<ObjCProtocolDecl *, 8> Protocols;
      getIntersectionOfProtocols(Context&: *this, CommonBase: LHS->getInterface(), LHSOPT: Lptr, RHSOPT: Rptr,
                                 IntersectionSet&: Protocols);
      if (!Protocols.empty())
        anyChanges = true;

      // If anything in the LHS will have changed, build a new result type.
      // If we need to return a kindof type but LHS is not a kindof type, we
      // build a new result type.
      if (anyChanges || LHS->isKindOfType() != anyKindOf) {
        QualType Result = getObjCInterfaceType(Decl: LHS->getInterface());
        Result = getObjCObjectType(baseType: Result, typeArgs: LHSTypeArgs, protocols: Protocols,
                                   isKindOf: anyKindOf || LHS->isKindOfType());
        return getObjCObjectPointerType(ObjectT: Result);
      }

      // Otherwise the LHS ancestor itself is the result.
      return getObjCObjectPointerType(ObjectT: QualType(LHS, 0));
    }

    // Find the superclass.
    QualType LHSSuperType = LHS->getSuperClassType();
    if (LHSSuperType.isNull())
      break;

    LHS = LHSSuperType->castAs<ObjCObjectType>();
  }

  // We didn't find anything by following the LHS to its root; now check
  // the RHS against the cached set of ancestors.
  while (true) {
    auto KnownLHS = LHSAncestors.find(Val: RHS->getInterface()->getCanonicalDecl());
    if (KnownLHS != LHSAncestors.end()) {
      LHS = KnownLHS->second;

      // Get the type arguments.
      ArrayRef<QualType> RHSTypeArgs = RHS->getTypeArgsAsWritten();
      bool anyChanges = false;
      if (LHS->isSpecialized() && RHS->isSpecialized()) {
        // Both have type arguments, compare them.
        if (!sameObjCTypeArgs(ctx&: *this, iface: LHS->getInterface(),
                              lhsArgs: LHS->getTypeArgs(), rhsArgs: RHS->getTypeArgs(),
                              /*stripKindOf=*/true))
          return {};
      } else if (LHS->isSpecialized() != RHS->isSpecialized()) {
        // If only one has type arguments, the result will not have type
        // arguments.
        RHSTypeArgs = {};
        anyChanges = true;
      }

      // Compute the intersection of protocols.
      SmallVector<ObjCProtocolDecl *, 8> Protocols;
      getIntersectionOfProtocols(Context&: *this, CommonBase: RHS->getInterface(), LHSOPT: Lptr, RHSOPT: Rptr,
                                 IntersectionSet&: Protocols);
      if (!Protocols.empty())
        anyChanges = true;

      // If we need to return a kindof type but RHS is not a kindof type, we
      // build a new result type.
      if (anyChanges || RHS->isKindOfType() != anyKindOf) {
        QualType Result = getObjCInterfaceType(Decl: RHS->getInterface());
        Result = getObjCObjectType(baseType: Result, typeArgs: RHSTypeArgs, protocols: Protocols,
                                   isKindOf: anyKindOf || RHS->isKindOfType());
        return getObjCObjectPointerType(ObjectT: Result);
      }

      // Otherwise the RHS ancestor itself is the result.
      return getObjCObjectPointerType(ObjectT: QualType(RHS, 0));
    }

    // Find the superclass of the RHS.
    QualType RHSSuperType = RHS->getSuperClassType();
    if (RHSSuperType.isNull())
      break;

    RHS = RHSSuperType->castAs<ObjCObjectType>();
  }

  // The hierarchies never met: no common base.
  return {};
}
11166
/// Determine whether an object of the RHS interface type can be assigned to
/// the LHS interface type: the RHS class must be a subclass of (or the same
/// as) the LHS class, the RHS must satisfy the LHS's protocol qualifiers,
/// and any LHS type arguments must be compatible with the RHS's.
bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS,
                                         const ObjCObjectType *RHS) {
  assert(LHS->getInterface() && "LHS is not an interface type");
  assert(RHS->getInterface() && "RHS is not an interface type");

  // Verify that the base decls are compatible: the RHS must be a subclass of
  // the LHS.
  ObjCInterfaceDecl *LHSInterface = LHS->getInterface();
  bool IsSuperClass = LHSInterface->isSuperClassOf(I: RHS->getInterface());
  if (!IsSuperClass)
    return false;

  // If the LHS has protocol qualifiers, determine whether all of them are
  // satisfied by the RHS (i.e., the RHS has a superset of the protocols in the
  // LHS).
  if (LHS->getNumProtocols() > 0) {
    // OK if conversion of LHS to SuperClass results in narrowing of types
    // ; i.e., SuperClass may implement at least one of the protocols
    // in LHS's protocol list. Example, SuperObj<P1> = lhs<P1,P2> is ok.
    // But not SuperObj<P1,P2,P3> = lhs<P1,P2>.
    llvm::SmallPtrSet<ObjCProtocolDecl *, 8> SuperClassInheritedProtocols;
    CollectInheritedProtocols(CDecl: RHS->getInterface(), Protocols&: SuperClassInheritedProtocols);
    // Also, if RHS has explicit qualifiers, include them for comparing with
    // LHS's qualifiers.
    for (auto *RHSPI : RHS->quals())
      CollectInheritedProtocols(CDecl: RHSPI, Protocols&: SuperClassInheritedProtocols);
    // If there are no protocols associated with RHS, it is not a match.
    if (SuperClassInheritedProtocols.empty())
      return false;

    // Every LHS protocol must be reachable (by name lookup) from some
    // protocol the RHS carries or inherits.
    for (const auto *LHSProto : LHS->quals()) {
      bool SuperImplementsProtocol = false;
      for (auto *SuperClassProto : SuperClassInheritedProtocols)
        if (SuperClassProto->lookupProtocolNamed(PName: LHSProto->getIdentifier())) {
          SuperImplementsProtocol = true;
          break;
        }
      if (!SuperImplementsProtocol)
        return false;
    }
  }

  // If the LHS is specialized, we may need to check type arguments.
  if (LHS->isSpecialized()) {
    // Follow the superclass chain until we've matched the LHS class in the
    // hierarchy. This substitutes type arguments through.
    const ObjCObjectType *RHSSuper = RHS;
    while (!declaresSameEntity(D1: RHSSuper->getInterface(), D2: LHSInterface))
      RHSSuper = RHSSuper->getSuperClassType()->castAs<ObjCObjectType>();

    // If the RHS is specialized, compare type arguments.
    if (RHSSuper->isSpecialized() &&
        !sameObjCTypeArgs(ctx&: *this, iface: LHS->getInterface(),
                          lhsArgs: LHS->getTypeArgs(), rhsArgs: RHSSuper->getTypeArgs(),
                          /*stripKindOf=*/true)) {
      return false;
    }
  }

  return true;
}
11228
11229bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) {
11230 // get the "pointed to" types
11231 const auto *LHSOPT = LHS->getAs<ObjCObjectPointerType>();
11232 const auto *RHSOPT = RHS->getAs<ObjCObjectPointerType>();
11233
11234 if (!LHSOPT || !RHSOPT)
11235 return false;
11236
11237 return canAssignObjCInterfaces(LHSOPT, RHSOPT) ||
11238 canAssignObjCInterfaces(LHSOPT: RHSOPT, RHSOPT: LHSOPT);
11239}
11240
11241bool ASTContext::canBindObjCObjectType(QualType To, QualType From) {
11242 return canAssignObjCInterfaces(
11243 LHSOPT: getObjCObjectPointerType(ObjectT: To)->castAs<ObjCObjectPointerType>(),
11244 RHSOPT: getObjCObjectPointerType(ObjectT: From)->castAs<ObjCObjectPointerType>());
11245}
11246
11247/// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible,
11248/// both shall have the identically qualified version of a compatible type.
11249/// C99 6.2.7p1: Two types have compatible types if their types are the
11250/// same. See 6.7.[2,3,5] for additional rules.
11251bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS,
11252 bool CompareUnqualified) {
11253 if (getLangOpts().CPlusPlus)
11254 return hasSameType(T1: LHS, T2: RHS);
11255
11256 return !mergeTypes(LHS, RHS, OfBlockPointer: false, Unqualified: CompareUnqualified).isNull();
11257}
11258
/// Objective-C property types follow the ordinary C/C++ type-compatibility
/// rules; this is a direct delegate to typesAreCompatible.
bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) {
  return typesAreCompatible(LHS, RHS);
}
11262
11263bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) {
11264 return !mergeTypes(LHS, RHS, OfBlockPointer: true).isNull();
11265}
11266
11267/// mergeTransparentUnionType - if T is a transparent union type and a member
11268/// of T is compatible with SubType, return the merged type, else return
11269/// QualType()
11270QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType,
11271 bool OfBlockPointer,
11272 bool Unqualified) {
11273 if (const RecordType *UT = T->getAsUnionType()) {
11274 RecordDecl *UD = UT->getDecl();
11275 if (UD->hasAttr<TransparentUnionAttr>()) {
11276 for (const auto *I : UD->fields()) {
11277 QualType ET = I->getType().getUnqualifiedType();
11278 QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified);
11279 if (!MT.isNull())
11280 return MT;
11281 }
11282 }
11283 }
11284
11285 return {};
11286}
11287
11288/// mergeFunctionParameterTypes - merge two types which appear as function
11289/// parameter types
11290QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs,
11291 bool OfBlockPointer,
11292 bool Unqualified) {
11293 // GNU extension: two types are compatible if they appear as a function
11294 // argument, one of the types is a transparent union type and the other
11295 // type is compatible with a union member
11296 QualType lmerge = mergeTransparentUnionType(T: lhs, SubType: rhs, OfBlockPointer,
11297 Unqualified);
11298 if (!lmerge.isNull())
11299 return lmerge;
11300
11301 QualType rmerge = mergeTransparentUnionType(T: rhs, SubType: lhs, OfBlockPointer,
11302 Unqualified);
11303 if (!rmerge.isNull())
11304 return rmerge;
11305
11306 return mergeTypes(lhs, rhs, OfBlockPointer, Unqualified);
11307}
11308
/// Merge two function types into their composite type, or return a null
/// QualType if they are incompatible. Handles return types, calling
/// conventions, noreturn (whose merge direction flips for the conditional
/// operator), function effects, and parameter lists for the
/// prototype/prototype, prototype/no-prototype, and no-prototype cases.
QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
                                        bool OfBlockPointer, bool Unqualified,
                                        bool AllowCXX,
                                        bool IsConditionalOperator) {
  const auto *lbase = lhs->castAs<FunctionType>();
  const auto *rbase = rhs->castAs<FunctionType>();
  const auto *lproto = dyn_cast<FunctionProtoType>(Val: lbase);
  const auto *rproto = dyn_cast<FunctionProtoType>(Val: rbase);
  // Track whether the merged type is identical to the LHS/RHS as written,
  // so we can return the original type object instead of building a new one.
  bool allLTypes = true;
  bool allRTypes = true;

  // Check return type
  QualType retType;
  if (OfBlockPointer) {
    QualType RHS = rbase->getReturnType();
    QualType LHS = lbase->getReturnType();
    bool UnqualifiedResult = Unqualified;
    if (!UnqualifiedResult)
      UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers());
    retType = mergeTypes(LHS, RHS, OfBlockPointer: true, Unqualified: UnqualifiedResult, BlockReturnType: true);
  }
  else
    retType = mergeTypes(lbase->getReturnType(), rbase->getReturnType(), OfBlockPointer: false,
                         Unqualified);
  if (retType.isNull())
    return {};

  if (Unqualified)
    retType = retType.getUnqualifiedType();

  CanQualType LRetType = getCanonicalType(T: lbase->getReturnType());
  CanQualType RRetType = getCanonicalType(T: rbase->getReturnType());
  if (Unqualified) {
    LRetType = LRetType.getUnqualifiedType();
    RRetType = RRetType.getUnqualifiedType();
  }

  if (getCanonicalType(T: retType) != LRetType)
    allLTypes = false;
  if (getCanonicalType(T: retType) != RRetType)
    allRTypes = false;

  // FIXME: double check this
  // FIXME: should we error if lbase->getRegParmAttr() != 0 &&
  //                           rbase->getRegParmAttr() != 0 &&
  //                           lbase->getRegParmAttr() != rbase->getRegParmAttr()?
  FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo();
  FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo();

  // Compatible functions must have compatible calling conventions
  if (lbaseInfo.getCC() != rbaseInfo.getCC())
    return {};

  // Regparm is part of the calling convention.
  if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm())
    return {};
  if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm())
    return {};

  if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult())
    return {};
  if (lbaseInfo.getNoCallerSavedRegs() != rbaseInfo.getNoCallerSavedRegs())
    return {};
  if (lbaseInfo.getNoCfCheck() != rbaseInfo.getNoCfCheck())
    return {};

  // When merging declarations, it's common for supplemental information like
  // attributes to only be present in one of the declarations, and we generally
  // want type merging to preserve the union of information.  So a merged
  // function type should be noreturn if it was noreturn in *either* operand
  // type.
  //
  // But for the conditional operator, this is backwards.  The result of the
  // operator could be either operand, and its type should conservatively
  // reflect that.  So a function type in a composite type is noreturn only
  // if it's noreturn in *both* operand types.
  //
  // Arguably, noreturn is a kind of subtype, and the conditional operator
  // ought to produce the most specific common supertype of its operand types.
  // That would differ from this rule in contravariant positions.  However,
  // neither C nor C++ generally uses this kind of subtype reasoning.  Also,
  // as a practical matter, it would only affect C code that does abstraction of
  // higher-order functions (taking noreturn callbacks!), which is uncommon to
  // say the least.  So we use the simpler rule.
  bool NoReturn = IsConditionalOperator
                      ? lbaseInfo.getNoReturn() && rbaseInfo.getNoReturn()
                      : lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn();
  if (lbaseInfo.getNoReturn() != NoReturn)
    allLTypes = false;
  if (rbaseInfo.getNoReturn() != NoReturn)
    allRTypes = false;

  FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(noReturn: NoReturn);

  std::optional<FunctionEffectSet> MergedFX;

  if (lproto && rproto) { // two C99 style function prototypes
    assert((AllowCXX ||
            (!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec())) &&
           "C++ shouldn't be here");
    // Compatible functions must have the same number of parameters
    if (lproto->getNumParams() != rproto->getNumParams())
      return {};

    // Variadic and non-variadic functions aren't compatible
    if (lproto->isVariadic() != rproto->isVariadic())
      return {};

    if (lproto->getMethodQuals() != rproto->getMethodQuals())
      return {};

    // Function effects are handled similarly to noreturn, see above.
    FunctionEffectsRef LHSFX = lproto->getFunctionEffects();
    FunctionEffectsRef RHSFX = rproto->getFunctionEffects();
    if (LHSFX != RHSFX) {
      if (IsConditionalOperator)
        MergedFX = FunctionEffectSet::getIntersection(LHS: LHSFX, RHS: RHSFX);
      else {
        FunctionEffectSet::Conflicts Errs;
        MergedFX = FunctionEffectSet::getUnion(LHS: LHSFX, RHS: RHSFX, Errs);
        // Here we're discarding a possible error due to conflicts in the effect
        // sets. But we're not in a context where we can report it. The
        // operation does however guarantee maintenance of invariants.
      }
      if (*MergedFX != LHSFX)
        allLTypes = false;
      if (*MergedFX != RHSFX)
        allRTypes = false;
    }

    // Merge the extended parameter information (e.g. ns_consumed, noescape).
    SmallVector<FunctionProtoType::ExtParameterInfo, 4> newParamInfos;
    bool canUseLeft, canUseRight;
    if (!mergeExtParameterInfo(FirstFnType: lproto, SecondFnType: rproto, CanUseFirst&: canUseLeft, CanUseSecond&: canUseRight,
                               NewParamInfos&: newParamInfos))
      return {};

    if (!canUseLeft)
      allLTypes = false;
    if (!canUseRight)
      allRTypes = false;

    // Check parameter type compatibility
    SmallVector<QualType, 10> types;
    for (unsigned i = 0, n = lproto->getNumParams(); i < n; i++) {
      QualType lParamType = lproto->getParamType(i).getUnqualifiedType();
      QualType rParamType = rproto->getParamType(i).getUnqualifiedType();
      QualType paramType = mergeFunctionParameterTypes(
          lhs: lParamType, rhs: rParamType, OfBlockPointer, Unqualified);
      if (paramType.isNull())
        return {};

      if (Unqualified)
        paramType = paramType.getUnqualifiedType();

      types.push_back(Elt: paramType);
      if (Unqualified) {
        lParamType = lParamType.getUnqualifiedType();
        rParamType = rParamType.getUnqualifiedType();
      }

      if (getCanonicalType(T: paramType) != getCanonicalType(T: lParamType))
        allLTypes = false;
      if (getCanonicalType(T: paramType) != getCanonicalType(T: rParamType))
        allRTypes = false;
    }

    // If the merge changed nothing on one side, reuse that type directly.
    if (allLTypes) return lhs;
    if (allRTypes) return rhs;

    FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo();
    EPI.ExtInfo = einfo;
    EPI.ExtParameterInfos =
        newParamInfos.empty() ? nullptr : newParamInfos.data();
    if (MergedFX)
      EPI.FunctionEffects = *MergedFX;
    return getFunctionType(ResultTy: retType, Args: types, EPI);
  }

  // Exactly one side has a prototype; the other side cannot match it as
  // written.
  if (lproto) allRTypes = false;
  if (rproto) allLTypes = false;

  const FunctionProtoType *proto = lproto ? lproto : rproto;
  if (proto) {
    assert((AllowCXX || !proto->hasExceptionSpec()) && "C++ shouldn't be here");
    if (proto->isVariadic())
      return {};
    // Check that the types are compatible with the types that
    // would result from default argument promotions (C99 6.7.5.3p15).
    // The only types actually affected are promotable integer
    // types and floats, which would be passed as a different
    // type depending on whether the prototype is visible.
    for (unsigned i = 0, n = proto->getNumParams(); i < n; ++i) {
      QualType paramTy = proto->getParamType(i);

      // Look at the converted type of enum types, since that is the type used
      // to pass enum values.
      if (const auto *Enum = paramTy->getAs<EnumType>()) {
        paramTy = Enum->getDecl()->getIntegerType();
        if (paramTy.isNull())
          return {};
      }

      if (isPromotableIntegerType(T: paramTy) ||
          getCanonicalType(T: paramTy).getUnqualifiedType() == FloatTy)
        return {};
    }

    if (allLTypes) return lhs;
    if (allRTypes) return rhs;

    FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo();
    EPI.ExtInfo = einfo;
    if (MergedFX)
      EPI.FunctionEffects = *MergedFX;
    return getFunctionType(ResultTy: retType, Args: proto->getParamTypes(), EPI);
  }

  // Neither side has a prototype: build a K&R-style function type.
  if (allLTypes) return lhs;
  if (allRTypes) return rhs;
  return getFunctionNoProtoType(ResultTy: retType, Info: einfo);
}
11530
11531/// Given that we have an enum type and a non-enum type, try to merge them.
11532static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET,
11533 QualType other, bool isBlockReturnType) {
11534 // C99 6.7.2.2p4: Each enumerated type shall be compatible with char,
11535 // a signed integer type, or an unsigned integer type.
11536 // Compatibility is based on the underlying type, not the promotion
11537 // type.
11538 QualType underlyingType = ET->getDecl()->getIntegerType();
11539 if (underlyingType.isNull())
11540 return {};
11541 if (Context.hasSameType(T1: underlyingType, T2: other))
11542 return other;
11543
11544 // In block return types, we're more permissive and accept any
11545 // integral type of the same size.
11546 if (isBlockReturnType && other->isIntegerType() &&
11547 Context.getTypeSize(T: underlyingType) == Context.getTypeSize(T: other))
11548 return other;
11549
11550 return {};
11551}
11552
11553QualType ASTContext::mergeTagDefinitions(QualType LHS, QualType RHS) {
11554 // C17 and earlier and C++ disallow two tag definitions within the same TU
11555 // from being compatible.
11556 if (LangOpts.CPlusPlus || !LangOpts.C23)
11557 return {};
11558
11559 // C23, on the other hand, requires the members to be "the same enough", so
11560 // we use a structural equivalence check.
11561 StructuralEquivalenceContext::NonEquivalentDeclSet NonEquivalentDecls;
11562 StructuralEquivalenceContext Ctx(
11563 getLangOpts(), *this, *this, NonEquivalentDecls,
11564 StructuralEquivalenceKind::Default, /*StrictTypeSpelling=*/false,
11565 /*Complain=*/false, /*ErrorOnTagTypeMismatch=*/true);
11566 return Ctx.IsEquivalent(T1: LHS, T2: RHS) ? LHS : QualType{};
11567}
11568
/// Merge two types into a single compatible ("composite") type, C-style.
/// Returns the merged type — preferring the LHS or RHS spelling when one of
/// them already matches the merged result — or a null QualType when the two
/// types are not compatible.
QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, bool OfBlockPointer,
                                bool Unqualified, bool BlockReturnType,
                                bool IsConditionalOperator) {
  // For C++ we will not reach this code with reference types (see below),
  // for OpenMP variant call overloading we might.
  //
  // C++ [expr]: If an expression initially has the type "reference to T", the
  // type is adjusted to "T" prior to any further analysis, the expression
  // designates the object or function denoted by the reference, and the
  // expression is an lvalue unless the reference is an rvalue reference and
  // the expression is a function call (possibly inside parentheses).
  auto *LHSRefTy = LHS->getAs<ReferenceType>();
  auto *RHSRefTy = RHS->getAs<ReferenceType>();
  if (LangOpts.OpenMP && LHSRefTy && RHSRefTy &&
      LHS->getTypeClass() == RHS->getTypeClass())
    return mergeTypes(LHS: LHSRefTy->getPointeeType(), RHS: RHSRefTy->getPointeeType(),
                      OfBlockPointer, Unqualified, BlockReturnType);
  if (LHSRefTy || RHSRefTy)
    return {};

  if (Unqualified) {
    LHS = LHS.getUnqualifiedType();
    RHS = RHS.getUnqualifiedType();
  }

  QualType LHSCan = getCanonicalType(T: LHS),
           RHSCan = getCanonicalType(T: RHS);

  // If two types are identical, they are compatible.
  if (LHSCan == RHSCan)
    return LHS;

  // If the qualifiers are different, the types aren't compatible... mostly.
  Qualifiers LQuals = LHSCan.getLocalQualifiers();
  Qualifiers RQuals = RHSCan.getLocalQualifiers();
  if (LQuals != RQuals) {
    // If any of these qualifiers are different, we have a type
    // mismatch.
    if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() ||
        LQuals.getAddressSpace() != RQuals.getAddressSpace() ||
        LQuals.getObjCLifetime() != RQuals.getObjCLifetime() ||
        !LQuals.getPointerAuth().isEquivalent(Other: RQuals.getPointerAuth()) ||
        LQuals.hasUnaligned() != RQuals.hasUnaligned())
      return {};

    // Exactly one GC qualifier difference is allowed: __strong is
    // okay if the other type has no GC qualifier but is an Objective
    // C object pointer (i.e. implicitly strong by default). We fix
    // this by pretending that the unqualified type was actually
    // qualified __strong.
    Qualifiers::GC GC_L = LQuals.getObjCGCAttr();
    Qualifiers::GC GC_R = RQuals.getObjCGCAttr();
    assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements");

    if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak)
      return {};

    if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) {
      return mergeTypes(LHS, RHS: getObjCGCQualType(T: RHS, GCAttr: Qualifiers::Strong));
    }
    if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) {
      return mergeTypes(LHS: getObjCGCQualType(T: LHS, GCAttr: Qualifiers::Strong), RHS);
    }
    return {};
  }

  // Okay, qualifiers are equal.

  Type::TypeClass LHSClass = LHSCan->getTypeClass();
  Type::TypeClass RHSClass = RHSCan->getTypeClass();

  // We want to consider the two function types to be the same for these
  // comparisons, just force one to the other.
  if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto;
  if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto;

  // Same as above for arrays
  if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray)
    LHSClass = Type::ConstantArray;
  if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray)
    RHSClass = Type::ConstantArray;

  // ObjCInterfaces are just specialized ObjCObjects.
  if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject;
  if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject;

  // Canonicalize ExtVector -> Vector.
  if (LHSClass == Type::ExtVector) LHSClass = Type::Vector;
  if (RHSClass == Type::ExtVector) RHSClass = Type::Vector;

  // If the canonical type classes don't match.
  if (LHSClass != RHSClass) {
    // Note that we only have special rules for turning block enum
    // returns into block int returns, not vice-versa.
    if (const auto *ETy = LHS->getAs<EnumType>()) {
      return mergeEnumWithInteger(Context&: *this, ET: ETy, other: RHS, isBlockReturnType: false);
    }
    if (const EnumType* ETy = RHS->getAs<EnumType>()) {
      return mergeEnumWithInteger(Context&: *this, ET: ETy, other: LHS, isBlockReturnType: BlockReturnType);
    }
    // allow block pointer type to match an 'id' type.
    if (OfBlockPointer && !BlockReturnType) {
       if (LHS->isObjCIdType() && RHS->isBlockPointerType())
         return LHS;
      if (RHS->isObjCIdType() && LHS->isBlockPointerType())
        return RHS;
    }
    // Allow __auto_type to match anything; it merges to the type with more
    // information.
    if (const auto *AT = LHS->getAs<AutoType>()) {
      if (!AT->isDeduced() && AT->isGNUAutoType())
        return RHS;
    }
    if (const auto *AT = RHS->getAs<AutoType>()) {
      if (!AT->isDeduced() && AT->isGNUAutoType())
        return LHS;
    }
    return {};
  }

  // The canonical type classes match.
  switch (LHSClass) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");

  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
  case Type::LValueReference:
  case Type::RValueReference:
  case Type::MemberPointer:
    llvm_unreachable("C++ should never be in mergeTypes");

  case Type::ObjCInterface:
  case Type::IncompleteArray:
  case Type::VariableArray:
  case Type::FunctionProto:
  case Type::ExtVector:
    llvm_unreachable("Types are eliminated above");

  case Type::Pointer:
  {
    // Merge two pointer types, while trying to preserve typedef info
    QualType LHSPointee = LHS->castAs<PointerType>()->getPointeeType();
    QualType RHSPointee = RHS->castAs<PointerType>()->getPointeeType();
    if (Unqualified) {
      LHSPointee = LHSPointee.getUnqualifiedType();
      RHSPointee = RHSPointee.getUnqualifiedType();
    }
    QualType ResultType = mergeTypes(LHS: LHSPointee, RHS: RHSPointee, OfBlockPointer: false,
                                     Unqualified);
    if (ResultType.isNull())
      return {};
    if (getCanonicalType(T: LHSPointee) == getCanonicalType(T: ResultType))
      return LHS;
    if (getCanonicalType(T: RHSPointee) == getCanonicalType(T: ResultType))
      return RHS;
    return getPointerType(T: ResultType);
  }
  case Type::BlockPointer:
  {
    // Merge two block pointer types, while trying to preserve typedef info
    QualType LHSPointee = LHS->castAs<BlockPointerType>()->getPointeeType();
    QualType RHSPointee = RHS->castAs<BlockPointerType>()->getPointeeType();
    if (Unqualified) {
      LHSPointee = LHSPointee.getUnqualifiedType();
      RHSPointee = RHSPointee.getUnqualifiedType();
    }
    if (getLangOpts().OpenCL) {
      Qualifiers LHSPteeQual = LHSPointee.getQualifiers();
      Qualifiers RHSPteeQual = RHSPointee.getQualifiers();
      // Blocks can't be an expression in a ternary operator (OpenCL v2.0
      // 6.12.5) thus the following check is asymmetric.
      if (!LHSPteeQual.isAddressSpaceSupersetOf(other: RHSPteeQual, Ctx: *this))
        return {};
      LHSPteeQual.removeAddressSpace();
      RHSPteeQual.removeAddressSpace();
      LHSPointee =
          QualType(LHSPointee.getTypePtr(), LHSPteeQual.getAsOpaqueValue());
      RHSPointee =
          QualType(RHSPointee.getTypePtr(), RHSPteeQual.getAsOpaqueValue());
    }
    QualType ResultType = mergeTypes(LHS: LHSPointee, RHS: RHSPointee, OfBlockPointer,
                                     Unqualified);
    if (ResultType.isNull())
      return {};
    if (getCanonicalType(T: LHSPointee) == getCanonicalType(T: ResultType))
      return LHS;
    if (getCanonicalType(T: RHSPointee) == getCanonicalType(T: ResultType))
      return RHS;
    return getBlockPointerType(T: ResultType);
  }
  case Type::Atomic:
  {
    // Merge two pointer types, while trying to preserve typedef info
    QualType LHSValue = LHS->castAs<AtomicType>()->getValueType();
    QualType RHSValue = RHS->castAs<AtomicType>()->getValueType();
    if (Unqualified) {
      LHSValue = LHSValue.getUnqualifiedType();
      RHSValue = RHSValue.getUnqualifiedType();
    }
    QualType ResultType = mergeTypes(LHS: LHSValue, RHS: RHSValue, OfBlockPointer: false,
                                     Unqualified);
    if (ResultType.isNull())
      return {};
    if (getCanonicalType(T: LHSValue) == getCanonicalType(T: ResultType))
      return LHS;
    if (getCanonicalType(T: RHSValue) == getCanonicalType(T: ResultType))
      return RHS;
    return getAtomicType(T: ResultType);
  }
  case Type::ConstantArray:
  {
    const ConstantArrayType* LCAT = getAsConstantArrayType(T: LHS);
    const ConstantArrayType* RCAT = getAsConstantArrayType(T: RHS);
    if (LCAT && RCAT && RCAT->getZExtSize() != LCAT->getZExtSize())
      return {};

    QualType LHSElem = getAsArrayType(T: LHS)->getElementType();
    QualType RHSElem = getAsArrayType(T: RHS)->getElementType();
    if (Unqualified) {
      LHSElem = LHSElem.getUnqualifiedType();
      RHSElem = RHSElem.getUnqualifiedType();
    }

    QualType ResultType = mergeTypes(LHS: LHSElem, RHS: RHSElem, OfBlockPointer: false, Unqualified);
    if (ResultType.isNull())
      return {};

    const VariableArrayType* LVAT = getAsVariableArrayType(T: LHS);
    const VariableArrayType* RVAT = getAsVariableArrayType(T: RHS);

    // If either side is a variable array, and both are complete, check whether
    // the current dimension is definite.
    if (LVAT || RVAT) {
      // SizeFetch yields {true, size} when the dimension is a known integer
      // constant — either a VLA whose size expression folds to a constant,
      // or a constant array type.
      auto SizeFetch = [this](const VariableArrayType* VAT,
                              const ConstantArrayType* CAT)
          -> std::pair<bool,llvm::APInt> {
        if (VAT) {
          std::optional<llvm::APSInt> TheInt;
          Expr *E = VAT->getSizeExpr();
          if (E && (TheInt = E->getIntegerConstantExpr(Ctx: *this)))
            return std::make_pair(x: true, y&: *TheInt);
          return std::make_pair(x: false, y: llvm::APSInt());
        }
        if (CAT)
          return std::make_pair(x: true, y: CAT->getSize());
        return std::make_pair(x: false, y: llvm::APInt());
      };

      bool HaveLSize, HaveRSize;
      llvm::APInt LSize, RSize;
      std::tie(args&: HaveLSize, args&: LSize) = SizeFetch(LVAT, LCAT);
      std::tie(args&: HaveRSize, args&: RSize) = SizeFetch(RVAT, RCAT);
      if (HaveLSize && HaveRSize && !llvm::APInt::isSameValue(I1: LSize, I2: RSize))
        return {}; // Definite, but unequal, array dimension
    }

    if (LCAT && getCanonicalType(T: LHSElem) == getCanonicalType(T: ResultType))
      return LHS;
    if (RCAT && getCanonicalType(T: RHSElem) == getCanonicalType(T: ResultType))
      return RHS;
    if (LCAT)
      return getConstantArrayType(EltTy: ResultType, ArySizeIn: LCAT->getSize(),
                                  SizeExpr: LCAT->getSizeExpr(), ASM: ArraySizeModifier(), IndexTypeQuals: 0);
    if (RCAT)
      return getConstantArrayType(EltTy: ResultType, ArySizeIn: RCAT->getSize(),
                                  SizeExpr: RCAT->getSizeExpr(), ASM: ArraySizeModifier(), IndexTypeQuals: 0);
    if (LVAT && getCanonicalType(T: LHSElem) == getCanonicalType(T: ResultType))
      return LHS;
    if (RVAT && getCanonicalType(T: RHSElem) == getCanonicalType(T: ResultType))
      return RHS;
    if (LVAT) {
      // FIXME: This isn't correct! But tricky to implement because
      // the array's size has to be the size of LHS, but the type
      // has to be different.
      return LHS;
    }
    if (RVAT) {
      // FIXME: This isn't correct! But tricky to implement because
      // the array's size has to be the size of RHS, but the type
      // has to be different.
      return RHS;
    }
    if (getCanonicalType(T: LHSElem) == getCanonicalType(T: ResultType)) return LHS;
    if (getCanonicalType(T: RHSElem) == getCanonicalType(T: ResultType)) return RHS;
    return getIncompleteArrayType(elementType: ResultType, ASM: ArraySizeModifier(), elementTypeQuals: 0);
  }
  case Type::FunctionNoProto:
    return mergeFunctionTypes(lhs: LHS, rhs: RHS, OfBlockPointer, Unqualified,
                              /*AllowCXX=*/false, IsConditionalOperator);
  case Type::Record:
  case Type::Enum:
    return mergeTagDefinitions(LHS, RHS);
  case Type::Builtin:
    // Only exactly equal builtin types are compatible, which is tested above.
    return {};
  case Type::Complex:
    // Distinct complex types are incompatible.
    return {};
  case Type::Vector:
    // FIXME: The merged type should be an ExtVector!
    if (areCompatVectorTypes(LHS: LHSCan->castAs<VectorType>(),
                             RHS: RHSCan->castAs<VectorType>()))
      return LHS;
    return {};
  case Type::ConstantMatrix:
    if (areCompatMatrixTypes(LHS: LHSCan->castAs<ConstantMatrixType>(),
                             RHS: RHSCan->castAs<ConstantMatrixType>()))
      return LHS;
    return {};
  case Type::ObjCObject: {
    // Check if the types are assignment compatible.
    // FIXME: This should be type compatibility, e.g. whether
    // "LHS x; RHS x;" at global scope is legal.
    if (canAssignObjCInterfaces(LHS: LHS->castAs<ObjCObjectType>(),
                                RHS: RHS->castAs<ObjCObjectType>()))
      return LHS;
    return {};
  }
  case Type::ObjCObjectPointer:
    if (OfBlockPointer) {
      if (canAssignObjCInterfacesInBlockPointer(
              LHSOPT: LHS->castAs<ObjCObjectPointerType>(),
              RHSOPT: RHS->castAs<ObjCObjectPointerType>(), BlockReturnType))
        return LHS;
      return {};
    }
    if (canAssignObjCInterfaces(LHSOPT: LHS->castAs<ObjCObjectPointerType>(),
                                RHSOPT: RHS->castAs<ObjCObjectPointerType>()))
      return LHS;
    return {};
  case Type::Pipe:
    assert(LHS != RHS &&
           "Equivalent pipe types should have already been handled!");
    return {};
  case Type::ArrayParameter:
    assert(LHS != RHS &&
           "Equivalent ArrayParameter types should have already been handled!");
    return {};
  case Type::BitInt: {
    // Merge two bit-precise int types, while trying to preserve typedef info.
    bool LHSUnsigned = LHS->castAs<BitIntType>()->isUnsigned();
    bool RHSUnsigned = RHS->castAs<BitIntType>()->isUnsigned();
    unsigned LHSBits = LHS->castAs<BitIntType>()->getNumBits();
    unsigned RHSBits = RHS->castAs<BitIntType>()->getNumBits();

    // Like unsigned/int, shouldn't have a type if they don't match.
    if (LHSUnsigned != RHSUnsigned)
      return {};

    if (LHSBits != RHSBits)
      return {};
    return LHS;
  }
  case Type::HLSLAttributedResource: {
    const HLSLAttributedResourceType *LHSTy =
        LHS->castAs<HLSLAttributedResourceType>();
    const HLSLAttributedResourceType *RHSTy =
        RHS->castAs<HLSLAttributedResourceType>();
    assert(LHSTy->getWrappedType() == RHSTy->getWrappedType() &&
           LHSTy->getWrappedType()->isHLSLResourceType() &&
           "HLSLAttributedResourceType should always wrap __hlsl_resource_t");

    if (LHSTy->getAttrs() == RHSTy->getAttrs() &&
        LHSTy->getContainedType() == RHSTy->getContainedType())
      return LHS;
    return {};
  }
  case Type::HLSLInlineSpirv:
    const HLSLInlineSpirvType *LHSTy = LHS->castAs<HLSLInlineSpirvType>();
    const HLSLInlineSpirvType *RHSTy = RHS->castAs<HLSLInlineSpirvType>();

    if (LHSTy->getOpcode() == RHSTy->getOpcode() &&
        LHSTy->getSize() == RHSTy->getSize() &&
        LHSTy->getAlignment() == RHSTy->getAlignment()) {
      for (size_t I = 0; I < LHSTy->getOperands().size(); I++)
        if (LHSTy->getOperands()[I] != RHSTy->getOperands()[I])
          return {};

      return LHS;
    }
    return {};
  }

  llvm_unreachable("Invalid Type::Class!");
}
11960
11961bool ASTContext::mergeExtParameterInfo(
11962 const FunctionProtoType *FirstFnType, const FunctionProtoType *SecondFnType,
11963 bool &CanUseFirst, bool &CanUseSecond,
11964 SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &NewParamInfos) {
11965 assert(NewParamInfos.empty() && "param info list not empty");
11966 CanUseFirst = CanUseSecond = true;
11967 bool FirstHasInfo = FirstFnType->hasExtParameterInfos();
11968 bool SecondHasInfo = SecondFnType->hasExtParameterInfos();
11969
11970 // Fast path: if the first type doesn't have ext parameter infos,
11971 // we match if and only if the second type also doesn't have them.
11972 if (!FirstHasInfo && !SecondHasInfo)
11973 return true;
11974
11975 bool NeedParamInfo = false;
11976 size_t E = FirstHasInfo ? FirstFnType->getExtParameterInfos().size()
11977 : SecondFnType->getExtParameterInfos().size();
11978
11979 for (size_t I = 0; I < E; ++I) {
11980 FunctionProtoType::ExtParameterInfo FirstParam, SecondParam;
11981 if (FirstHasInfo)
11982 FirstParam = FirstFnType->getExtParameterInfo(I);
11983 if (SecondHasInfo)
11984 SecondParam = SecondFnType->getExtParameterInfo(I);
11985
11986 // Cannot merge unless everything except the noescape flag matches.
11987 if (FirstParam.withIsNoEscape(NoEscape: false) != SecondParam.withIsNoEscape(NoEscape: false))
11988 return false;
11989
11990 bool FirstNoEscape = FirstParam.isNoEscape();
11991 bool SecondNoEscape = SecondParam.isNoEscape();
11992 bool IsNoEscape = FirstNoEscape && SecondNoEscape;
11993 NewParamInfos.push_back(Elt: FirstParam.withIsNoEscape(NoEscape: IsNoEscape));
11994 if (NewParamInfos.back().getOpaqueValue())
11995 NeedParamInfo = true;
11996 if (FirstNoEscape != IsNoEscape)
11997 CanUseFirst = false;
11998 if (SecondNoEscape != IsNoEscape)
11999 CanUseSecond = false;
12000 }
12001
12002 if (!NeedParamInfo)
12003 NewParamInfos.clear();
12004
12005 return true;
12006}
12007
12008void ASTContext::ResetObjCLayout(const ObjCInterfaceDecl *D) {
12009 if (auto It = ObjCLayouts.find(Val: D); It != ObjCLayouts.end()) {
12010 It->second = nullptr;
12011 for (auto *SubClass : ObjCSubClasses[D])
12012 ResetObjCLayout(D: SubClass);
12013 }
12014}
12015
12016/// mergeObjCGCQualifiers - This routine merges ObjC's GC attribute of 'LHS' and
12017/// 'RHS' attributes and returns the merged version; including for function
12018/// return types.
12019QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) {
12020 QualType LHSCan = getCanonicalType(T: LHS),
12021 RHSCan = getCanonicalType(T: RHS);
12022 // If two types are identical, they are compatible.
12023 if (LHSCan == RHSCan)
12024 return LHS;
12025 if (RHSCan->isFunctionType()) {
12026 if (!LHSCan->isFunctionType())
12027 return {};
12028 QualType OldReturnType =
12029 cast<FunctionType>(Val: RHSCan.getTypePtr())->getReturnType();
12030 QualType NewReturnType =
12031 cast<FunctionType>(Val: LHSCan.getTypePtr())->getReturnType();
12032 QualType ResReturnType =
12033 mergeObjCGCQualifiers(LHS: NewReturnType, RHS: OldReturnType);
12034 if (ResReturnType.isNull())
12035 return {};
12036 if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) {
12037 // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo();
12038 // In either case, use OldReturnType to build the new function type.
12039 const auto *F = LHS->castAs<FunctionType>();
12040 if (const auto *FPT = cast<FunctionProtoType>(Val: F)) {
12041 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
12042 EPI.ExtInfo = getFunctionExtInfo(t: LHS);
12043 QualType ResultType =
12044 getFunctionType(ResultTy: OldReturnType, Args: FPT->getParamTypes(), EPI);
12045 return ResultType;
12046 }
12047 }
12048 return {};
12049 }
12050
12051 // If the qualifiers are different, the types can still be merged.
12052 Qualifiers LQuals = LHSCan.getLocalQualifiers();
12053 Qualifiers RQuals = RHSCan.getLocalQualifiers();
12054 if (LQuals != RQuals) {
12055 // If any of these qualifiers are different, we have a type mismatch.
12056 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() ||
12057 LQuals.getAddressSpace() != RQuals.getAddressSpace())
12058 return {};
12059
12060 // Exactly one GC qualifier difference is allowed: __strong is
12061 // okay if the other type has no GC qualifier but is an Objective
12062 // C object pointer (i.e. implicitly strong by default). We fix
12063 // this by pretending that the unqualified type was actually
12064 // qualified __strong.
12065 Qualifiers::GC GC_L = LQuals.getObjCGCAttr();
12066 Qualifiers::GC GC_R = RQuals.getObjCGCAttr();
12067 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements");
12068
12069 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak)
12070 return {};
12071
12072 if (GC_L == Qualifiers::Strong)
12073 return LHS;
12074 if (GC_R == Qualifiers::Strong)
12075 return RHS;
12076 return {};
12077 }
12078
12079 if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) {
12080 QualType LHSBaseQT = LHS->castAs<ObjCObjectPointerType>()->getPointeeType();
12081 QualType RHSBaseQT = RHS->castAs<ObjCObjectPointerType>()->getPointeeType();
12082 QualType ResQT = mergeObjCGCQualifiers(LHS: LHSBaseQT, RHS: RHSBaseQT);
12083 if (ResQT == LHSBaseQT)
12084 return LHS;
12085 if (ResQT == RHSBaseQT)
12086 return RHS;
12087 }
12088 return {};
12089}
12090
12091//===----------------------------------------------------------------------===//
12092// Integer Predicates
12093//===----------------------------------------------------------------------===//
12094
12095unsigned ASTContext::getIntWidth(QualType T) const {
12096 if (const auto *ET = T->getAs<EnumType>())
12097 T = ET->getDecl()->getIntegerType();
12098 if (T->isBooleanType())
12099 return 1;
12100 if (const auto *EIT = T->getAs<BitIntType>())
12101 return EIT->getNumBits();
12102 // For builtin types, just use the standard type sizing method
12103 return (unsigned)getTypeSize(T);
12104}
12105
/// Map an integer-like type to its unsigned counterpart of the same width.
/// Handles vectors (element-wise), _BitInt, enums (via the underlying
/// integer type), and fixed-point types; types that are already unsigned
/// are returned unchanged by the default case below.
QualType ASTContext::getCorrespondingUnsignedType(QualType T) const {
  assert((T->hasIntegerRepresentation() || T->isEnumeralType() ||
          T->isFixedPointType()) &&
         "Unexpected type");

  // Turn <4 x signed int> -> <4 x unsigned int>
  if (const auto *VTy = T->getAs<VectorType>())
    return getVectorType(vecType: getCorrespondingUnsignedType(T: VTy->getElementType()),
                         NumElts: VTy->getNumElements(), VecKind: VTy->getVectorKind());

  // For _BitInt, return an unsigned _BitInt with same width.
  if (const auto *EITy = T->getAs<BitIntType>())
    return getBitIntType(/*Unsigned=*/IsUnsigned: true, NumBits: EITy->getNumBits());

  // For enums, get the underlying integer type of the enum, and let the general
  // integer type signchanging code handle it.
  if (const auto *ETy = T->getAs<EnumType>())
    T = ETy->getDecl()->getIntegerType();

  switch (T->castAs<BuiltinType>()->getKind()) {
  case BuiltinType::Char_U:
    // Plain `char` is mapped to `unsigned char` even if it's already unsigned
  case BuiltinType::Char_S:
  case BuiltinType::SChar:
  case BuiltinType::Char8:
    return UnsignedCharTy;
  case BuiltinType::Short:
    return UnsignedShortTy;
  case BuiltinType::Int:
    return UnsignedIntTy;
  case BuiltinType::Long:
    return UnsignedLongTy;
  case BuiltinType::LongLong:
    return UnsignedLongLongTy;
  case BuiltinType::Int128:
    return UnsignedInt128Ty;
  // wchar_t is special. It is either signed or not, but when it's signed,
  // there's no matching "unsigned wchar_t". Therefore we return the unsigned
  // version of its underlying type instead.
  case BuiltinType::WChar_S:
    return getUnsignedWCharType();

  case BuiltinType::ShortAccum:
    return UnsignedShortAccumTy;
  case BuiltinType::Accum:
    return UnsignedAccumTy;
  case BuiltinType::LongAccum:
    return UnsignedLongAccumTy;
  case BuiltinType::SatShortAccum:
    return SatUnsignedShortAccumTy;
  case BuiltinType::SatAccum:
    return SatUnsignedAccumTy;
  case BuiltinType::SatLongAccum:
    return SatUnsignedLongAccumTy;
  case BuiltinType::ShortFract:
    return UnsignedShortFractTy;
  case BuiltinType::Fract:
    return UnsignedFractTy;
  case BuiltinType::LongFract:
    return UnsignedLongFractTy;
  case BuiltinType::SatShortFract:
    return SatUnsignedShortFractTy;
  case BuiltinType::SatFract:
    return SatUnsignedFractTy;
  case BuiltinType::SatLongFract:
    return SatUnsignedLongFractTy;
  default:
    // Anything not listed above must already have unsigned representation.
    assert((T->hasUnsignedIntegerRepresentation() ||
            T->isUnsignedFixedPointType()) &&
           "Unexpected signed integer or fixed point type");
    return T;
  }
}
12179
/// Map an integer-like type to its signed counterpart of the same width.
/// Mirror of getCorrespondingUnsignedType: handles vectors (element-wise),
/// _BitInt, enums (via the underlying integer type), and fixed-point types;
/// already-signed types are returned unchanged by the default case below.
QualType ASTContext::getCorrespondingSignedType(QualType T) const {
  assert((T->hasIntegerRepresentation() || T->isEnumeralType() ||
          T->isFixedPointType()) &&
         "Unexpected type");

  // Turn <4 x unsigned int> -> <4 x signed int>
  if (const auto *VTy = T->getAs<VectorType>())
    return getVectorType(vecType: getCorrespondingSignedType(T: VTy->getElementType()),
                         NumElts: VTy->getNumElements(), VecKind: VTy->getVectorKind());

  // For _BitInt, return a signed _BitInt with same width.
  if (const auto *EITy = T->getAs<BitIntType>())
    return getBitIntType(/*Unsigned=*/IsUnsigned: false, NumBits: EITy->getNumBits());

  // For enums, get the underlying integer type of the enum, and let the general
  // integer type signchanging code handle it.
  if (const auto *ETy = T->getAs<EnumType>())
    T = ETy->getDecl()->getIntegerType();

  switch (T->castAs<BuiltinType>()->getKind()) {
  case BuiltinType::Char_S:
    // Plain `char` is mapped to `signed char` even if it's already signed
  case BuiltinType::Char_U:
  case BuiltinType::UChar:
  case BuiltinType::Char8:
    return SignedCharTy;
  case BuiltinType::UShort:
    return ShortTy;
  case BuiltinType::UInt:
    return IntTy;
  case BuiltinType::ULong:
    return LongTy;
  case BuiltinType::ULongLong:
    return LongLongTy;
  case BuiltinType::UInt128:
    return Int128Ty;
  // wchar_t is special. It is either unsigned or not, but when it's unsigned,
  // there's no matching "signed wchar_t". Therefore we return the signed
  // version of its underlying type instead.
  case BuiltinType::WChar_U:
    return getSignedWCharType();

  case BuiltinType::UShortAccum:
    return ShortAccumTy;
  case BuiltinType::UAccum:
    return AccumTy;
  case BuiltinType::ULongAccum:
    return LongAccumTy;
  case BuiltinType::SatUShortAccum:
    return SatShortAccumTy;
  case BuiltinType::SatUAccum:
    return SatAccumTy;
  case BuiltinType::SatULongAccum:
    return SatLongAccumTy;
  case BuiltinType::UShortFract:
    return ShortFractTy;
  case BuiltinType::UFract:
    return FractTy;
  case BuiltinType::ULongFract:
    return LongFractTy;
  case BuiltinType::SatUShortFract:
    return SatShortFractTy;
  case BuiltinType::SatUFract:
    return SatFractTy;
  case BuiltinType::SatULongFract:
    return SatLongFractTy;
  default:
    // Anything not listed above must already have signed representation.
    assert(
        (T->hasSignedIntegerRepresentation() || T->isSignedFixedPointType()) &&
        "Unexpected signed integer or fixed point type");
    return T;
  }
}
12253
// Out-of-line defaulted destructor (presumably anchors the class's vtable in
// this TU — see ASTMutationListener.h for the interface).
ASTMutationListener::~ASTMutationListener() = default;
12255
// Default no-op implementation; listeners interested in deduced return types
// presumably override this (see ASTMutationListener.h).
void ASTMutationListener::DeducedReturnType(const FunctionDecl *FD,
                                            QualType ReturnType) {}
12258
12259//===----------------------------------------------------------------------===//
12260// Builtin Type Computation
12261//===----------------------------------------------------------------------===//
12262
/// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the
/// pointer over the consumed characters. This returns the resultant type. If
/// AllowTypeModifiers is false then modifier like * are not parsed, just basic
/// types. This allows "v2i*" to be parsed as a pointer to a v2i instead of
/// a vector of "i*".
///
/// RequiresICE is filled in on return to indicate whether the value is required
/// to be an Integer Constant Expression.
///
/// On failure (missing stdio/setjmp/ucontext types), Error is set and a null
/// QualType is returned.
static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
                                  ASTContext::GetBuiltinTypeError &Error,
                                  bool &RequiresICE,
                                  bool AllowTypeModifiers) {
  // Modifiers.
  int HowLong = 0;   // Number of 'L' (long) modifiers applied: 0..3.
  bool Signed = false, Unsigned = false;
  RequiresICE = false;

  // Read the prefixed modifiers first.
  bool Done = false;
#ifndef NDEBUG
  // Tracks whether one of the mutually-exclusive 'N'/'W'/'Z'/'O' modifiers
  // has been seen; only used for the asserts below.
  bool IsSpecial = false;
#endif
  while (!Done) {
    switch (*Str++) {
    default: Done = true; --Str; break;
    case 'I':
      // The corresponding argument must be an integer constant expression.
      RequiresICE = true;
      break;
    case 'S':
      assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!");
      assert(!Signed && "Can't use 'S' modifier multiple times!");
      Signed = true;
      break;
    case 'U':
      assert(!Signed && "Can't use both 'S' and 'U' modifiers!");
      assert(!Unsigned && "Can't use 'U' modifier multiple times!");
      Unsigned = true;
      break;
    case 'L':
      assert(!IsSpecial && "Can't use 'L' with 'W', 'N', 'Z' or 'O' modifiers");
      assert(HowLong <= 2 && "Can't have LLLL modifier");
      ++HowLong;
      break;
    case 'N':
      // 'N' behaves like 'L' for all non LP64 targets and 'int' otherwise.
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'N' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      if (Context.getTargetInfo().getLongWidth() == 32)
        ++HowLong;
      break;
    case 'W':
      // This modifier represents int64 type.
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'W' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      // Map to however many 'L's produce the target's 64-bit integer type.
      switch (Context.getTargetInfo().getInt64Type()) {
      default:
        llvm_unreachable("Unexpected integer type");
      case TargetInfo::SignedLong:
        HowLong = 1;
        break;
      case TargetInfo::SignedLongLong:
        HowLong = 2;
        break;
      }
      break;
    case 'Z':
      // This modifier represents int32 type.
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'Z' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      // Map to however many 'L's produce the target's 32-bit integer type.
      switch (Context.getTargetInfo().getIntTypeByWidth(BitWidth: 32, IsSigned: true)) {
      default:
        llvm_unreachable("Unexpected integer type");
      case TargetInfo::SignedInt:
        HowLong = 0;
        break;
      case TargetInfo::SignedLong:
        HowLong = 1;
        break;
      case TargetInfo::SignedLongLong:
        HowLong = 2;
        break;
      }
      break;
    case 'O':
      // 'O' means 'long' under OpenCL and 'long long' otherwise.
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'O' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      if (Context.getLangOpts().OpenCL)
        HowLong = 1;
      else
        HowLong = 2;
      break;
    }
  }

  QualType Type;

  // Read the base type.
  switch (*Str++) {
  default: llvm_unreachable("Unknown builtin type letter!");
  case 'x': // _Float16
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'x'!");
    Type = Context.Float16Ty;
    break;
  case 'y': // __bf16
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'y'!");
    Type = Context.BFloat16Ty;
    break;
  case 'v': // void
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'v'!");
    Type = Context.VoidTy;
    break;
  case 'h': // half
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'h'!");
    Type = Context.HalfTy;
    break;
  case 'f': // float
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'f'!");
    Type = Context.FloatTy;
    break;
  case 'd': // double / long double ('Ld') / __float128 ('LLd')
    assert(HowLong < 3 && !Signed && !Unsigned &&
           "Bad modifiers used with 'd'!");
    if (HowLong == 1)
      Type = Context.LongDoubleTy;
    else if (HowLong == 2)
      Type = Context.Float128Ty;
    else
      Type = Context.DoubleTy;
    break;
  case 's': // short
    assert(HowLong == 0 && "Bad modifiers used with 's'!");
    if (Unsigned)
      Type = Context.UnsignedShortTy;
    else
      Type = Context.ShortTy;
    break;
  case 'i': // int, scaled by 'L' count up to __int128.
    if (HowLong == 3)
      Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty;
    else if (HowLong == 2)
      Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy;
    else if (HowLong == 1)
      Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy;
    else
      Type = Unsigned ? Context.UnsignedIntTy : Context.IntTy;
    break;
  case 'c': // char (plain, signed, or unsigned per modifiers)
    assert(HowLong == 0 && "Bad modifiers used with 'c'!");
    if (Signed)
      Type = Context.SignedCharTy;
    else if (Unsigned)
      Type = Context.UnsignedCharTy;
    else
      Type = Context.CharTy;
    break;
  case 'b': // boolean
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!");
    Type = Context.BoolTy;
    break;
  case 'z': // size_t.
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!");
    Type = Context.getSizeType();
    break;
  case 'w': // wchar_t.
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'w'!");
    Type = Context.getWideCharType();
    break;
  case 'F': // CoreFoundation constant string type.
    Type = Context.getCFConstantStringType();
    break;
  case 'G': // Objective-C 'id'.
    Type = Context.getObjCIdType();
    break;
  case 'H': // Objective-C 'SEL'.
    Type = Context.getObjCSelType();
    break;
  case 'M': // Objective-C super type.
    Type = Context.getObjCSuperType();
    break;
  case 'a': // __builtin_va_list, by value.
    Type = Context.getBuiltinVaListType();
    assert(!Type.isNull() && "builtin va list type not initialized!");
    break;
  case 'A':
    // This is a "reference" to a va_list; however, what exactly
    // this means depends on how va_list is defined. There are two
    // different kinds of va_list: ones passed by value, and ones
    // passed by reference. An example of a by-value va_list is
    // x86, where va_list is a char*. An example of by-ref va_list
    // is x86-64, where va_list is a __va_list_tag[1]. For x86,
    // we want this argument to be a char*&; for x86-64, we want
    // it to be a __va_list_tag*.
    Type = Context.getBuiltinVaListType();
    assert(!Type.isNull() && "builtin va list type not initialized!");
    if (Type->isArrayType())
      Type = Context.getArrayDecayedType(Ty: Type);
    else
      Type = Context.getLValueReferenceType(T: Type);
    break;
  case 'q': {
    // Scalable vector: 'q<NumElements><element-type>'.
    char *End;
    unsigned NumElements = strtoul(nptr: Str, endptr: &End, base: 10);
    assert(End != Str && "Missing vector size");
    Str = End;

    // Recurse for the element type; modifiers are not allowed there.
    QualType ElementType = DecodeTypeFromStr(Str, Context, Error,
                                             RequiresICE, AllowTypeModifiers: false);
    assert(!RequiresICE && "Can't require vector ICE");

    Type = Context.getScalableVectorType(EltTy: ElementType, NumElts: NumElements);
    break;
  }
  case 'Q': {
    // Target-specific builtin types, selected by a second letter.
    switch (*Str++) {
    case 'a': {
      Type = Context.SveCountTy;
      break;
    }
    case 'b': {
      Type = Context.AMDGPUBufferRsrcTy;
      break;
    }
    default:
      llvm_unreachable("Unexpected target builtin type");
    }
    break;
  }
  case 'V': {
    // Fixed-width generic vector: 'V<NumElements><element-type>'.
    char *End;
    unsigned NumElements = strtoul(nptr: Str, endptr: &End, base: 10);
    assert(End != Str && "Missing vector size");
    Str = End;

    QualType ElementType = DecodeTypeFromStr(Str, Context, Error,
                                             RequiresICE, AllowTypeModifiers: false);
    assert(!RequiresICE && "Can't require vector ICE");

    // TODO: No way to make AltiVec vectors in builtins yet.
    Type = Context.getVectorType(vecType: ElementType, NumElts: NumElements, VecKind: VectorKind::Generic);
    break;
  }
  case 'E': {
    // ext_vector_type: 'E<NumElements><element-type>'.
    char *End;

    unsigned NumElements = strtoul(nptr: Str, endptr: &End, base: 10);
    assert(End != Str && "Missing vector size");

    Str = End;

    QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE,
                                             AllowTypeModifiers: false);
    Type = Context.getExtVectorType(vecType: ElementType, NumElts: NumElements);
    break;
  }
  case 'X': {
    // _Complex of the following element type.
    QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE,
                                             AllowTypeModifiers: false);
    assert(!RequiresICE && "Can't require complex ICE");
    Type = Context.getComplexType(T: ElementType);
    break;
  }
  case 'Y': // ptrdiff_t.
    Type = Context.getPointerDiffType();
    break;
  case 'P': // FILE; fails if <stdio.h> hasn't declared it.
    Type = Context.getFILEType();
    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_stdio;
      return {};
    }
    break;
  case 'J': // jmp_buf ('J') or sigjmp_buf ('SJ'); needs <setjmp.h>.
    if (Signed)
      Type = Context.getsigjmp_bufType();
    else
      Type = Context.getjmp_bufType();

    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_setjmp;
      return {};
    }
    break;
  case 'K': // ucontext_t; needs <ucontext.h>.
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'K'!");
    Type = Context.getucontext_tType();

    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_ucontext;
      return {};
    }
    break;
  case 'p': // pid_t.
    Type = Context.getProcessIDType();
    break;
  case 'm': // __mfp8 (modal 8-bit float).
    Type = Context.MFloat8Ty;
    break;
  }

  // If there are modifiers and if we're allowed to parse them, go for it.
  Done = !AllowTypeModifiers;
  while (!Done) {
    switch (char c = *Str++) {
    default: Done = true; --Str; break;
    case '*':
    case '&': {
      // Both pointers and references can have their pointee types
      // qualified with an address space.
      char *End;
      unsigned AddrSpace = strtoul(nptr: Str, endptr: &End, base: 10);
      if (End != Str) {
        // Note AddrSpace == 0 is not the same as an unspecified address space.
        Type = Context.getAddrSpaceQualType(
            T: Type,
            AddressSpace: Context.getLangASForBuiltinAddressSpace(AS: AddrSpace));
        Str = End;
      }
      if (c == '*')
        Type = Context.getPointerType(T: Type);
      else
        Type = Context.getLValueReferenceType(T: Type);
      break;
    }
    // FIXME: There's no way to have a built-in with an rvalue ref arg.
    case 'C': // const-qualify.
      Type = Type.withConst();
      break;
    case 'D': // volatile-qualify.
      Type = Context.getVolatileType(T: Type);
      break;
    case 'R': // restrict-qualify.
      Type = Type.withRestrict();
      break;
    }
  }

  assert((!RequiresICE || Type->isIntegralOrEnumerationType()) &&
         "Integer constant 'I' type must be an integer");

  return Type;
}
12621
12622// On some targets such as PowerPC, some of the builtins are defined with custom
12623// type descriptors for target-dependent types. These descriptors are decoded in
12624// other functions, but it may be useful to be able to fall back to default
12625// descriptor decoding to define builtins mixing target-dependent and target-
12626// independent types. This function allows decoding one type descriptor with
12627// default decoding.
12628QualType ASTContext::DecodeTypeStr(const char *&Str, const ASTContext &Context,
12629 GetBuiltinTypeError &Error, bool &RequireICE,
12630 bool AllowTypeModifiers) const {
12631 return DecodeTypeFromStr(Str, Context, Error, RequiresICE&: RequireICE, AllowTypeModifiers);
12632}
12633
/// GetBuiltinType - Return the type for the specified builtin.
///
/// The builtin's type string encodes the result type first, followed by each
/// parameter type; a trailing '.' marks a variadic builtin. On failure, Error
/// is set and a null type is returned. If IntegerConstantArgs is non-null, it
/// receives a bitmask of the parameter positions that are required to be
/// integer constant expressions.
QualType ASTContext::GetBuiltinType(unsigned Id,
                                    GetBuiltinTypeError &Error,
                                    unsigned *IntegerConstantArgs) const {
  const char *TypeStr = BuiltinInfo.getTypeString(ID: Id);
  if (TypeStr[0] == '\0') {
    // No type string is recorded for this builtin.
    Error = GE_Missing_type;
    return {};
  }

  SmallVector<QualType, 8> ArgTypes;

  bool RequiresICE = false;
  Error = GE_None;
  // The first descriptor is the result type.
  QualType ResType = DecodeTypeFromStr(Str&: TypeStr, Context: *this, Error,
                                       RequiresICE, AllowTypeModifiers: true);
  if (Error != GE_None)
    return {};

  assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE");

  // Decode each parameter type until the end of the string or the variadic
  // marker '.'.
  while (TypeStr[0] && TypeStr[0] != '.') {
    QualType Ty = DecodeTypeFromStr(Str&: TypeStr, Context: *this, Error, RequiresICE, AllowTypeModifiers: true);
    if (Error != GE_None)
      return {};

    // If this argument is required to be an IntegerConstantExpression and the
    // caller cares, fill in the bitmask we return.
    if (RequiresICE && IntegerConstantArgs)
      *IntegerConstantArgs |= 1 << ArgTypes.size();

    // Do array -> pointer decay. The builtin should use the decayed type.
    if (Ty->isArrayType())
      Ty = getArrayDecayedType(Ty);

    ArgTypes.push_back(Elt: Ty);
  }

  // No function type is produced for __GetExceptionInfo.
  if (Id == Builtin::BI__GetExceptionInfo)
    return {};

  assert((TypeStr[0] != '.' || TypeStr[1] == 0) &&
         "'.' should only occur at end of builtin type list!");

  bool Variadic = (TypeStr[0] == '.');

  // Builtins use the target's default calling convention regardless of any
  // user-specified default (IsBuiltin=true below).
  FunctionType::ExtInfo EI(getDefaultCallingConvention(
      IsVariadic: Variadic, /*IsCXXMethod=*/false, /*IsBuiltin=*/true));
  if (BuiltinInfo.isNoReturn(ID: Id)) EI = EI.withNoReturn(noReturn: true);


  // We really shouldn't be making a no-proto type here.
  if (ArgTypes.empty() && Variadic && !getLangOpts().requiresStrictPrototypes())
    return getFunctionNoProtoType(ResultTy: ResType, Info: EI);

  FunctionProtoType::ExtProtoInfo EPI;
  EPI.ExtInfo = EI;
  EPI.Variadic = Variadic;
  // In C++, non-throwing builtins get an exception specification: noexcept
  // in C++11 and later, throw() before that.
  if (getLangOpts().CPlusPlus && BuiltinInfo.isNoThrow(ID: Id))
    EPI.ExceptionSpec.Type =
        getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone;

  return getFunctionType(ResultTy: ResType, Args: ArgTypes, EPI);
}
12698
// Compute the base GVA linkage for a function from its visibility, template
// specialization kind, and inline semantics — before any adjustments for
// attributes (dllimport/dllexport, CUDA) or external AST sources.
static GVALinkage basicGVALinkageForFunction(const ASTContext &Context,
                                             const FunctionDecl *FD) {
  if (!FD->isExternallyVisible())
    return GVA_Internal;

  // Non-user-provided functions get emitted as weak definitions with every
  // use, no matter whether they've been explicitly instantiated etc.
  if (!FD->isUserProvided())
    return GVA_DiscardableODR;

  // Linkage to use if the function turns out not to be inline.
  GVALinkage External;
  switch (FD->getTemplateSpecializationKind()) {
  case TSK_Undeclared:
  case TSK_ExplicitSpecialization:
    External = GVA_StrongExternal;
    break;

  case TSK_ExplicitInstantiationDefinition:
    return GVA_StrongODR;

  // C++11 [temp.explicit]p10:
  //   [ Note: The intent is that an inline function that is the subject of
  //   an explicit instantiation declaration will still be implicitly
  //   instantiated when used so that the body can be considered for
  //   inlining, but that no out-of-line copy of the inline function would be
  //   generated in the translation unit. -- end note ]
  case TSK_ExplicitInstantiationDeclaration:
    return GVA_AvailableExternally;

  case TSK_ImplicitInstantiation:
    External = GVA_DiscardableODR;
    break;
  }

  if (!FD->isInlined())
    return External;

  if ((!Context.getLangOpts().CPlusPlus &&
       !Context.getTargetInfo().getCXXABI().isMicrosoft() &&
       !FD->hasAttr<DLLExportAttr>()) ||
      FD->hasAttr<GNUInlineAttr>()) {
    // FIXME: This doesn't match gcc's behavior for dllexport inline functions.

    // GNU or C99 inline semantics. Determine whether this symbol should be
    // externally visible.
    if (FD->isInlineDefinitionExternallyVisible())
      return External;

    // C99 inline semantics, where the symbol is not externally visible.
    return GVA_AvailableExternally;
  }

  // Functions specified with extern and inline in -fms-compatibility mode
  // forcibly get emitted. While the body of the function cannot be later
  // replaced, the function definition cannot be discarded.
  if (FD->isMSExternInline())
    return GVA_StrongODR;

  if (Context.getTargetInfo().getCXXABI().isMicrosoft() &&
      isa<CXXConstructorDecl>(Val: FD) &&
      cast<CXXConstructorDecl>(Val: FD)->isInheritingConstructor())
    // Our approach to inheriting constructors is fundamentally different from
    // that used by the MS ABI, so keep our inheriting constructor thunks
    // internal rather than trying to pick an unambiguous mangling for them.
    return GVA_Internal;

  return GVA_DiscardableODR;
}
12767
12768static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context,
12769 const Decl *D, GVALinkage L) {
12770 // See http://msdn.microsoft.com/en-us/library/xa0d9ste.aspx
12771 // dllexport/dllimport on inline functions.
12772 if (D->hasAttr<DLLImportAttr>()) {
12773 if (L == GVA_DiscardableODR || L == GVA_StrongODR)
12774 return GVA_AvailableExternally;
12775 } else if (D->hasAttr<DLLExportAttr>()) {
12776 if (L == GVA_DiscardableODR)
12777 return GVA_StrongODR;
12778 } else if (Context.getLangOpts().CUDA && Context.getLangOpts().CUDAIsDevice) {
12779 // Device-side functions with __global__ attribute must always be
12780 // visible externally so they can be launched from host.
12781 if (D->hasAttr<CUDAGlobalAttr>() &&
12782 (L == GVA_DiscardableODR || L == GVA_Internal))
12783 return GVA_StrongODR;
12784 // Single source offloading languages like CUDA/HIP need to be able to
12785 // access static device variables from host code of the same compilation
12786 // unit. This is done by externalizing the static variable with a shared
12787 // name between the host and device compilation which is the same for the
12788 // same compilation unit whereas different among different compilation
12789 // units.
12790 if (Context.shouldExternalize(D))
12791 return GVA_StrongExternal;
12792 }
12793 return L;
12794}
12795
12796/// Adjust the GVALinkage for a declaration based on what an external AST source
12797/// knows about whether there can be other definitions of this declaration.
12798static GVALinkage
12799adjustGVALinkageForExternalDefinitionKind(const ASTContext &Ctx, const Decl *D,
12800 GVALinkage L) {
12801 ExternalASTSource *Source = Ctx.getExternalSource();
12802 if (!Source)
12803 return L;
12804
12805 switch (Source->hasExternalDefinitions(D)) {
12806 case ExternalASTSource::EK_Never:
12807 // Other translation units rely on us to provide the definition.
12808 if (L == GVA_DiscardableODR)
12809 return GVA_StrongODR;
12810 break;
12811
12812 case ExternalASTSource::EK_Always:
12813 return GVA_AvailableExternally;
12814
12815 case ExternalASTSource::EK_ReplyHazy:
12816 break;
12817 }
12818 return L;
12819}
12820
12821GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const {
12822 return adjustGVALinkageForExternalDefinitionKind(Ctx: *this, D: FD,
12823 L: adjustGVALinkageForAttributes(Context: *this, D: FD,
12824 L: basicGVALinkageForFunction(Context: *this, FD)));
12825}
12826
// Compute the base GVA linkage for a variable from its visibility, storage,
// inline-definition kind, and template specialization kind — before any
// adjustments for attributes or external AST sources.
static GVALinkage basicGVALinkageForVariable(const ASTContext &Context,
                                             const VarDecl *VD) {
  // As an extension for interactive REPLs, make sure constant variables are
  // only emitted once instead of LinkageComputer::getLVForNamespaceScopeDecl
  // marking them as internal.
  if (Context.getLangOpts().CPlusPlus &&
      Context.getLangOpts().IncrementalExtensions &&
      VD->getType().isConstQualified() &&
      !VD->getType().isVolatileQualified() && !VD->isInline() &&
      !isa<VarTemplateSpecializationDecl>(Val: VD) && !VD->getDescribedVarTemplate())
    return GVA_DiscardableODR;

  if (!VD->isExternallyVisible())
    return GVA_Internal;

  if (VD->isStaticLocal()) {
    // Walk up to the nearest enclosing FunctionDecl, skipping any
    // intermediate non-function lexical scopes.
    const DeclContext *LexicalContext = VD->getParentFunctionOrMethod();
    while (LexicalContext && !isa<FunctionDecl>(Val: LexicalContext))
      LexicalContext = LexicalContext->getLexicalParent();

    // ObjC Blocks can create local variables that don't have a FunctionDecl
    // LexicalContext.
    if (!LexicalContext)
      return GVA_DiscardableODR;

    // Otherwise, let the static local variable inherit its linkage from the
    // nearest enclosing function.
    auto StaticLocalLinkage =
        Context.GetGVALinkageForFunction(FD: cast<FunctionDecl>(Val: LexicalContext));

    // Itanium ABI 5.2.2: "Each COMDAT group [for a static local variable] must
    // be emitted in any object with references to the symbol for the object it
    // contains, whether inline or out-of-line."
    // Similar behavior is observed with MSVC. An alternative ABI could use
    // StrongODR/AvailableExternally to match the function, but none are
    // known/supported currently.
    if (StaticLocalLinkage == GVA_StrongODR ||
        StaticLocalLinkage == GVA_AvailableExternally)
      return GVA_DiscardableODR;
    return StaticLocalLinkage;
  }

  // MSVC treats in-class initialized static data members as definitions.
  // By giving them non-strong linkage, out-of-line definitions won't
  // cause link errors.
  if (Context.isMSStaticDataMemberInlineDefinition(VD))
    return GVA_DiscardableODR;

  // Most non-template variables have strong linkage; inline variables are
  // linkonce_odr or (occasionally, for compatibility) weak_odr.
  GVALinkage StrongLinkage;
  switch (Context.getInlineVariableDefinitionKind(VD)) {
  case ASTContext::InlineVariableDefinitionKind::None:
    StrongLinkage = GVA_StrongExternal;
    break;
  case ASTContext::InlineVariableDefinitionKind::Weak:
  case ASTContext::InlineVariableDefinitionKind::WeakUnknown:
    StrongLinkage = GVA_DiscardableODR;
    break;
  case ASTContext::InlineVariableDefinitionKind::Strong:
    StrongLinkage = GVA_StrongODR;
    break;
  }

  switch (VD->getTemplateSpecializationKind()) {
  case TSK_Undeclared:
    return StrongLinkage;

  case TSK_ExplicitSpecialization:
    // On the MS ABI, explicitly specialized static data members get ODR
    // linkage; otherwise they behave like ordinary variables.
    return Context.getTargetInfo().getCXXABI().isMicrosoft() &&
                   VD->isStaticDataMember()
               ? GVA_StrongODR
               : StrongLinkage;

  case TSK_ExplicitInstantiationDefinition:
    return GVA_StrongODR;

  case TSK_ExplicitInstantiationDeclaration:
    return GVA_AvailableExternally;

  case TSK_ImplicitInstantiation:
    return GVA_DiscardableODR;
  }

  llvm_unreachable("Invalid Linkage!");
}
12913
12914GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) const {
12915 return adjustGVALinkageForExternalDefinitionKind(Ctx: *this, D: VD,
12916 L: adjustGVALinkageForAttributes(Context: *this, D: VD,
12917 L: basicGVALinkageForVariable(Context: *this, VD)));
12918}
12919
/// Determine whether the declaration \p D must be emitted (as opposed to
/// being deferrable or discardable). Only file-scope variables, function
/// definitions, and a handful of pragma/OpenMP/import declarations can
/// require emission; everything else returns false.
bool ASTContext::DeclMustBeEmitted(const Decl *D) {
  // First, filter by declaration kind.
  if (const auto *VD = dyn_cast<VarDecl>(Val: D)) {
    if (!VD->isFileVarDecl())
      return false;
    // Global named register variables (GNU extension) are never emitted.
    if (VD->getStorageClass() == SC_Register)
      return false;
    // Templates themselves (and partial specializations) are not emitted;
    // only their instantiations are.
    if (VD->getDescribedVarTemplate() ||
        isa<VarTemplatePartialSpecializationDecl>(Val: VD))
      return false;
  } else if (const auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
    // We never need to emit an uninstantiated function template.
    if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
      return false;
  } else if (isa<PragmaCommentDecl>(Val: D))
    return true;
  else if (isa<PragmaDetectMismatchDecl>(Val: D))
    return true;
  else if (isa<OMPRequiresDecl>(Val: D))
    return true;
  else if (isa<OMPThreadPrivateDecl>(Val: D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<OMPAllocateDecl>(Val: D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<OMPDeclareReductionDecl>(Val: D) || isa<OMPDeclareMapperDecl>(Val: D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<ImportDecl>(Val: D))
    return true;
  else
    return false;

  // If this is a member of a class template, we do not need to emit it.
  if (D->getDeclContext()->isDependentContext())
    return false;

  // Weak references don't produce any output by themselves.
  if (D->hasAttr<WeakRefAttr>())
    return false;

  // Aliases and used decls are required.
  if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>())
    return true;

  if (const auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
    // Forward declarations aren't required.
    if (!FD->doesThisDeclarationHaveABody())
      return FD->doesDeclarationForceExternallyVisibleDefinition();

    // Function definitions with the sycl_kernel_entry_point attribute are
    // required during device compilation so that SYCL kernel caller offload
    // entry points are emitted.
    if (LangOpts.SYCLIsDevice && FD->hasAttr<SYCLKernelEntryPointAttr>())
      return true;

    // FIXME: Functions declared with SYCL_EXTERNAL are required during
    // device compilation.

    // Constructors and destructors are required.
    if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>())
      return true;

    // The key function for a class is required. This rule only comes
    // into play when inline functions can be key functions, though.
    if (getTargetInfo().getCXXABI().canKeyFunctionBeInline()) {
      if (const auto *MD = dyn_cast<CXXMethodDecl>(Val: FD)) {
        const CXXRecordDecl *RD = MD->getParent();
        if (MD->isOutOfLine() && RD->isDynamicClass()) {
          const CXXMethodDecl *KeyFunc = getCurrentKeyFunction(RD);
          if (KeyFunc && KeyFunc->getCanonicalDecl() == MD->getCanonicalDecl())
            return true;
        }
      }
    }

    GVALinkage Linkage = GetGVALinkageForFunction(FD);

    // static, static inline, always_inline, and extern inline functions can
    // always be deferred. Normal inline functions can be deferred in C99/C++.
    // Implicit template instantiations can also be deferred in C++.
    return !isDiscardableGVALinkage(L: Linkage);
  }

  // From here on, D is known to be a file-scope variable.
  const auto *VD = cast<VarDecl>(Val: D);
  assert(VD->isFileVarDecl() && "Expected file scoped var");

  // If the decl is marked as `declare target to`, it should be emitted for the
  // host and for the device.
  if (LangOpts.OpenMP &&
      OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
    return true;

  // Pure declarations (tentative or extern) need no emission, except for the
  // MSVC in-class static data member case.
  if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly &&
      !isMSStaticDataMemberInlineDefinition(VD))
    return false;

  if (VD->shouldEmitInExternalSource())
    return false;

  // Variables that can be needed in other TUs are required.
  auto Linkage = GetGVALinkageForVariable(VD);
  if (!isDiscardableGVALinkage(L: Linkage))
    return true;

  // We never need to emit a variable that is available in another TU.
  if (Linkage == GVA_AvailableExternally)
    return false;

  // Variables that have destruction with side-effects are required.
  if (VD->needsDestruction(Ctx: *this))
    return true;

  // Variables that have initialization with side-effects are required.
  if (VD->hasInitWithSideEffects())
    return true;

  // Likewise, variables with tuple-like bindings are required if their
  // bindings have side-effects.
  if (const auto *DD = dyn_cast<DecompositionDecl>(Val: VD)) {
    for (const auto *BD : DD->flat_bindings())
      if (const auto *BindingVD = BD->getHoldingVar())
        if (DeclMustBeEmitted(D: BindingVD))
          return true;
  }

  return false;
}
13046
13047void ASTContext::forEachMultiversionedFunctionVersion(
13048 const FunctionDecl *FD,
13049 llvm::function_ref<void(FunctionDecl *)> Pred) const {
13050 assert(FD->isMultiVersion() && "Only valid for multiversioned functions");
13051 llvm::SmallDenseSet<const FunctionDecl*, 4> SeenDecls;
13052 FD = FD->getMostRecentDecl();
13053 // FIXME: The order of traversal here matters and depends on the order of
13054 // lookup results, which happens to be (mostly) oldest-to-newest, but we
13055 // shouldn't rely on that.
13056 for (auto *CurDecl :
13057 FD->getDeclContext()->getRedeclContext()->lookup(Name: FD->getDeclName())) {
13058 FunctionDecl *CurFD = CurDecl->getAsFunction()->getMostRecentDecl();
13059 if (CurFD && hasSameType(T1: CurFD->getType(), T2: FD->getType()) &&
13060 SeenDecls.insert(V: CurFD).second) {
13061 Pred(CurFD);
13062 }
13063 }
13064}
13065
13066CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic,
13067 bool IsCXXMethod,
13068 bool IsBuiltin) const {
13069 // Pass through to the C++ ABI object
13070 if (IsCXXMethod)
13071 return ABI->getDefaultMethodCallConv(isVariadic: IsVariadic);
13072
13073 // Builtins ignore user-specified default calling convention and remain the
13074 // Target's default calling convention.
13075 if (!IsBuiltin) {
13076 switch (LangOpts.getDefaultCallingConv()) {
13077 case LangOptions::DCC_None:
13078 break;
13079 case LangOptions::DCC_CDecl:
13080 return CC_C;
13081 case LangOptions::DCC_FastCall:
13082 if (getTargetInfo().hasFeature(Feature: "sse2") && !IsVariadic)
13083 return CC_X86FastCall;
13084 break;
13085 case LangOptions::DCC_StdCall:
13086 if (!IsVariadic)
13087 return CC_X86StdCall;
13088 break;
13089 case LangOptions::DCC_VectorCall:
13090 // __vectorcall cannot be applied to variadic functions.
13091 if (!IsVariadic)
13092 return CC_X86VectorCall;
13093 break;
13094 case LangOptions::DCC_RegCall:
13095 // __regcall cannot be applied to variadic functions.
13096 if (!IsVariadic)
13097 return CC_X86RegCall;
13098 break;
13099 case LangOptions::DCC_RtdCall:
13100 if (!IsVariadic)
13101 return CC_M68kRTD;
13102 break;
13103 }
13104 }
13105 return Target->getDefaultCallingConv();
13106}
13107
// Whether \p RD counts as "nearly empty"; the determination is delegated
// entirely to the C++ ABI object, which knows the ABI-specific rules.
bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const {
  // Pass through to the C++ ABI object
  return ABI->isNearlyEmpty(RD);
}
13112
13113VTableContextBase *ASTContext::getVTableContext() {
13114 if (!VTContext) {
13115 auto ABI = Target->getCXXABI();
13116 if (ABI.isMicrosoft())
13117 VTContext.reset(p: new MicrosoftVTableContext(*this));
13118 else {
13119 auto ComponentLayout = getLangOpts().RelativeCXXABIVTables
13120 ? ItaniumVTableContext::Relative
13121 : ItaniumVTableContext::Pointer;
13122 VTContext.reset(p: new ItaniumVTableContext(*this, ComponentLayout));
13123 }
13124 }
13125 return VTContext.get();
13126}
13127
// Create a mangle context for the given target's C++ ABI (or for this
// context's own target when \p T is null): Microsoft mangling for the MS ABI,
// Itanium mangling for all other supported ABI kinds.
MangleContext *ASTContext::createMangleContext(const TargetInfo *T) {
  // Default to this ASTContext's target if none was supplied.
  if (!T)
    T = Target;
  switch (T->getCXXABI().getKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::XL:
    return ItaniumMangleContext::create(Context&: *this, Diags&: getDiagnostics());
  case TargetCXXABI::Microsoft:
    return MicrosoftMangleContext::create(Context&: *this, Diags&: getDiagnostics());
  }
  llvm_unreachable("Unsupported ABI");
}
13148
// Create an auxiliary (device-side) mangle context for target \p T. Lambdas
// are discriminated using their device-side mangling number
// (getDeviceLambdaManglingNumber) rather than the default numbering.
MangleContext *ASTContext::createDeviceMangleContext(const TargetInfo &T) {
  assert(T.getCXXABI().getKind() != TargetCXXABI::Microsoft &&
         "Device mangle context does not support Microsoft mangling.");
  switch (T.getCXXABI().getKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::XL:
    return ItaniumMangleContext::create(
        Context&: *this, Diags&: getDiagnostics(),
        Discriminator: [](ASTContext &, const NamedDecl *ND) -> UnsignedOrNone {
          // Use the device-side lambda mangling number as the discriminator;
          // for non-record declarations, fall back to the default.
          if (const auto *RD = dyn_cast<CXXRecordDecl>(Val: ND))
            return RD->getDeviceLambdaManglingNumber();
          return std::nullopt;
        },
        /*IsAux=*/true);
  case TargetCXXABI::Microsoft:
    // Rejected by the assert above; kept so the switch stays exhaustive.
    return MicrosoftMangleContext::create(Context&: *this, Diags&: getDiagnostics(),
                                          /*IsAux=*/true);
  }
  llvm_unreachable("Unsupported ABI");
}
13177
13178CXXABI::~CXXABI() = default;
13179
// Sum of the heap capacity of the context's side tables. Only container
// capacity is counted, not the objects the tables point at.
size_t ASTContext::getSideTableAllocatedMemory() const {
  return ASTRecordLayouts.getMemorySize() +
         llvm::capacity_in_bytes(X: ObjCLayouts) +
         llvm::capacity_in_bytes(X: KeyFunctions) +
         llvm::capacity_in_bytes(X: ObjCImpls) +
         llvm::capacity_in_bytes(X: BlockVarCopyInits) +
         llvm::capacity_in_bytes(X: DeclAttrs) +
         llvm::capacity_in_bytes(X: TemplateOrInstantiation) +
         llvm::capacity_in_bytes(X: InstantiatedFromUsingDecl) +
         llvm::capacity_in_bytes(X: InstantiatedFromUsingShadowDecl) +
         llvm::capacity_in_bytes(X: InstantiatedFromUnnamedFieldDecl) +
         llvm::capacity_in_bytes(X: OverriddenMethods) +
         llvm::capacity_in_bytes(X: Types) +
         llvm::capacity_in_bytes(x: VariableArrayTypes);
}
13195
13196/// getIntTypeForBitwidth -
13197/// sets integer QualTy according to specified details:
13198/// bitwidth, signed/unsigned.
13199/// Returns empty type if there is no appropriate target types.
13200QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth,
13201 unsigned Signed) const {
13202 TargetInfo::IntType Ty = getTargetInfo().getIntTypeByWidth(BitWidth: DestWidth, IsSigned: Signed);
13203 CanQualType QualTy = getFromTargetType(Type: Ty);
13204 if (!QualTy && DestWidth == 128)
13205 return Signed ? Int128Ty : UnsignedInt128Ty;
13206 return QualTy;
13207}
13208
/// getRealTypeForBitwidth -
/// Returns the floating-point QualType for the specified bitwidth, as
/// selected by the target. Returns an empty type if the target has no
/// appropriate floating-point type.
QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth,
                                            FloatModeKind ExplicitType) const {
  // The target picks which float semantics a given width maps to; translate
  // that choice into one of the context's builtin types.
  FloatModeKind Ty =
      getTargetInfo().getRealTypeByWidth(BitWidth: DestWidth, ExplicitType);
  switch (Ty) {
  case FloatModeKind::Half:
    return HalfTy;
  case FloatModeKind::Float:
    return FloatTy;
  case FloatModeKind::Double:
    return DoubleTy;
  case FloatModeKind::LongDouble:
    return LongDoubleTy;
  case FloatModeKind::Float128:
    return Float128Ty;
  case FloatModeKind::Ibm128:
    return Ibm128Ty;
  case FloatModeKind::NoFloat:
    // Target has no floating-point type of the requested width.
    return {};
  }

  llvm_unreachable("Unhandled TargetInfo::RealType value");
}
13235
13236void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) {
13237 if (Number <= 1)
13238 return;
13239
13240 MangleNumbers[ND] = Number;
13241
13242 if (Listener)
13243 Listener->AddedManglingNumber(D: ND, Number);
13244}
13245
// Returns the mangling number recorded for ND; declarations without an
// entry implicitly have number 1.
unsigned ASTContext::getManglingNumber(const NamedDecl *ND,
                                       bool ForAuxTarget) const {
  auto I = MangleNumbers.find(Key: ND);
  unsigned Res = I != MangleNumbers.end() ? I->second : 1;
  // CUDA/HIP host compilation encodes host and device mangling numbers
  // as lower and upper half of 32 bit integer.
  if (LangOpts.CUDA && !LangOpts.CUDAIsDevice) {
    Res = ForAuxTarget ? Res >> 16 : Res & 0xFFFF;
  } else {
    assert(!ForAuxTarget && "Only CUDA/HIP host compilation supports mangling "
                            "number for aux target");
  }
  // Normalize anything below the default back to 1 (e.g. a zero half-word).
  return Res > 1 ? Res : 1;
}
13260
13261void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) {
13262 if (Number <= 1)
13263 return;
13264
13265 StaticLocalNumbers[VD] = Number;
13266
13267 if (Listener)
13268 Listener->AddedStaticLocalNumbers(D: VD, Number);
13269}
13270
13271unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const {
13272 auto I = StaticLocalNumbers.find(Key: VD);
13273 return I != StaticLocalNumbers.end() ? I->second : 1;
13274}
13275
13276void ASTContext::setIsDestroyingOperatorDelete(const FunctionDecl *FD,
13277 bool IsDestroying) {
13278 if (!IsDestroying) {
13279 assert(!DestroyingOperatorDeletes.contains(FD->getCanonicalDecl()));
13280 return;
13281 }
13282 DestroyingOperatorDeletes.insert(V: FD->getCanonicalDecl());
13283}
13284
13285bool ASTContext::isDestroyingOperatorDelete(const FunctionDecl *FD) const {
13286 return DestroyingOperatorDeletes.contains(V: FD->getCanonicalDecl());
13287}
13288
13289void ASTContext::setIsTypeAwareOperatorNewOrDelete(const FunctionDecl *FD,
13290 bool IsTypeAware) {
13291 if (!IsTypeAware) {
13292 assert(!TypeAwareOperatorNewAndDeletes.contains(FD->getCanonicalDecl()));
13293 return;
13294 }
13295 TypeAwareOperatorNewAndDeletes.insert(V: FD->getCanonicalDecl());
13296}
13297
13298bool ASTContext::isTypeAwareOperatorNewOrDelete(const FunctionDecl *FD) const {
13299 return TypeAwareOperatorNewAndDeletes.contains(V: FD->getCanonicalDecl());
13300}
13301
13302MangleNumberingContext &
13303ASTContext::getManglingNumberContext(const DeclContext *DC) {
13304 assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
13305 std::unique_ptr<MangleNumberingContext> &MCtx = MangleNumberingContexts[DC];
13306 if (!MCtx)
13307 MCtx = createMangleNumberingContext();
13308 return *MCtx;
13309}
13310
13311MangleNumberingContext &
13312ASTContext::getManglingNumberContext(NeedExtraManglingDecl_t, const Decl *D) {
13313 assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
13314 std::unique_ptr<MangleNumberingContext> &MCtx =
13315 ExtraMangleNumberingContexts[D];
13316 if (!MCtx)
13317 MCtx = createMangleNumberingContext();
13318 return *MCtx;
13319}
13320
// The numbering scheme is ABI-specific, so construction is deferred to the
// C++ ABI object.
std::unique_ptr<MangleNumberingContext>
ASTContext::createMangleNumberingContext() const {
  return ABI->createMangleNumberingContext();
}
13325
13326const CXXConstructorDecl *
13327ASTContext::getCopyConstructorForExceptionObject(CXXRecordDecl *RD) {
13328 return ABI->getCopyConstructorForExceptionObject(
13329 cast<CXXRecordDecl>(Val: RD->getFirstDecl()));
13330}
13331
13332void ASTContext::addCopyConstructorForExceptionObject(CXXRecordDecl *RD,
13333 CXXConstructorDecl *CD) {
13334 return ABI->addCopyConstructorForExceptionObject(
13335 cast<CXXRecordDecl>(Val: RD->getFirstDecl()),
13336 cast<CXXConstructorDecl>(Val: CD->getFirstDecl()));
13337}
13338
13339void ASTContext::addTypedefNameForUnnamedTagDecl(TagDecl *TD,
13340 TypedefNameDecl *DD) {
13341 return ABI->addTypedefNameForUnnamedTagDecl(TD, DD);
13342}
13343
13344TypedefNameDecl *
13345ASTContext::getTypedefNameForUnnamedTagDecl(const TagDecl *TD) {
13346 return ABI->getTypedefNameForUnnamedTagDecl(TD);
13347}
13348
13349void ASTContext::addDeclaratorForUnnamedTagDecl(TagDecl *TD,
13350 DeclaratorDecl *DD) {
13351 return ABI->addDeclaratorForUnnamedTagDecl(TD, DD);
13352}
13353
13354DeclaratorDecl *ASTContext::getDeclaratorForUnnamedTagDecl(const TagDecl *TD) {
13355 return ABI->getDeclaratorForUnnamedTagDecl(TD);
13356}
13357
13358void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) {
13359 ParamIndices[D] = index;
13360}
13361
13362unsigned ASTContext::getParameterIndex(const ParmVarDecl *D) const {
13363 ParameterIndexTable::const_iterator I = ParamIndices.find(Val: D);
13364 assert(I != ParamIndices.end() &&
13365 "ParmIndices lacks entry set by ParmVarDecl");
13366 return I->second;
13367}
13368
// Builds the array type for a string literal of `Length` characters of
// element type EltTy; the stored length excludes the null terminator, but
// the resulting array includes space for it.
QualType ASTContext::getStringLiteralArrayType(QualType EltTy,
                                               unsigned Length) const {
  // A C++ string literal has a const-qualified element type (C++ 2.13.4p1).
  if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings)
    EltTy = EltTy.withConst();

  EltTy = adjustStringLiteralBaseType(Ty: EltTy);

  // Get an array type for the string, according to C99 6.4.5. This includes
  // the null terminator character.
  return getConstantArrayType(EltTy, ArySizeIn: llvm::APInt(32, Length + 1), SizeExpr: nullptr,
                              ASM: ArraySizeModifier::Normal, /*IndexTypeQuals*/ 0);
}
13382
// Returns a cached ordinary StringLiteral for Key, creating (and caching)
// it on first request so each distinct string is materialized only once.
StringLiteral *
ASTContext::getPredefinedStringLiteralFromCache(StringRef Key) const {
  StringLiteral *&Result = StringLiteralCache[Key];
  if (!Result)
    Result = StringLiteral::Create(
        Ctx: *this, Str: Key, Kind: StringLiteralKind::Ordinary,
        /*Pascal*/ false, Ty: getStringLiteralArrayType(EltTy: CharTy, Length: Key.size()),
        Locs: SourceLocation());
  return Result;
}
13393
// Returns the unique MSGuidDecl for the given GUID parts, creating it on
// first request. Uniquing is done through a FoldingSet keyed on the parts.
MSGuidDecl *
ASTContext::getMSGuidDecl(MSGuidDecl::Parts Parts) const {
  assert(MSGuidTagDecl && "building MS GUID without MS extensions?");

  llvm::FoldingSetNodeID ID;
  MSGuidDecl::Profile(ID, P: Parts);

  void *InsertPos;
  if (MSGuidDecl *Existing = MSGuidDecls.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;

  // GUID objects are const-qualified (see getMSGuidType).
  QualType GUIDType = getMSGuidType().withConst();
  MSGuidDecl *New = MSGuidDecl::Create(C: *this, T: GUIDType, P: Parts);
  MSGuidDecls.InsertNode(N: New, InsertPos);
  return New;
}
13410
// Returns the unique UnnamedGlobalConstantDecl for (Ty, APVal), creating it
// on first request. Uniquing is done through a FoldingSet.
UnnamedGlobalConstantDecl *
ASTContext::getUnnamedGlobalConstantDecl(QualType Ty,
                                         const APValue &APVal) const {
  llvm::FoldingSetNodeID ID;
  UnnamedGlobalConstantDecl::Profile(ID, Ty, APVal);

  void *InsertPos;
  if (UnnamedGlobalConstantDecl *Existing =
          UnnamedGlobalConstantDecls.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;

  UnnamedGlobalConstantDecl *New =
      UnnamedGlobalConstantDecl::Create(C: *this, T: Ty, APVal);
  UnnamedGlobalConstantDecls.InsertNode(N: New, InsertPos);
  return New;
}
13427
// Returns the unique TemplateParamObjectDecl for a class-type non-type
// template argument with value V, creating it on first request.
TemplateParamObjectDecl *
ASTContext::getTemplateParamObjectDecl(QualType T, const APValue &V) const {
  assert(T->isRecordType() && "template param object of unexpected type");

  // C++ [temp.param]p8:
  //   [...] a static storage duration object of type 'const T' [...]
  T.addConst();

  llvm::FoldingSetNodeID ID;
  TemplateParamObjectDecl::Profile(ID, T, V);

  void *InsertPos;
  if (TemplateParamObjectDecl *Existing =
          TemplateParamObjectDecls.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;

  TemplateParamObjectDecl *New = TemplateParamObjectDecl::Create(C: *this, T, V);
  TemplateParamObjectDecls.InsertNode(N: New, InsertPos);
  return New;
}
13448
// Returns true if lowering this atomic expression would require a libcall
// that the deployment target does not provide.
bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const {
  // Only Darwin targets are affected at all.
  const llvm::Triple &T = getTargetInfo().getTriple();
  if (!T.isOSDarwin())
    return false;

  // Only old deployment targets are affected: iOS < 7 and macOS < 10.9.
  if (!(T.isiOS() && T.isOSVersionLT(Major: 7)) &&
      !(T.isMacOSX() && T.isOSVersionLT(Major: 10, Minor: 9)))
    return false;

  // A libcall is needed when the atomic cannot be inlined: either its size
  // and alignment disagree, or it is wider than the target's maximum inline
  // atomic width.
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  CharUnits sizeChars = getTypeSizeInChars(T: AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getTypeAlignInChars(T: AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits = getTargetInfo().getMaxAtomicInlineWidth();
  return (Size != Align || toBits(CharSize: sizeChars) > MaxInlineWidthInBits);
}
13466
// Returns true if an Objective-C method declaration and its implementation
// match: same ObjC decl qualifiers, return type, parameter count/qualifiers/
// types, and variadic-ness.
bool
ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl,
                                const ObjCMethodDecl *MethodImpl) {
  // No point trying to match an unavailable/deprecated method.
  if (MethodDecl->hasAttr<UnavailableAttr>()
      || MethodDecl->hasAttr<DeprecatedAttr>())
    return false;
  if (MethodDecl->getObjCDeclQualifier() !=
      MethodImpl->getObjCDeclQualifier())
    return false;
  if (!hasSameType(T1: MethodDecl->getReturnType(), T2: MethodImpl->getReturnType()))
    return false;

  if (MethodDecl->param_size() != MethodImpl->param_size())
    return false;

  // Compare each parameter pairwise; the size check above guarantees both
  // sequences are exhausted together.
  for (ObjCMethodDecl::param_const_iterator IM = MethodImpl->param_begin(),
       IF = MethodDecl->param_begin(), EM = MethodImpl->param_end(),
       EF = MethodDecl->param_end();
       IM != EM && IF != EF; ++IM, ++IF) {
    const ParmVarDecl *DeclVar = (*IF);
    const ParmVarDecl *ImplVar = (*IM);
    if (ImplVar->getObjCDeclQualifier() != DeclVar->getObjCDeclQualifier())
      return false;
    if (!hasSameType(T1: DeclVar->getType(), T2: ImplVar->getType()))
      return false;
  }

  return (MethodDecl->isVariadic() == MethodImpl->isVariadic());
}
13497
13498uint64_t ASTContext::getTargetNullPointerValue(QualType QT) const {
13499 LangAS AS;
13500 if (QT->getUnqualifiedDesugaredType()->isNullPtrType())
13501 AS = LangAS::Default;
13502 else
13503 AS = QT->getPointeeType().getAddressSpace();
13504
13505 return getTargetInfo().getNullPointerValue(AddrSpace: AS);
13506}
13507
13508unsigned ASTContext::getTargetAddressSpace(LangAS AS) const {
13509 return getTargetInfo().getTargetAddressSpace(AS);
13510}
13511
// Returns true if the two expressions are structurally equivalent, compared
// via their canonical profiles. Null inputs only compare equal to each other.
bool ASTContext::hasSameExpr(const Expr *X, const Expr *Y) const {
  if (X == Y)
    return true;
  if (!X || !Y)
    return false;
  llvm::FoldingSetNodeID IDX, IDY;
  X->Profile(ID&: IDX, Context: *this, /*Canonical=*/true);
  Y->Profile(ID&: IDY, Context: *this, /*Canonical=*/true);
  return IDX == IDY;
}
13522
13523// The getCommon* helpers return, for given 'same' X and Y entities given as
13524// inputs, another entity which is also the 'same' as the inputs, but which
13525// is closer to the canonical form of the inputs, each according to a given
13526// criteria.
13527// The getCommon*Checked variants are 'null inputs not-allowed' equivalents of
13528// the regular ones.
13529
13530static Decl *getCommonDecl(Decl *X, Decl *Y) {
13531 if (!declaresSameEntity(D1: X, D2: Y))
13532 return nullptr;
13533 for (const Decl *DX : X->redecls()) {
13534 // If we reach Y before reaching the first decl, that means X is older.
13535 if (DX == Y)
13536 return X;
13537 // If we reach the first decl, then Y is older.
13538 if (DX->isFirstDecl())
13539 return Y;
13540 }
13541 llvm_unreachable("Corrupt redecls chain");
13542}
13543
/// Typed wrapper over getCommonDecl for Decl subclasses; null inputs are
/// allowed and yield a null result.
template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true>
static T *getCommonDecl(T *X, T *Y) {
  return cast_or_null<T>(
      getCommonDecl(X: const_cast<Decl *>(cast_or_null<Decl>(X)),
                    Y: const_cast<Decl *>(cast_or_null<Decl>(Y))));
}
13550
/// 'Null inputs not allowed' variant of the typed getCommonDecl: both inputs
/// must be non-null and declare the same entity; the result is non-null.
template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true>
static T *getCommonDeclChecked(T *X, T *Y) {
  return cast<T>(getCommonDecl(X: const_cast<Decl *>(cast<Decl>(X)),
                               Y: const_cast<Decl *>(cast<Decl>(Y))));
}
13556
/// Returns a common form of the two template names, or a null TemplateName
/// when they do not share a canonical template name.
static TemplateName getCommonTemplateName(ASTContext &Ctx, TemplateName X,
                                          TemplateName Y,
                                          bool IgnoreDeduced = false) {
  // Identical representations share all sugar.
  if (X.getAsVoidPointer() == Y.getAsVoidPointer())
    return X;
  // FIXME: There are cases here where we could find a common template name
  // with more sugar. For example one could be a SubstTemplateTemplate*
  // replacing the other.
  TemplateName CX = Ctx.getCanonicalTemplateName(Name: X, IgnoreDeduced);
  if (CX.getAsVoidPointer() !=
      Ctx.getCanonicalTemplateName(Name: Y).getAsVoidPointer())
    return TemplateName();
  return CX;
}
13571
13572static TemplateName getCommonTemplateNameChecked(ASTContext &Ctx,
13573 TemplateName X, TemplateName Y,
13574 bool IgnoreDeduced) {
13575 TemplateName R = getCommonTemplateName(Ctx, X, Y, IgnoreDeduced);
13576 assert(R.getAsVoidPointer() != nullptr);
13577 return R;
13578}
13579
13580static auto getCommonTypes(ASTContext &Ctx, ArrayRef<QualType> Xs,
13581 ArrayRef<QualType> Ys, bool Unqualified = false) {
13582 assert(Xs.size() == Ys.size());
13583 SmallVector<QualType, 8> Rs(Xs.size());
13584 for (size_t I = 0; I < Rs.size(); ++I)
13585 Rs[I] = Ctx.getCommonSugaredType(X: Xs[I], Y: Ys[I], Unqualified);
13586 return Rs;
13587}
13588
13589template <class T>
13590static SourceLocation getCommonAttrLoc(const T *X, const T *Y) {
13591 return X->getAttributeLoc() == Y->getAttributeLoc() ? X->getAttributeLoc()
13592 : SourceLocation();
13593}
13594
13595static TemplateArgument getCommonTemplateArgument(ASTContext &Ctx,
13596 const TemplateArgument &X,
13597 const TemplateArgument &Y) {
13598 if (X.getKind() != Y.getKind())
13599 return TemplateArgument();
13600
13601 switch (X.getKind()) {
13602 case TemplateArgument::ArgKind::Type:
13603 if (!Ctx.hasSameType(T1: X.getAsType(), T2: Y.getAsType()))
13604 return TemplateArgument();
13605 return TemplateArgument(
13606 Ctx.getCommonSugaredType(X: X.getAsType(), Y: Y.getAsType()));
13607 case TemplateArgument::ArgKind::NullPtr:
13608 if (!Ctx.hasSameType(T1: X.getNullPtrType(), T2: Y.getNullPtrType()))
13609 return TemplateArgument();
13610 return TemplateArgument(
13611 Ctx.getCommonSugaredType(X: X.getNullPtrType(), Y: Y.getNullPtrType()),
13612 /*Unqualified=*/true);
13613 case TemplateArgument::ArgKind::Expression:
13614 if (!Ctx.hasSameType(T1: X.getAsExpr()->getType(), T2: Y.getAsExpr()->getType()))
13615 return TemplateArgument();
13616 // FIXME: Try to keep the common sugar.
13617 return X;
13618 case TemplateArgument::ArgKind::Template: {
13619 TemplateName TX = X.getAsTemplate(), TY = Y.getAsTemplate();
13620 TemplateName CTN = ::getCommonTemplateName(Ctx, X: TX, Y: TY);
13621 if (!CTN.getAsVoidPointer())
13622 return TemplateArgument();
13623 return TemplateArgument(CTN);
13624 }
13625 case TemplateArgument::ArgKind::TemplateExpansion: {
13626 TemplateName TX = X.getAsTemplateOrTemplatePattern(),
13627 TY = Y.getAsTemplateOrTemplatePattern();
13628 TemplateName CTN = ::getCommonTemplateName(Ctx, X: TX, Y: TY);
13629 if (!CTN.getAsVoidPointer())
13630 return TemplateName();
13631 auto NExpX = X.getNumTemplateExpansions();
13632 assert(NExpX == Y.getNumTemplateExpansions());
13633 return TemplateArgument(CTN, NExpX);
13634 }
13635 default:
13636 // FIXME: Handle the other argument kinds.
13637 return X;
13638 }
13639}
13640
13641static bool getCommonTemplateArguments(ASTContext &Ctx,
13642 SmallVectorImpl<TemplateArgument> &R,
13643 ArrayRef<TemplateArgument> Xs,
13644 ArrayRef<TemplateArgument> Ys) {
13645 if (Xs.size() != Ys.size())
13646 return true;
13647 R.resize(N: Xs.size());
13648 for (size_t I = 0; I < R.size(); ++I) {
13649 R[I] = getCommonTemplateArgument(Ctx, X: Xs[I], Y: Ys[I]);
13650 if (R[I].isNull())
13651 return true;
13652 }
13653 return false;
13654}
13655
13656static auto getCommonTemplateArguments(ASTContext &Ctx,
13657 ArrayRef<TemplateArgument> Xs,
13658 ArrayRef<TemplateArgument> Ys) {
13659 SmallVector<TemplateArgument, 8> R;
13660 bool Different = getCommonTemplateArguments(Ctx, R, Xs, Ys);
13661 assert(!Different);
13662 (void)Different;
13663 return R;
13664}
13665
13666template <class T>
13667static ElaboratedTypeKeyword getCommonTypeKeyword(const T *X, const T *Y) {
13668 return X->getKeyword() == Y->getKeyword() ? X->getKeyword()
13669 : ElaboratedTypeKeyword::None;
13670}
13671
/// Returns a NestedNameSpecifier which has only the common sugar
/// present in both NNS1 and NNS2.
///
/// When \p IsSame is true, the two specifiers are known to be equivalent
/// (same canonical form) and the merge must not fail.
static NestedNameSpecifier *getCommonNNS(ASTContext &Ctx,
                                         NestedNameSpecifier *NNS1,
                                         NestedNameSpecifier *NNS2,
                                         bool IsSame) {
  // If they are identical, all sugar is common.
  if (NNS1 == NNS2)
    return NNS1;

  // IsSame implies both NNSes are equivalent.
  NestedNameSpecifier *Canon = Ctx.getCanonicalNestedNameSpecifier(NNS: NNS1);
  if (Canon != Ctx.getCanonicalNestedNameSpecifier(NNS: NNS2)) {
    assert(!IsSame && "Should be the same NestedNameSpecifier");
    // If they are not the same, there is nothing to unify.
    // FIXME: It would be useful here if we could represent a canonically
    // empty NNS, which is not identical to an empty-as-written NNS.
    return nullptr;
  }

  NestedNameSpecifier *R = nullptr;
  NestedNameSpecifier::SpecifierKind K1 = NNS1->getKind(), K2 = NNS2->getKind();
  switch (K1) {
  case NestedNameSpecifier::SpecifierKind::Identifier: {
    assert(K2 == NestedNameSpecifier::SpecifierKind::Identifier);
    IdentifierInfo *II = NNS1->getAsIdentifier();
    assert(II == NNS2->getAsIdentifier());
    // For an identifier, the prefixes are significant, so they must be the
    // same.
    NestedNameSpecifier *P = ::getCommonNNS(Ctx, NNS1: NNS1->getPrefix(),
                                            NNS2: NNS2->getPrefix(), /*IsSame=*/true);
    R = NestedNameSpecifier::Create(Context: Ctx, Prefix: P, II);
    break;
  }
  case NestedNameSpecifier::SpecifierKind::Namespace:
  case NestedNameSpecifier::SpecifierKind::NamespaceAlias: {
    assert(K2 == NestedNameSpecifier::SpecifierKind::Namespace ||
           K2 == NestedNameSpecifier::SpecifierKind::NamespaceAlias);
    // The prefixes for namespaces are not significant, its declaration
    // identifies it uniquely.
    NestedNameSpecifier *P =
        ::getCommonNNS(Ctx, NNS1: NNS1->getPrefix(), NNS2: NNS2->getPrefix(),
                       /*IsSame=*/false);
    NamespaceAliasDecl *A1 = NNS1->getAsNamespaceAlias(),
                       *A2 = NNS2->getAsNamespaceAlias();
    // Are they the same namespace alias?
    if (declaresSameEntity(D1: A1, D2: A2)) {
      R = NestedNameSpecifier::Create(Context: Ctx, Prefix: P, Alias: ::getCommonDeclChecked(X: A1, Y: A2));
      break;
    }
    // Otherwise, look at the namespaces only.
    NamespaceDecl *N1 = A1 ? A1->getNamespace() : NNS1->getAsNamespace(),
                  *N2 = A2 ? A2->getNamespace() : NNS2->getAsNamespace();
    R = NestedNameSpecifier::Create(Context: Ctx, Prefix: P, NS: ::getCommonDeclChecked(X: N1, Y: N2));
    break;
  }
  case NestedNameSpecifier::SpecifierKind::TypeSpec: {
    // FIXME: See comment below, on Super case.
    if (K2 == NestedNameSpecifier::SpecifierKind::Super)
      return Ctx.getCanonicalNestedNameSpecifier(NNS: NNS1);

    assert(K2 == NestedNameSpecifier::SpecifierKind::TypeSpec);

    const Type *T1 = NNS1->getAsType(), *T2 = NNS2->getAsType();
    if (T1 == T2) {
      // If the types are identical, then only the prefixes differ.
      // A well-formed NNS never has these types, as they have
      // special normalized forms.
      assert((!isa<DependentNameType, ElaboratedType>(T1)));
      // Only for a DependentTemplateSpecializationType the prefix
      // is actually significant. A DependentName, which would be another
      // plausible case, cannot occur here, as explained above.
      bool IsSame = isa<DependentTemplateSpecializationType>(Val: T1);
      NestedNameSpecifier *P =
          ::getCommonNNS(Ctx, NNS1: NNS1->getPrefix(), NNS2: NNS2->getPrefix(), IsSame);
      R = NestedNameSpecifier::Create(Context: Ctx, Prefix: P, T: T1);
      break;
    }
    // TODO: Try to salvage the original prefix.
    // If getCommonSugaredType removed any top level sugar, the original prefix
    // is not applicable anymore.
    const Type *T = Ctx.getCommonSugaredType(X: QualType(T1, 0), Y: QualType(T2, 0),
                                             /*Unqualified=*/true)
                        .getTypePtr();

    // A NestedNameSpecifier has special normalization rules for certain types.
    switch (T->getTypeClass()) {
    case Type::Elaborated: {
      // An ElaboratedType is stripped off, its Qualifier becomes the prefix.
      auto *ET = cast<ElaboratedType>(Val: T);
      R = NestedNameSpecifier::Create(Context: Ctx, Prefix: ET->getQualifier(),
                                      T: ET->getNamedType().getTypePtr());
      break;
    }
    case Type::DependentName: {
      // A DependentName is turned into an Identifier NNS.
      auto *DN = cast<DependentNameType>(Val: T);
      R = NestedNameSpecifier::Create(Context: Ctx, Prefix: DN->getQualifier(),
                                      II: DN->getIdentifier());
      break;
    }
    case Type::DependentTemplateSpecialization: {
      // A DependentTemplateSpecializationType loses its Qualifier, which
      // is turned into the prefix.
      auto *DTST = cast<DependentTemplateSpecializationType>(Val: T);
      const DependentTemplateStorage &DTN = DTST->getDependentTemplateName();
      DependentTemplateStorage NewDTN(/*Qualifier=*/nullptr, DTN.getName(),
                                      DTN.hasTemplateKeyword());
      T = Ctx.getDependentTemplateSpecializationType(Keyword: DTST->getKeyword(), Name: NewDTN,
                                                     Args: DTST->template_arguments())
              .getTypePtr();
      R = NestedNameSpecifier::Create(Context: Ctx, Prefix: DTN.getQualifier(), T);
      break;
    }
    default:
      R = NestedNameSpecifier::Create(Context: Ctx, /*Prefix=*/nullptr, T);
      break;
    }
    break;
  }
  case NestedNameSpecifier::SpecifierKind::Super:
    // FIXME: Can __super even be used with data members?
    // If it's only usable in functions, we will never see it here,
    // unless we save the qualifiers used in function types.
    // In that case, it might be possible NNS2 is a type,
    // in which case we should degrade the result to
    // a CXXRecordType.
    return Ctx.getCanonicalNestedNameSpecifier(NNS: NNS1);
  case NestedNameSpecifier::SpecifierKind::Global:
    // The global NNS is a singleton.
    assert(K2 == NestedNameSpecifier::SpecifierKind::Global &&
           "Global NNS cannot be equivalent to any other kind");
    llvm_unreachable("Global NestedNameSpecifiers did not compare equal");
  }
  // The merged specifier must still have the shared canonical form.
  assert(Ctx.getCanonicalNestedNameSpecifier(R) == Canon);
  return R;
}
13809
/// Merge the nested-name qualifiers of two types via getCommonNNS.
template <class T>
static NestedNameSpecifier *getCommonQualifier(ASTContext &Ctx, const T *X,
                                               const T *Y, bool IsSame) {
  return ::getCommonNNS(Ctx, NNS1: X->getQualifier(), NNS2: Y->getQualifier(), IsSame);
}
13815
13816template <class T>
13817static QualType getCommonElementType(ASTContext &Ctx, const T *X, const T *Y) {
13818 return Ctx.getCommonSugaredType(X: X->getElementType(), Y: Y->getElementType());
13819}
13820
/// Merge the element types of two array types. Qualifiers that are not
/// common to both element types are hoisted out through QX/QY so the caller
/// can reapply them at the array level.
template <class T>
static QualType getCommonArrayElementType(ASTContext &Ctx, const T *X,
                                          Qualifiers &QX, const T *Y,
                                          Qualifiers &QY) {
  QualType EX = X->getElementType(), EY = Y->getElementType();
  QualType R = Ctx.getCommonSugaredType(X: EX, Y: EY,
                                        /*Unqualified=*/true);
  // Qualifiers common to both element types.
  Qualifiers RQ = R.getQualifiers();
  // For each side, move to the top level any qualifiers which are not common to
  // both element types. The caller must assume top level qualifiers might
  // be different, even if they are the same type, and can be treated as sugar.
  QX += EX.getQualifiers() - RQ;
  QY += EY.getQualifiers() - RQ;
  return R;
}
13837
13838template <class T>
13839static QualType getCommonPointeeType(ASTContext &Ctx, const T *X, const T *Y) {
13840 return Ctx.getCommonSugaredType(X: X->getPointeeType(), Y: Y->getPointeeType());
13841}
13842
/// The two size expressions are known structurally equivalent; return X's as
/// the representative.
template <class T> static auto *getCommonSizeExpr(ASTContext &Ctx, T *X, T *Y) {
  assert(Ctx.hasSameExpr(X->getSizeExpr(), Y->getSizeExpr()));
  return X->getSizeExpr();
}
13847
13848static auto getCommonSizeModifier(const ArrayType *X, const ArrayType *Y) {
13849 assert(X->getSizeModifier() == Y->getSizeModifier());
13850 return X->getSizeModifier();
13851}
13852
13853static auto getCommonIndexTypeCVRQualifiers(const ArrayType *X,
13854 const ArrayType *Y) {
13855 assert(X->getIndexTypeCVRQualifiers() == Y->getIndexTypeCVRQualifiers());
13856 return X->getIndexTypeCVRQualifiers();
13857}
13858
// Merges two type lists such that the resulting vector will contain
// each type (in a canonical sense) only once, in the order they appear
// from X to Y. If they occur in both X and Y, the result will contain
// the common sugared type between them.
static void mergeTypeLists(ASTContext &Ctx, SmallVectorImpl<QualType> &Out,
                           ArrayRef<QualType> X, ArrayRef<QualType> Y) {
  // Maps the canonical form of each emitted type to its index in Out.
  llvm::DenseMap<QualType, unsigned> Found;
  for (auto Ts : {X, Y}) {
    for (QualType T : Ts) {
      auto Res = Found.try_emplace(Key: Ctx.getCanonicalType(T), Args: Out.size());
      if (!Res.second) {
        // Already present: keep only the sugar common to both occurrences.
        QualType &U = Out[Res.first->second];
        U = Ctx.getCommonSugaredType(X: U, Y: T);
      } else {
        Out.emplace_back(Args&: T);
      }
    }
  }
}
13878
// Merges two exception specifications into one permitting everything either
// of them permits. ExceptionTypeStorage provides backing storage for a
// merged dynamic exception list and must outlive the returned value.
// AcceptDependent controls whether value-dependent computed noexcept
// specifications may be dropped (see comment below) rather than asserting.
FunctionProtoType::ExceptionSpecInfo
ASTContext::mergeExceptionSpecs(FunctionProtoType::ExceptionSpecInfo ESI1,
                                FunctionProtoType::ExceptionSpecInfo ESI2,
                                SmallVectorImpl<QualType> &ExceptionTypeStorage,
                                bool AcceptDependent) {
  ExceptionSpecificationType EST1 = ESI1.Type, EST2 = ESI2.Type;

  // If either of them can throw anything, that is the result.
  for (auto I : {EST_None, EST_MSAny, EST_NoexceptFalse}) {
    if (EST1 == I)
      return ESI1;
    if (EST2 == I)
      return ESI2;
  }

  // If either of them is non-throwing, the result is the other.
  for (auto I :
       {EST_NoThrow, EST_DynamicNone, EST_BasicNoexcept, EST_NoexceptTrue}) {
    if (EST1 == I)
      return ESI2;
    if (EST2 == I)
      return ESI1;
  }

  // If we're left with value-dependent computed noexcept expressions, we're
  // stuck. Before C++17, we can just drop the exception specification entirely,
  // since it's not actually part of the canonical type. And this should never
  // happen in C++17, because it would mean we were computing the composite
  // pointer type of dependent types, which should never happen.
  if (EST1 == EST_DependentNoexcept || EST2 == EST_DependentNoexcept) {
    assert(AcceptDependent &&
           "computing composite pointer type of dependent types");
    return FunctionProtoType::ExceptionSpecInfo();
  }

  // Switch over the possibilities so that people adding new values know to
  // update this function.
  switch (EST1) {
  case EST_None:
  case EST_DynamicNone:
  case EST_MSAny:
  case EST_BasicNoexcept:
  case EST_DependentNoexcept:
  case EST_NoexceptFalse:
  case EST_NoexceptTrue:
  case EST_NoThrow:
    llvm_unreachable("These ESTs should be handled above");

  case EST_Dynamic: {
    // This is the fun case: both exception specifications are dynamic. Form
    // the union of the two lists.
    assert(EST2 == EST_Dynamic && "other cases should already be handled");
    mergeTypeLists(Ctx&: *this, Out&: ExceptionTypeStorage, X: ESI1.Exceptions,
                   Y: ESI2.Exceptions);
    FunctionProtoType::ExceptionSpecInfo Result(EST_Dynamic);
    Result.Exceptions = ExceptionTypeStorage;
    return Result;
  }

  case EST_Unevaluated:
  case EST_Uninstantiated:
  case EST_Unparsed:
    llvm_unreachable("shouldn't see unresolved exception specifications here");
  }

  llvm_unreachable("invalid ExceptionSpecificationType");
}
13946
/// Unify two types of the same type class whose desugared nodes differ.
///
/// This is the worker for ASTContext::getCommonSugaredType in the case where
/// the fully-desugared nodes of X and Y are distinct objects: it rebuilds a
/// single node of the shared class, unifying each nested component (element
/// and pointee types, size expressions, template arguments, declarations, ...)
/// through the getCommon* helpers, which may recurse back into
/// getCommonSugaredType for nested types.
///
/// \p QX and \p QY carry the qualifiers accumulated while desugaring X and Y;
/// the array cases may hoist differing element qualifiers into them via
/// getCommonArrayElementType.
///
/// Unlike getCommonSugarTypeNode, this function cannot fail: X and Y are
/// known to share a canonical type, so a common node must exist. Type classes
/// that can never reach here are routed to llvm_unreachable by the
/// UNEXPECTED_TYPE macros below.
static QualType getCommonNonSugarTypeNode(ASTContext &Ctx, const Type *X,
                                          Qualifiers &QX, const Type *Y,
                                          Qualifiers &QY) {
  Type::TypeClass TC = X->getTypeClass();
  assert(TC == Y->getTypeClass());
  switch (TC) {
// Classes which should have been stripped off (non-canonical sugar), which
// are uniquely identified by their canonical type (sugar-free), or which are
// never uniqued at all (non-unique) cannot appear as distinct "canonical
// nodes" and are therefore unreachable here.
#define UNEXPECTED_TYPE(Class, Kind)                                           \
  case Type::Class:                                                            \
    llvm_unreachable("Unexpected " Kind ": " #Class);

#define NON_CANONICAL_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "non-canonical")
#define TYPE(Class, Base)
#include "clang/AST/TypeNodes.inc"

#define SUGAR_FREE_TYPE(Class) UNEXPECTED_TYPE(Class, "sugar-free")
  SUGAR_FREE_TYPE(Builtin)
  SUGAR_FREE_TYPE(DeducedTemplateSpecialization)
  SUGAR_FREE_TYPE(DependentBitInt)
  SUGAR_FREE_TYPE(Enum)
  SUGAR_FREE_TYPE(BitInt)
  SUGAR_FREE_TYPE(ObjCInterface)
  SUGAR_FREE_TYPE(Record)
  SUGAR_FREE_TYPE(SubstTemplateTypeParmPack)
  SUGAR_FREE_TYPE(UnresolvedUsing)
  SUGAR_FREE_TYPE(HLSLAttributedResource)
  SUGAR_FREE_TYPE(HLSLInlineSpirv)
#undef SUGAR_FREE_TYPE
#define NON_UNIQUE_TYPE(Class) UNEXPECTED_TYPE(Class, "non-unique")
  NON_UNIQUE_TYPE(TypeOfExpr)
  NON_UNIQUE_TYPE(VariableArray)
#undef NON_UNIQUE_TYPE

  UNEXPECTED_TYPE(TypeOf, "sugar")

#undef UNEXPECTED_TYPE

  case Type::Auto: {
    const auto *AX = cast<AutoType>(Val: X), *AY = cast<AutoType>(Val: Y);
    assert(AX->getDeducedType().isNull());
    assert(AY->getDeducedType().isNull());
    assert(AX->getKeyword() == AY->getKeyword());
    assert(AX->isInstantiationDependentType() ==
           AY->isInstantiationDependentType());
    auto As = getCommonTemplateArguments(Ctx, Xs: AX->getTypeConstraintArguments(),
                                         Ys: AY->getTypeConstraintArguments());
    return Ctx.getAutoType(DeducedType: QualType(), Keyword: AX->getKeyword(),
                           IsDependent: AX->isInstantiationDependentType(),
                           IsPack: AX->containsUnexpandedParameterPack(),
                           TypeConstraintConcept: getCommonDeclChecked(X: AX->getTypeConstraintConcept(),
                                                Y: AY->getTypeConstraintConcept()),
                           TypeConstraintArgs: As);
  }
  case Type::IncompleteArray: {
    const auto *AX = cast<IncompleteArrayType>(Val: X),
               *AY = cast<IncompleteArrayType>(Val: Y);
    return Ctx.getIncompleteArrayType(
        elementType: getCommonArrayElementType(Ctx, X: AX, QX, Y: AY, QY),
        ASM: getCommonSizeModifier(X: AX, Y: AY), elementTypeQuals: getCommonIndexTypeCVRQualifiers(X: AX, Y: AY));
  }
  case Type::DependentSizedArray: {
    const auto *AX = cast<DependentSizedArrayType>(Val: X),
               *AY = cast<DependentSizedArrayType>(Val: Y);
    return Ctx.getDependentSizedArrayType(
        elementType: getCommonArrayElementType(Ctx, X: AX, QX, Y: AY, QY),
        numElements: getCommonSizeExpr(Ctx, X: AX, Y: AY), ASM: getCommonSizeModifier(X: AX, Y: AY),
        elementTypeQuals: getCommonIndexTypeCVRQualifiers(X: AX, Y: AY));
  }
  case Type::ConstantArray: {
    const auto *AX = cast<ConstantArrayType>(Val: X),
               *AY = cast<ConstantArrayType>(Val: Y);
    assert(AX->getSize() == AY->getSize());
    // Only preserve the written size expression if both sides agree on it.
    const Expr *SizeExpr = Ctx.hasSameExpr(X: AX->getSizeExpr(), Y: AY->getSizeExpr())
                               ? AX->getSizeExpr()
                               : nullptr;
    return Ctx.getConstantArrayType(
        EltTy: getCommonArrayElementType(Ctx, X: AX, QX, Y: AY, QY), ArySizeIn: AX->getSize(), SizeExpr,
        ASM: getCommonSizeModifier(X: AX, Y: AY), IndexTypeQuals: getCommonIndexTypeCVRQualifiers(X: AX, Y: AY));
  }
  case Type::ArrayParameter: {
    const auto *AX = cast<ArrayParameterType>(Val: X),
               *AY = cast<ArrayParameterType>(Val: Y);
    assert(AX->getSize() == AY->getSize());
    const Expr *SizeExpr = Ctx.hasSameExpr(X: AX->getSizeExpr(), Y: AY->getSizeExpr())
                               ? AX->getSizeExpr()
                               : nullptr;
    // Unify as a ConstantArrayType first, then re-wrap as a parameter type.
    auto ArrayTy = Ctx.getConstantArrayType(
        EltTy: getCommonArrayElementType(Ctx, X: AX, QX, Y: AY, QY), ArySizeIn: AX->getSize(), SizeExpr,
        ASM: getCommonSizeModifier(X: AX, Y: AY), IndexTypeQuals: getCommonIndexTypeCVRQualifiers(X: AX, Y: AY));
    return Ctx.getArrayParameterType(Ty: ArrayTy);
  }
  case Type::Atomic: {
    const auto *AX = cast<AtomicType>(Val: X), *AY = cast<AtomicType>(Val: Y);
    return Ctx.getAtomicType(
        T: Ctx.getCommonSugaredType(X: AX->getValueType(), Y: AY->getValueType()));
  }
  case Type::Complex: {
    const auto *CX = cast<ComplexType>(Val: X), *CY = cast<ComplexType>(Val: Y);
    return Ctx.getComplexType(T: getCommonArrayElementType(Ctx, X: CX, QX, Y: CY, QY));
  }
  case Type::Pointer: {
    const auto *PX = cast<PointerType>(Val: X), *PY = cast<PointerType>(Val: Y);
    return Ctx.getPointerType(T: getCommonPointeeType(Ctx, X: PX, Y: PY));
  }
  case Type::BlockPointer: {
    const auto *PX = cast<BlockPointerType>(Val: X), *PY = cast<BlockPointerType>(Val: Y);
    return Ctx.getBlockPointerType(T: getCommonPointeeType(Ctx, X: PX, Y: PY));
  }
  case Type::ObjCObjectPointer: {
    const auto *PX = cast<ObjCObjectPointerType>(Val: X),
               *PY = cast<ObjCObjectPointerType>(Val: Y);
    return Ctx.getObjCObjectPointerType(ObjectT: getCommonPointeeType(Ctx, X: PX, Y: PY));
  }
  case Type::MemberPointer: {
    const auto *PX = cast<MemberPointerType>(Val: X),
               *PY = cast<MemberPointerType>(Val: Y);
    assert(declaresSameEntity(PX->getMostRecentCXXRecordDecl(),
                              PY->getMostRecentCXXRecordDecl()));
    return Ctx.getMemberPointerType(
        T: getCommonPointeeType(Ctx, X: PX, Y: PY),
        Qualifier: getCommonQualifier(Ctx, X: PX, Y: PY, /*IsSame=*/true),
        Cls: PX->getMostRecentCXXRecordDecl());
  }
  case Type::LValueReference: {
    const auto *PX = cast<LValueReferenceType>(Val: X),
               *PY = cast<LValueReferenceType>(Val: Y);
    // FIXME: Preserve PointeeTypeAsWritten.
    return Ctx.getLValueReferenceType(T: getCommonPointeeType(Ctx, X: PX, Y: PY),
                                      SpelledAsLValue: PX->isSpelledAsLValue() ||
                                          PY->isSpelledAsLValue());
  }
  case Type::RValueReference: {
    const auto *PX = cast<RValueReferenceType>(Val: X),
               *PY = cast<RValueReferenceType>(Val: Y);
    // FIXME: Preserve PointeeTypeAsWritten.
    return Ctx.getRValueReferenceType(T: getCommonPointeeType(Ctx, X: PX, Y: PY));
  }
  case Type::DependentAddressSpace: {
    const auto *PX = cast<DependentAddressSpaceType>(Val: X),
               *PY = cast<DependentAddressSpaceType>(Val: Y);
    assert(Ctx.hasSameExpr(PX->getAddrSpaceExpr(), PY->getAddrSpaceExpr()));
    return Ctx.getDependentAddressSpaceType(PointeeType: getCommonPointeeType(Ctx, X: PX, Y: PY),
                                            AddrSpaceExpr: PX->getAddrSpaceExpr(),
                                            AttrLoc: getCommonAttrLoc(X: PX, Y: PY));
  }
  case Type::FunctionNoProto: {
    const auto *FX = cast<FunctionNoProtoType>(Val: X),
               *FY = cast<FunctionNoProtoType>(Val: Y);
    assert(FX->getExtInfo() == FY->getExtInfo());
    return Ctx.getFunctionNoProtoType(
        ResultTy: Ctx.getCommonSugaredType(X: FX->getReturnType(), Y: FY->getReturnType()),
        Info: FX->getExtInfo());
  }
  case Type::FunctionProto: {
    const auto *FX = cast<FunctionProtoType>(Val: X),
               *FY = cast<FunctionProtoType>(Val: Y);
    FunctionProtoType::ExtProtoInfo EPIX = FX->getExtProtoInfo(),
                                    EPIY = FY->getExtProtoInfo();
    assert(EPIX.ExtInfo == EPIY.ExtInfo);
    assert(EPIX.ExtParameterInfos == EPIY.ExtParameterInfos);
    assert(EPIX.RefQualifier == EPIY.RefQualifier);
    assert(EPIX.TypeQuals == EPIY.TypeQuals);
    assert(EPIX.Variadic == EPIY.Variadic);

    // FIXME: Can we handle an empty EllipsisLoc?
    // Use empty EllipsisLoc if X and Y differ.

    EPIX.HasTrailingReturn = EPIX.HasTrailingReturn && EPIY.HasTrailingReturn;

    QualType R =
        Ctx.getCommonSugaredType(X: FX->getReturnType(), Y: FY->getReturnType());
    // Parameters are compared unqualified; top-level parameter qualifiers are
    // not part of the function type's identity.
    auto P = getCommonTypes(Ctx, Xs: FX->param_types(), Ys: FY->param_types(),
                            /*Unqualified=*/true);

    SmallVector<QualType, 8> Exceptions;
    EPIX.ExceptionSpec = Ctx.mergeExceptionSpecs(
        ESI1: EPIX.ExceptionSpec, ESI2: EPIY.ExceptionSpec, ExceptionTypeStorage&: Exceptions, AcceptDependent: true);
    return Ctx.getFunctionType(ResultTy: R, Args: P, EPI: EPIX);
  }
  case Type::ObjCObject: {
    const auto *OX = cast<ObjCObjectType>(Val: X), *OY = cast<ObjCObjectType>(Val: Y);
    assert(
        std::equal(OX->getProtocols().begin(), OX->getProtocols().end(),
                   OY->getProtocols().begin(), OY->getProtocols().end(),
                   [](const ObjCProtocolDecl *P0, const ObjCProtocolDecl *P1) {
                     return P0->getCanonicalDecl() == P1->getCanonicalDecl();
                   }) &&
        "protocol lists must be the same");
    auto TAs = getCommonTypes(Ctx, Xs: OX->getTypeArgsAsWritten(),
                              Ys: OY->getTypeArgsAsWritten());
    return Ctx.getObjCObjectType(
        baseType: Ctx.getCommonSugaredType(X: OX->getBaseType(), Y: OY->getBaseType()), typeArgs: TAs,
        protocols: OX->getProtocols(),
        isKindOf: OX->isKindOfTypeAsWritten() && OY->isKindOfTypeAsWritten());
  }
  case Type::ConstantMatrix: {
    const auto *MX = cast<ConstantMatrixType>(Val: X),
               *MY = cast<ConstantMatrixType>(Val: Y);
    assert(MX->getNumRows() == MY->getNumRows());
    assert(MX->getNumColumns() == MY->getNumColumns());
    return Ctx.getConstantMatrixType(ElementTy: getCommonElementType(Ctx, X: MX, Y: MY),
                                     NumRows: MX->getNumRows(), NumColumns: MX->getNumColumns());
  }
  case Type::DependentSizedMatrix: {
    const auto *MX = cast<DependentSizedMatrixType>(Val: X),
               *MY = cast<DependentSizedMatrixType>(Val: Y);
    assert(Ctx.hasSameExpr(MX->getRowExpr(), MY->getRowExpr()));
    assert(Ctx.hasSameExpr(MX->getColumnExpr(), MY->getColumnExpr()));
    return Ctx.getDependentSizedMatrixType(
        ElementTy: getCommonElementType(Ctx, X: MX, Y: MY), RowExpr: MX->getRowExpr(),
        ColumnExpr: MX->getColumnExpr(), AttrLoc: getCommonAttrLoc(X: MX, Y: MY));
  }
  case Type::Vector: {
    const auto *VX = cast<VectorType>(Val: X), *VY = cast<VectorType>(Val: Y);
    assert(VX->getNumElements() == VY->getNumElements());
    assert(VX->getVectorKind() == VY->getVectorKind());
    return Ctx.getVectorType(vecType: getCommonElementType(Ctx, X: VX, Y: VY),
                             NumElts: VX->getNumElements(), VecKind: VX->getVectorKind());
  }
  case Type::ExtVector: {
    const auto *VX = cast<ExtVectorType>(Val: X), *VY = cast<ExtVectorType>(Val: Y);
    assert(VX->getNumElements() == VY->getNumElements());
    return Ctx.getExtVectorType(vecType: getCommonElementType(Ctx, X: VX, Y: VY),
                                NumElts: VX->getNumElements());
  }
  case Type::DependentSizedExtVector: {
    const auto *VX = cast<DependentSizedExtVectorType>(Val: X),
               *VY = cast<DependentSizedExtVectorType>(Val: Y);
    return Ctx.getDependentSizedExtVectorType(vecType: getCommonElementType(Ctx, X: VX, Y: VY),
                                              SizeExpr: getCommonSizeExpr(Ctx, X: VX, Y: VY),
                                              AttrLoc: getCommonAttrLoc(X: VX, Y: VY));
  }
  case Type::DependentVector: {
    const auto *VX = cast<DependentVectorType>(Val: X),
               *VY = cast<DependentVectorType>(Val: Y);
    assert(VX->getVectorKind() == VY->getVectorKind());
    return Ctx.getDependentVectorType(
        VecType: getCommonElementType(Ctx, X: VX, Y: VY), SizeExpr: getCommonSizeExpr(Ctx, X: VX, Y: VY),
        AttrLoc: getCommonAttrLoc(X: VX, Y: VY), VecKind: VX->getVectorKind());
  }
  case Type::InjectedClassName: {
    const auto *IX = cast<InjectedClassNameType>(Val: X),
               *IY = cast<InjectedClassNameType>(Val: Y);
    return Ctx.getInjectedClassNameType(
        Decl: getCommonDeclChecked(X: IX->getDecl(), Y: IY->getDecl()),
        TST: Ctx.getCommonSugaredType(X: IX->getInjectedSpecializationType(),
                                  Y: IY->getInjectedSpecializationType()));
  }
  case Type::TemplateSpecialization: {
    const auto *TX = cast<TemplateSpecializationType>(Val: X),
               *TY = cast<TemplateSpecializationType>(Val: Y);
    auto As = getCommonTemplateArguments(Ctx, Xs: TX->template_arguments(),
                                         Ys: TY->template_arguments());
    return Ctx.getTemplateSpecializationType(
        Template: ::getCommonTemplateNameChecked(Ctx, X: TX->getTemplateName(),
                                        Y: TY->getTemplateName(),
                                        /*IgnoreDeduced=*/true),
        SpecifiedArgs: As, /*CanonicalArgs=*/{}, Underlying: X->getCanonicalTypeInternal());
  }
  case Type::Decltype: {
    const auto *DX = cast<DecltypeType>(Val: X);
    [[maybe_unused]] const auto *DY = cast<DecltypeType>(Val: Y);
    assert(DX->isDependentType());
    assert(DY->isDependentType());
    assert(Ctx.hasSameExpr(DX->getUnderlyingExpr(), DY->getUnderlyingExpr()));
    // As Decltype is not uniqued, building a common type would be wasteful.
    return QualType(DX, 0);
  }
  case Type::PackIndexing: {
    const auto *DX = cast<PackIndexingType>(Val: X);
    [[maybe_unused]] const auto *DY = cast<PackIndexingType>(Val: Y);
    assert(DX->isDependentType());
    assert(DY->isDependentType());
    assert(Ctx.hasSameExpr(DX->getIndexExpr(), DY->getIndexExpr()));
    // Like Decltype above: not uniqued, so just keep X's node.
    return QualType(DX, 0);
  }
  case Type::DependentName: {
    const auto *NX = cast<DependentNameType>(Val: X),
               *NY = cast<DependentNameType>(Val: Y);
    assert(NX->getIdentifier() == NY->getIdentifier());
    return Ctx.getDependentNameType(
        Keyword: getCommonTypeKeyword(X: NX, Y: NY),
        NNS: getCommonQualifier(Ctx, X: NX, Y: NY, /*IsSame=*/true), Name: NX->getIdentifier());
  }
  case Type::DependentTemplateSpecialization: {
    const auto *TX = cast<DependentTemplateSpecializationType>(Val: X),
               *TY = cast<DependentTemplateSpecializationType>(Val: Y);
    auto As = getCommonTemplateArguments(Ctx, Xs: TX->template_arguments(),
                                         Ys: TY->template_arguments());
    const DependentTemplateStorage &SX = TX->getDependentTemplateName(),
                                   &SY = TY->getDependentTemplateName();
    assert(SX.getName() == SY.getName());
    DependentTemplateStorage Name(
        getCommonNNS(Ctx, NNS1: SX.getQualifier(), NNS2: SY.getQualifier(),
                     /*IsSame=*/true),
        SX.getName(), SX.hasTemplateKeyword() || SY.hasTemplateKeyword());
    return Ctx.getDependentTemplateSpecializationType(
        Keyword: getCommonTypeKeyword(X: TX, Y: TY), Name, Args: As);
  }
  case Type::UnaryTransform: {
    const auto *TX = cast<UnaryTransformType>(Val: X),
               *TY = cast<UnaryTransformType>(Val: Y);
    assert(TX->getUTTKind() == TY->getUTTKind());
    return Ctx.getUnaryTransformType(
        BaseType: Ctx.getCommonSugaredType(X: TX->getBaseType(), Y: TY->getBaseType()),
        UnderlyingType: Ctx.getCommonSugaredType(X: TX->getUnderlyingType(),
                                  Y: TY->getUnderlyingType()),
        Kind: TX->getUTTKind());
  }
  case Type::PackExpansion: {
    const auto *PX = cast<PackExpansionType>(Val: X),
               *PY = cast<PackExpansionType>(Val: Y);
    assert(PX->getNumExpansions() == PY->getNumExpansions());
    return Ctx.getPackExpansionType(
        Pattern: Ctx.getCommonSugaredType(X: PX->getPattern(), Y: PY->getPattern()),
        NumExpansions: PX->getNumExpansions(), ExpectPackInType: false);
  }
  case Type::Pipe: {
    const auto *PX = cast<PipeType>(Val: X), *PY = cast<PipeType>(Val: Y);
    assert(PX->isReadOnly() == PY->isReadOnly());
    // Pick the read- or write-pipe factory via a member-function pointer so
    // the element unification is only spelled once.
    auto MP = PX->isReadOnly() ? &ASTContext::getReadPipeType
                               : &ASTContext::getWritePipeType;
    return (Ctx.*MP)(getCommonElementType(Ctx, X: PX, Y: PY));
  }
  case Type::TemplateTypeParm: {
    const auto *TX = cast<TemplateTypeParmType>(Val: X),
               *TY = cast<TemplateTypeParmType>(Val: Y);
    assert(TX->getDepth() == TY->getDepth());
    assert(TX->getIndex() == TY->getIndex());
    assert(TX->isParameterPack() == TY->isParameterPack());
    return Ctx.getTemplateTypeParmType(
        Depth: TX->getDepth(), Index: TX->getIndex(), ParameterPack: TX->isParameterPack(),
        TTPDecl: getCommonDecl(X: TX->getDecl(), Y: TY->getDecl()));
  }
  }
  llvm_unreachable("Unknown Type Class");
}
14283
/// Unify two sugar nodes of the same type class on top of a common type.
///
/// This is the worker for ASTContext::getCommonSugaredType used while adding
/// sugar back on top of the already-unified \p Underlying type. Unlike
/// getCommonNonSugarTypeNode above, this function is allowed to fail:
/// returning a null QualType signals that the sugar of X and Y at this level
/// cannot be unified, and the caller stops re-adding sugar at that point.
static QualType getCommonSugarTypeNode(ASTContext &Ctx, const Type *X,
                                       const Type *Y,
                                       SplitQualType Underlying) {
  Type::TypeClass TC = X->getTypeClass();
  if (TC != Y->getTypeClass())
    return QualType();
  switch (TC) {
// Dependent nodes and canonical-only nodes are never sugar, so they cannot
// appear here; map them to llvm_unreachable.
#define UNEXPECTED_TYPE(Class, Kind)                                           \
  case Type::Class:                                                            \
    llvm_unreachable("Unexpected " Kind ": " #Class);
#define TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "dependent")
#include "clang/AST/TypeNodes.inc"

#define CANONICAL_TYPE(Class) UNEXPECTED_TYPE(Class, "canonical")
  CANONICAL_TYPE(Atomic)
  CANONICAL_TYPE(BitInt)
  CANONICAL_TYPE(BlockPointer)
  CANONICAL_TYPE(Builtin)
  CANONICAL_TYPE(Complex)
  CANONICAL_TYPE(ConstantArray)
  CANONICAL_TYPE(ArrayParameter)
  CANONICAL_TYPE(ConstantMatrix)
  CANONICAL_TYPE(Enum)
  CANONICAL_TYPE(ExtVector)
  CANONICAL_TYPE(FunctionNoProto)
  CANONICAL_TYPE(FunctionProto)
  CANONICAL_TYPE(IncompleteArray)
  CANONICAL_TYPE(HLSLAttributedResource)
  CANONICAL_TYPE(HLSLInlineSpirv)
  CANONICAL_TYPE(LValueReference)
  CANONICAL_TYPE(ObjCInterface)
  CANONICAL_TYPE(ObjCObject)
  CANONICAL_TYPE(ObjCObjectPointer)
  CANONICAL_TYPE(Pipe)
  CANONICAL_TYPE(Pointer)
  CANONICAL_TYPE(Record)
  CANONICAL_TYPE(RValueReference)
  CANONICAL_TYPE(VariableArray)
  CANONICAL_TYPE(Vector)
#undef CANONICAL_TYPE

#undef UNEXPECTED_TYPE

  case Type::Adjusted: {
    const auto *AX = cast<AdjustedType>(Val: X), *AY = cast<AdjustedType>(Val: Y);
    QualType OX = AX->getOriginalType(), OY = AY->getOriginalType();
    if (!Ctx.hasSameType(T1: OX, T2: OY))
      return QualType();
    // FIXME: It's inefficient to have to unify the original types.
    return Ctx.getAdjustedType(Orig: Ctx.getCommonSugaredType(X: OX, Y: OY),
                               New: Ctx.getQualifiedType(split: Underlying));
  }
  case Type::Decayed: {
    const auto *DX = cast<DecayedType>(Val: X), *DY = cast<DecayedType>(Val: Y);
    QualType OX = DX->getOriginalType(), OY = DY->getOriginalType();
    if (!Ctx.hasSameType(T1: OX, T2: OY))
      return QualType();
    // FIXME: It's inefficient to have to unify the original types.
    return Ctx.getDecayedType(Orig: Ctx.getCommonSugaredType(X: OX, Y: OY),
                              Decayed: Ctx.getQualifiedType(split: Underlying));
  }
  case Type::Attributed: {
    const auto *AX = cast<AttributedType>(Val: X), *AY = cast<AttributedType>(Val: Y);
    AttributedType::Kind Kind = AX->getAttrKind();
    if (Kind != AY->getAttrKind())
      return QualType();
    QualType MX = AX->getModifiedType(), MY = AY->getModifiedType();
    if (!Ctx.hasSameType(T1: MX, T2: MY))
      return QualType();
    // FIXME: It's inefficient to have to unify the modified types.
    return Ctx.getAttributedType(attrKind: Kind, modifiedType: Ctx.getCommonSugaredType(X: MX, Y: MY),
                                 equivalentType: Ctx.getQualifiedType(split: Underlying),
                                 attr: AX->getAttr());
  }
  case Type::BTFTagAttributed: {
    const auto *BX = cast<BTFTagAttributedType>(Val: X);
    const BTFTypeTagAttr *AX = BX->getAttr();
    // The attribute is not uniqued, so just compare the tag.
    if (AX->getBTFTypeTag() !=
        cast<BTFTagAttributedType>(Val: Y)->getAttr()->getBTFTypeTag())
      return QualType();
    return Ctx.getBTFTagAttributedType(BTFAttr: AX, Wrapped: Ctx.getQualifiedType(split: Underlying));
  }
  case Type::Auto: {
    const auto *AX = cast<AutoType>(Val: X), *AY = cast<AutoType>(Val: Y);

    AutoTypeKeyword KW = AX->getKeyword();
    if (KW != AY->getKeyword())
      return QualType();

    ConceptDecl *CD = ::getCommonDecl(X: AX->getTypeConstraintConcept(),
                                      Y: AY->getTypeConstraintConcept());
    SmallVector<TemplateArgument, 8> As;
    if (CD &&
        getCommonTemplateArguments(Ctx, R&: As, Xs: AX->getTypeConstraintArguments(),
                                   Ys: AY->getTypeConstraintArguments())) {
      CD = nullptr; // The arguments differ, so make it unconstrained.
      As.clear();
    }

    // Both auto types can't be dependent, otherwise they wouldn't have been
    // sugar. This implies they can't contain unexpanded packs either.
    return Ctx.getAutoType(DeducedType: Ctx.getQualifiedType(split: Underlying), Keyword: AX->getKeyword(),
                           /*IsDependent=*/false, /*IsPack=*/false, TypeConstraintConcept: CD, TypeConstraintArgs: As);
  }
  case Type::PackIndexing:
  case Type::Decltype:
    return QualType();
  case Type::DeducedTemplateSpecialization:
    // FIXME: Try to merge these.
    return QualType();

  case Type::Elaborated: {
    const auto *EX = cast<ElaboratedType>(Val: X), *EY = cast<ElaboratedType>(Val: Y);
    return Ctx.getElaboratedType(
        Keyword: ::getCommonTypeKeyword(X: EX, Y: EY),
        NNS: ::getCommonQualifier(Ctx, X: EX, Y: EY, /*IsSame=*/false),
        NamedType: Ctx.getQualifiedType(split: Underlying),
        OwnedTagDecl: ::getCommonDecl(X: EX->getOwnedTagDecl(), Y: EY->getOwnedTagDecl()));
  }
  case Type::MacroQualified: {
    const auto *MX = cast<MacroQualifiedType>(Val: X),
               *MY = cast<MacroQualifiedType>(Val: Y);
    const IdentifierInfo *IX = MX->getMacroIdentifier();
    if (IX != MY->getMacroIdentifier())
      return QualType();
    return Ctx.getMacroQualifiedType(UnderlyingTy: Ctx.getQualifiedType(split: Underlying), MacroII: IX);
  }
  case Type::SubstTemplateTypeParm: {
    const auto *SX = cast<SubstTemplateTypeParmType>(Val: X),
               *SY = cast<SubstTemplateTypeParmType>(Val: Y);
    Decl *CD =
        ::getCommonDecl(X: SX->getAssociatedDecl(), Y: SY->getAssociatedDecl());
    if (!CD)
      return QualType();
    unsigned Index = SX->getIndex();
    if (Index != SY->getIndex())
      return QualType();
    auto PackIndex = SX->getPackIndex();
    if (PackIndex != SY->getPackIndex())
      return QualType();
    return Ctx.getSubstTemplateTypeParmType(Replacement: Ctx.getQualifiedType(split: Underlying),
                                            AssociatedDecl: CD, Index, PackIndex,
                                            Final: SX->getFinal() && SY->getFinal());
  }
  case Type::ObjCTypeParam:
    // FIXME: Try to merge these.
    return QualType();
  case Type::Paren:
    return Ctx.getParenType(InnerType: Ctx.getQualifiedType(split: Underlying));

  case Type::TemplateSpecialization: {
    const auto *TX = cast<TemplateSpecializationType>(Val: X),
               *TY = cast<TemplateSpecializationType>(Val: Y);
    TemplateName CTN =
        ::getCommonTemplateName(Ctx, X: TX->getTemplateName(),
                                Y: TY->getTemplateName(), /*IgnoreDeduced=*/true);
    if (!CTN.getAsVoidPointer())
      return QualType();
    SmallVector<TemplateArgument, 8> As;
    if (getCommonTemplateArguments(Ctx, R&: As, Xs: TX->template_arguments(),
                                   Ys: TY->template_arguments()))
      return QualType();
    return Ctx.getTemplateSpecializationType(Template: CTN, SpecifiedArgs: As,
                                             /*CanonicalArgs=*/{},
                                             Underlying: Ctx.getQualifiedType(split: Underlying));
  }
  case Type::Typedef: {
    const auto *TX = cast<TypedefType>(Val: X), *TY = cast<TypedefType>(Val: Y);
    const TypedefNameDecl *CD = ::getCommonDecl(X: TX->getDecl(), Y: TY->getDecl());
    if (!CD)
      return QualType();
    return Ctx.getTypedefType(Decl: CD, Underlying: Ctx.getQualifiedType(split: Underlying));
  }
  case Type::TypeOf: {
    // The common sugar between two typeof expressions, where one is
    // potentially a typeof_unqual and the other is not, we unify to the
    // qualified type as that retains the most information along with the type.
    // We only return a typeof_unqual type when both types are unqual types.
    TypeOfKind Kind = TypeOfKind::Qualified;
    if (cast<TypeOfType>(Val: X)->getKind() == cast<TypeOfType>(Val: Y)->getKind() &&
        cast<TypeOfType>(Val: X)->getKind() == TypeOfKind::Unqualified)
      Kind = TypeOfKind::Unqualified;
    return Ctx.getTypeOfType(tofType: Ctx.getQualifiedType(split: Underlying), Kind);
  }
  case Type::TypeOfExpr:
    return QualType();

  case Type::UnaryTransform: {
    const auto *UX = cast<UnaryTransformType>(Val: X),
               *UY = cast<UnaryTransformType>(Val: Y);
    UnaryTransformType::UTTKind KX = UX->getUTTKind();
    if (KX != UY->getUTTKind())
      return QualType();
    QualType BX = UX->getBaseType(), BY = UY->getBaseType();
    if (!Ctx.hasSameType(T1: BX, T2: BY))
      return QualType();
    // FIXME: It's inefficient to have to unify the base types.
    return Ctx.getUnaryTransformType(BaseType: Ctx.getCommonSugaredType(X: BX, Y: BY),
                                     UnderlyingType: Ctx.getQualifiedType(split: Underlying), Kind: KX);
  }
  case Type::Using: {
    const auto *UX = cast<UsingType>(Val: X), *UY = cast<UsingType>(Val: Y);
    const UsingShadowDecl *CD =
        ::getCommonDecl(X: UX->getFoundDecl(), Y: UY->getFoundDecl());
    if (!CD)
      return QualType();
    return Ctx.getUsingType(Found: CD, Underlying: Ctx.getQualifiedType(split: Underlying));
  }
  case Type::MemberPointer: {
    const auto *PX = cast<MemberPointerType>(Val: X),
               *PY = cast<MemberPointerType>(Val: Y);
    CXXRecordDecl *Cls = PX->getMostRecentCXXRecordDecl();
    assert(Cls == PY->getMostRecentCXXRecordDecl());
    return Ctx.getMemberPointerType(
        T: ::getCommonPointeeType(Ctx, X: PX, Y: PY),
        Qualifier: ::getCommonQualifier(Ctx, X: PX, Y: PY, /*IsSame=*/false), Cls);
  }
  case Type::CountAttributed: {
    const auto *DX = cast<CountAttributedType>(Val: X),
               *DY = cast<CountAttributedType>(Val: Y);
    if (DX->isCountInBytes() != DY->isCountInBytes())
      return QualType();
    if (DX->isOrNull() != DY->isOrNull())
      return QualType();
    Expr *CEX = DX->getCountExpr();
    Expr *CEY = DY->getCountExpr();
    ArrayRef<clang::TypeCoupledDeclRefInfo> CDX = DX->getCoupledDecls();
    if (Ctx.hasSameExpr(X: CEX, Y: CEY))
      return Ctx.getCountAttributedType(WrappedTy: Ctx.getQualifiedType(split: Underlying), CountExpr: CEX,
                                        CountInBytes: DX->isCountInBytes(), OrNull: DX->isOrNull(),
                                        DependentDecls: CDX);
    if (!CEX->isIntegerConstantExpr(Ctx) || !CEY->isIntegerConstantExpr(Ctx))
      return QualType();
    // Two declarations with the same integer constant may still differ in their
    // expression pointers, so we need to evaluate them.
    llvm::APSInt VX = *CEX->getIntegerConstantExpr(Ctx);
    llvm::APSInt VY = *CEY->getIntegerConstantExpr(Ctx);
    if (VX != VY)
      return QualType();
    return Ctx.getCountAttributedType(WrappedTy: Ctx.getQualifiedType(split: Underlying), CountExpr: CEX,
                                      CountInBytes: DX->isCountInBytes(), OrNull: DX->isOrNull(),
                                      DependentDecls: CDX);
  }
  }
  llvm_unreachable("Unhandled Type Class");
}
14532
14533static auto unwrapSugar(SplitQualType &T, Qualifiers &QTotal) {
14534 SmallVector<SplitQualType, 8> R;
14535 while (true) {
14536 QTotal.addConsistentQualifiers(qs: T.Quals);
14537 QualType NT = T.Ty->getLocallyUnqualifiedSingleStepDesugaredType();
14538 if (NT == QualType(T.Ty, 0))
14539 break;
14540 R.push_back(Elt: T);
14541 T = NT.split();
14542 }
14543 return R;
14544}
14545
14546QualType ASTContext::getCommonSugaredType(QualType X, QualType Y,
14547 bool Unqualified) {
14548 assert(Unqualified ? hasSameUnqualifiedType(X, Y) : hasSameType(X, Y));
14549 if (X == Y)
14550 return X;
14551 if (!Unqualified) {
14552 if (X.isCanonical())
14553 return X;
14554 if (Y.isCanonical())
14555 return Y;
14556 }
14557
14558 SplitQualType SX = X.split(), SY = Y.split();
14559 Qualifiers QX, QY;
14560 // Desugar SX and SY, setting the sugar and qualifiers aside into Xs and Ys,
14561 // until we reach their underlying "canonical nodes". Note these are not
14562 // necessarily canonical types, as they may still have sugared properties.
14563 // QX and QY will store the sum of all qualifiers in Xs and Ys respectively.
14564 auto Xs = ::unwrapSugar(T&: SX, QTotal&: QX), Ys = ::unwrapSugar(T&: SY, QTotal&: QY);
14565
14566 // If this is an ArrayType, the element qualifiers are interchangeable with
14567 // the top level qualifiers.
14568 // * In case the canonical nodes are the same, the elements types are already
14569 // the same.
14570 // * Otherwise, the element types will be made the same, and any different
14571 // element qualifiers will be moved up to the top level qualifiers, per
14572 // 'getCommonArrayElementType'.
14573 // In both cases, this means there may be top level qualifiers which differ
14574 // between X and Y. If so, these differing qualifiers are redundant with the
14575 // element qualifiers, and can be removed without changing the canonical type.
14576 // The desired behaviour is the same as for the 'Unqualified' case here:
14577 // treat the redundant qualifiers as sugar, remove the ones which are not
14578 // common to both sides.
14579 bool KeepCommonQualifiers = Unqualified || isa<ArrayType>(Val: SX.Ty);
14580
14581 if (SX.Ty != SY.Ty) {
14582 // The canonical nodes differ. Build a common canonical node out of the two,
14583 // unifying their sugar. This may recurse back here.
14584 SX.Ty =
14585 ::getCommonNonSugarTypeNode(Ctx&: *this, X: SX.Ty, QX, Y: SY.Ty, QY).getTypePtr();
14586 } else {
14587 // The canonical nodes were identical: We may have desugared too much.
14588 // Add any common sugar back in.
14589 while (!Xs.empty() && !Ys.empty() && Xs.back().Ty == Ys.back().Ty) {
14590 QX -= SX.Quals;
14591 QY -= SY.Quals;
14592 SX = Xs.pop_back_val();
14593 SY = Ys.pop_back_val();
14594 }
14595 }
14596 if (KeepCommonQualifiers)
14597 QX = Qualifiers::removeCommonQualifiers(L&: QX, R&: QY);
14598 else
14599 assert(QX == QY);
14600
14601 // Even though the remaining sugar nodes in Xs and Ys differ, some may be
14602 // related. Walk up these nodes, unifying them and adding the result.
14603 while (!Xs.empty() && !Ys.empty()) {
14604 auto Underlying = SplitQualType(
14605 SX.Ty, Qualifiers::removeCommonQualifiers(L&: SX.Quals, R&: SY.Quals));
14606 SX = Xs.pop_back_val();
14607 SY = Ys.pop_back_val();
14608 SX.Ty = ::getCommonSugarTypeNode(Ctx&: *this, X: SX.Ty, Y: SY.Ty, Underlying)
14609 .getTypePtrOrNull();
14610 // Stop at the first pair which is unrelated.
14611 if (!SX.Ty) {
14612 SX.Ty = Underlying.Ty;
14613 break;
14614 }
14615 QX -= Underlying.Quals;
14616 };
14617
14618 // Add back the missing accumulated qualifiers, which were stripped off
14619 // with the sugar nodes we could not unify.
14620 QualType R = getQualifiedType(T: SX.Ty, Qs: QX);
14621 assert(Unqualified ? hasSameUnqualifiedType(R, X) : hasSameType(R, X));
14622 return R;
14623}
14624
14625QualType ASTContext::getCorrespondingUnsaturatedType(QualType Ty) const {
14626 assert(Ty->isFixedPointType());
14627
14628 if (Ty->isUnsaturatedFixedPointType())
14629 return Ty;
14630
14631 switch (Ty->castAs<BuiltinType>()->getKind()) {
14632 default:
14633 llvm_unreachable("Not a saturated fixed point type!");
14634 case BuiltinType::SatShortAccum:
14635 return ShortAccumTy;
14636 case BuiltinType::SatAccum:
14637 return AccumTy;
14638 case BuiltinType::SatLongAccum:
14639 return LongAccumTy;
14640 case BuiltinType::SatUShortAccum:
14641 return UnsignedShortAccumTy;
14642 case BuiltinType::SatUAccum:
14643 return UnsignedAccumTy;
14644 case BuiltinType::SatULongAccum:
14645 return UnsignedLongAccumTy;
14646 case BuiltinType::SatShortFract:
14647 return ShortFractTy;
14648 case BuiltinType::SatFract:
14649 return FractTy;
14650 case BuiltinType::SatLongFract:
14651 return LongFractTy;
14652 case BuiltinType::SatUShortFract:
14653 return UnsignedShortFractTy;
14654 case BuiltinType::SatUFract:
14655 return UnsignedFractTy;
14656 case BuiltinType::SatULongFract:
14657 return UnsignedLongFractTy;
14658 }
14659}
14660
14661QualType ASTContext::getCorrespondingSaturatedType(QualType Ty) const {
14662 assert(Ty->isFixedPointType());
14663
14664 if (Ty->isSaturatedFixedPointType()) return Ty;
14665
14666 switch (Ty->castAs<BuiltinType>()->getKind()) {
14667 default:
14668 llvm_unreachable("Not a fixed point type!");
14669 case BuiltinType::ShortAccum:
14670 return SatShortAccumTy;
14671 case BuiltinType::Accum:
14672 return SatAccumTy;
14673 case BuiltinType::LongAccum:
14674 return SatLongAccumTy;
14675 case BuiltinType::UShortAccum:
14676 return SatUnsignedShortAccumTy;
14677 case BuiltinType::UAccum:
14678 return SatUnsignedAccumTy;
14679 case BuiltinType::ULongAccum:
14680 return SatUnsignedLongAccumTy;
14681 case BuiltinType::ShortFract:
14682 return SatShortFractTy;
14683 case BuiltinType::Fract:
14684 return SatFractTy;
14685 case BuiltinType::LongFract:
14686 return SatLongFractTy;
14687 case BuiltinType::UShortFract:
14688 return SatUnsignedShortFractTy;
14689 case BuiltinType::UFract:
14690 return SatUnsignedFractTy;
14691 case BuiltinType::ULongFract:
14692 return SatUnsignedLongFractTy;
14693 }
14694}
14695
14696LangAS ASTContext::getLangASForBuiltinAddressSpace(unsigned AS) const {
14697 if (LangOpts.OpenCL)
14698 return getTargetInfo().getOpenCLBuiltinAddressSpace(AS);
14699
14700 if (LangOpts.CUDA)
14701 return getTargetInfo().getCUDABuiltinAddressSpace(AS);
14702
14703 return getLangASFromTargetAS(TargetAS: AS);
14704}
14705
// Explicitly instantiate this in case a Redeclarable<T> is used from a TU that
// doesn't include ASTContext.h. Without this explicit instantiation, such a TU
// would fail to link against the out-of-line definition of makeValue.
template
clang::LazyGenerationalUpdatePtr<
    const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::ValueType
clang::LazyGenerationalUpdatePtr<
    const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::makeValue(
    const clang::ASTContext &Ctx, Decl *Value);
14714
/// Return the scale (number of fractional bits) of the given fixed point
/// type, as defined by the target ABI.
unsigned char ASTContext::getFixedPointScale(QualType Ty) const {
  assert(Ty->isFixedPointType());

  const TargetInfo &Target = getTargetInfo();
  switch (Ty->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a fixed point type!");
  // The saturating and non-saturating variants of a type share a scale.
  case BuiltinType::ShortAccum:
  case BuiltinType::SatShortAccum:
    return Target.getShortAccumScale();
  case BuiltinType::Accum:
  case BuiltinType::SatAccum:
    return Target.getAccumScale();
  case BuiltinType::LongAccum:
  case BuiltinType::SatLongAccum:
    return Target.getLongAccumScale();
  case BuiltinType::UShortAccum:
  case BuiltinType::SatUShortAccum:
    return Target.getUnsignedShortAccumScale();
  case BuiltinType::UAccum:
  case BuiltinType::SatUAccum:
    return Target.getUnsignedAccumScale();
  case BuiltinType::ULongAccum:
  case BuiltinType::SatULongAccum:
    return Target.getUnsignedLongAccumScale();
  case BuiltinType::ShortFract:
  case BuiltinType::SatShortFract:
    return Target.getShortFractScale();
  case BuiltinType::Fract:
  case BuiltinType::SatFract:
    return Target.getFractScale();
  case BuiltinType::LongFract:
  case BuiltinType::SatLongFract:
    return Target.getLongFractScale();
  case BuiltinType::UShortFract:
  case BuiltinType::SatUShortFract:
    return Target.getUnsignedShortFractScale();
  case BuiltinType::UFract:
  case BuiltinType::SatUFract:
    return Target.getUnsignedFractScale();
  case BuiltinType::ULongFract:
  case BuiltinType::SatULongFract:
    return Target.getUnsignedLongFractScale();
  }
}
14760
/// Return the number of integral (non-fractional, non-sign) bits of the
/// given fixed point type, as defined by the target ABI.
unsigned char ASTContext::getFixedPointIBits(QualType Ty) const {
  assert(Ty->isFixedPointType());

  const TargetInfo &Target = getTargetInfo();
  switch (Ty->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a fixed point type!");
  // The saturating and non-saturating variants of a type share their
  // integral bit count.
  case BuiltinType::ShortAccum:
  case BuiltinType::SatShortAccum:
    return Target.getShortAccumIBits();
  case BuiltinType::Accum:
  case BuiltinType::SatAccum:
    return Target.getAccumIBits();
  case BuiltinType::LongAccum:
  case BuiltinType::SatLongAccum:
    return Target.getLongAccumIBits();
  case BuiltinType::UShortAccum:
  case BuiltinType::SatUShortAccum:
    return Target.getUnsignedShortAccumIBits();
  case BuiltinType::UAccum:
  case BuiltinType::SatUAccum:
    return Target.getUnsignedAccumIBits();
  case BuiltinType::ULongAccum:
  case BuiltinType::SatULongAccum:
    return Target.getUnsignedLongAccumIBits();
  // _Fract types are purely fractional and therefore have no integral bits.
  case BuiltinType::ShortFract:
  case BuiltinType::SatShortFract:
  case BuiltinType::Fract:
  case BuiltinType::SatFract:
  case BuiltinType::LongFract:
  case BuiltinType::SatLongFract:
  case BuiltinType::UShortFract:
  case BuiltinType::SatUShortFract:
  case BuiltinType::UFract:
  case BuiltinType::SatUFract:
  case BuiltinType::ULongFract:
  case BuiltinType::SatULongFract:
    return 0;
  }
}
14801
14802llvm::FixedPointSemantics
14803ASTContext::getFixedPointSemantics(QualType Ty) const {
14804 assert((Ty->isFixedPointType() || Ty->isIntegerType()) &&
14805 "Can only get the fixed point semantics for a "
14806 "fixed point or integer type.");
14807 if (Ty->isIntegerType())
14808 return llvm::FixedPointSemantics::GetIntegerSemantics(
14809 Width: getIntWidth(T: Ty), IsSigned: Ty->isSignedIntegerType());
14810
14811 bool isSigned = Ty->isSignedFixedPointType();
14812 return llvm::FixedPointSemantics(
14813 static_cast<unsigned>(getTypeSize(T: Ty)), getFixedPointScale(Ty), isSigned,
14814 Ty->isSaturatedFixedPointType(),
14815 !isSigned && getTargetInfo().doUnsignedFixedPointTypesHavePadding());
14816}
14817
14818llvm::APFixedPoint ASTContext::getFixedPointMax(QualType Ty) const {
14819 assert(Ty->isFixedPointType());
14820 return llvm::APFixedPoint::getMax(Sema: getFixedPointSemantics(Ty));
14821}
14822
14823llvm::APFixedPoint ASTContext::getFixedPointMin(QualType Ty) const {
14824 assert(Ty->isFixedPointType());
14825 return llvm::APFixedPoint::getMin(Sema: getFixedPointSemantics(Ty));
14826}
14827
14828QualType ASTContext::getCorrespondingSignedFixedPointType(QualType Ty) const {
14829 assert(Ty->isUnsignedFixedPointType() &&
14830 "Expected unsigned fixed point type");
14831
14832 switch (Ty->castAs<BuiltinType>()->getKind()) {
14833 case BuiltinType::UShortAccum:
14834 return ShortAccumTy;
14835 case BuiltinType::UAccum:
14836 return AccumTy;
14837 case BuiltinType::ULongAccum:
14838 return LongAccumTy;
14839 case BuiltinType::SatUShortAccum:
14840 return SatShortAccumTy;
14841 case BuiltinType::SatUAccum:
14842 return SatAccumTy;
14843 case BuiltinType::SatULongAccum:
14844 return SatLongAccumTy;
14845 case BuiltinType::UShortFract:
14846 return ShortFractTy;
14847 case BuiltinType::UFract:
14848 return FractTy;
14849 case BuiltinType::ULongFract:
14850 return LongFractTy;
14851 case BuiltinType::SatUShortFract:
14852 return SatShortFractTy;
14853 case BuiltinType::SatUFract:
14854 return SatFractTy;
14855 case BuiltinType::SatULongFract:
14856 return SatLongFractTy;
14857 default:
14858 llvm_unreachable("Unexpected unsigned fixed point type");
14859 }
14860}
14861
14862// Given a list of FMV features, return a concatenated list of the
14863// corresponding backend features (which may contain duplicates).
14864static std::vector<std::string> getFMVBackendFeaturesFor(
14865 const llvm::SmallVectorImpl<StringRef> &FMVFeatStrings) {
14866 std::vector<std::string> BackendFeats;
14867 llvm::AArch64::ExtensionSet FeatureBits;
14868 for (StringRef F : FMVFeatStrings)
14869 if (auto FMVExt = llvm::AArch64::parseFMVExtension(Extension: F))
14870 if (FMVExt->ID)
14871 FeatureBits.enable(E: *FMVExt->ID);
14872 FeatureBits.toLLVMFeatureList(Features&: BackendFeats);
14873 return BackendFeats;
14874}
14875
14876ParsedTargetAttr
14877ASTContext::filterFunctionTargetAttrs(const TargetAttr *TD) const {
14878 assert(TD != nullptr);
14879 ParsedTargetAttr ParsedAttr = Target->parseTargetAttr(Str: TD->getFeaturesStr());
14880
14881 llvm::erase_if(C&: ParsedAttr.Features, P: [&](const std::string &Feat) {
14882 return !Target->isValidFeatureName(Feature: StringRef{Feat}.substr(Start: 1));
14883 });
14884 return ParsedAttr;
14885}
14886
14887void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
14888 const FunctionDecl *FD) const {
14889 if (FD)
14890 getFunctionFeatureMap(FeatureMap, GD: GlobalDecl().getWithDecl(D: FD));
14891 else
14892 Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(),
14893 CPU: Target->getTargetOpts().CPU,
14894 FeatureVec: Target->getTargetOpts().Features);
14895}
14896
// Fills in the supplied string map with the set of target features for the
// passed in function. The feature set depends on which (if any) of the
// multi-versioning attributes (target, cpu_specific, target_clones,
// target_version) the function carries; the ordering of inserts below is
// significant because later entries in the feature vector override earlier
// ones when the map is initialized.
void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
                                       GlobalDecl GD) const {
  StringRef TargetCPU = Target->getTargetOpts().CPU;
  const FunctionDecl *FD = GD.getDecl()->getAsFunction();
  if (const auto *TD = FD->getAttr<TargetAttr>()) {
    // __attribute__((target(...))): parse the attribute string, keeping only
    // features the target recognizes.
    ParsedTargetAttr ParsedAttr = filterFunctionTargetAttrs(TD);

    // Make a copy of the features as passed on the command line into the
    // beginning of the additional features from the function to override.
    // AArch64 handles command line option features in parseTargetAttr().
    if (!Target->getTriple().isAArch64())
      ParsedAttr.Features.insert(
          position: ParsedAttr.Features.begin(),
          first: Target->getTargetOpts().FeaturesAsWritten.begin(),
          last: Target->getTargetOpts().FeaturesAsWritten.end());

    // The attribute may also name a CPU that replaces the command-line one.
    if (ParsedAttr.CPU != "" && Target->isValidCPUName(Name: ParsedAttr.CPU))
      TargetCPU = ParsedAttr.CPU;

    // Now populate the feature map, first with the TargetCPU which is either
    // the default or a new one from the target attribute string. Then we'll use
    // the passed in features (FeaturesAsWritten) along with the new ones from
    // the attribute.
    Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU,
                           FeatureVec: ParsedAttr.Features);
  } else if (const auto *SD = FD->getAttr<CPUSpecificAttr>()) {
    // __attribute__((cpu_specific(...))): look up the dispatch features for
    // the CPU named by this multi-version variant, then prepend the
    // command-line features so the per-CPU ones take precedence.
    llvm::SmallVector<StringRef, 32> FeaturesTmp;
    Target->getCPUSpecificCPUDispatchFeatures(
        Name: SD->getCPUName(Index: GD.getMultiVersionIndex())->getName(), Features&: FeaturesTmp);
    std::vector<std::string> Features(FeaturesTmp.begin(), FeaturesTmp.end());
    Features.insert(position: Features.begin(),
                    first: Target->getTargetOpts().FeaturesAsWritten.begin(),
                    last: Target->getTargetOpts().FeaturesAsWritten.end());
    Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU, FeatureVec: Features);
  } else if (const auto *TC = FD->getAttr<TargetClonesAttr>()) {
    // __attribute__((target_clones(...))): the handling is per-target.
    if (Target->getTriple().isAArch64()) {
      // AArch64 clones are expressed as FMV feature names that expand to
      // backend features.
      llvm::SmallVector<StringRef, 8> Feats;
      TC->getFeatures(Out&: Feats, Index: GD.getMultiVersionIndex());
      std::vector<std::string> Features = getFMVBackendFeaturesFor(FMVFeatStrings: Feats);
      Features.insert(position: Features.begin(),
                      first: Target->getTargetOpts().FeaturesAsWritten.begin(),
                      last: Target->getTargetOpts().FeaturesAsWritten.end());
      Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU, FeatureVec: Features);
    } else if (Target->getTriple().isRISCV()) {
      // RISC-V clones use target-attribute syntax; "default" contributes no
      // extra features.
      StringRef VersionStr = TC->getFeatureStr(Index: GD.getMultiVersionIndex());
      std::vector<std::string> Features;
      if (VersionStr != "default") {
        ParsedTargetAttr ParsedAttr = Target->parseTargetAttr(Str: VersionStr);
        Features.insert(position: Features.begin(), first: ParsedAttr.Features.begin(),
                        last: ParsedAttr.Features.end());
      }
      Features.insert(position: Features.begin(),
                      first: Target->getTargetOpts().FeaturesAsWritten.begin(),
                      last: Target->getTargetOpts().FeaturesAsWritten.end());
      Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU, FeatureVec: Features);
    } else {
      // Other targets (e.g. x86): a clone is either "arch=<cpu>", "default",
      // or a single feature name that is enabled with a '+' prefix.
      std::vector<std::string> Features;
      StringRef VersionStr = TC->getFeatureStr(Index: GD.getMultiVersionIndex());
      if (VersionStr.starts_with(Prefix: "arch="))
        TargetCPU = VersionStr.drop_front(N: sizeof("arch=") - 1);
      else if (VersionStr != "default")
        Features.push_back(x: (StringRef{"+"} + VersionStr).str());
      Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU, FeatureVec: Features);
    }
  } else if (const auto *TV = FD->getAttr<TargetVersionAttr>()) {
    // __attribute__((target_version(...))): RISC-V parses the string as a
    // target attribute; AArch64 expands FMV feature names.
    std::vector<std::string> Features;
    if (Target->getTriple().isRISCV()) {
      ParsedTargetAttr ParsedAttr = Target->parseTargetAttr(Str: TV->getName());
      Features.insert(position: Features.begin(), first: ParsedAttr.Features.begin(),
                      last: ParsedAttr.Features.end());
    } else {
      assert(Target->getTriple().isAArch64());
      llvm::SmallVector<StringRef, 8> Feats;
      TV->getFeatures(Out&: Feats);
      Features = getFMVBackendFeaturesFor(FMVFeatStrings: Feats);
    }
    Features.insert(position: Features.begin(),
                    first: Target->getTargetOpts().FeaturesAsWritten.begin(),
                    last: Target->getTargetOpts().FeaturesAsWritten.end());
    Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU, FeatureVec: Features);
  } else {
    // No multi-versioning attribute: use the precomputed command-line map.
    FeatureMap = Target->getTargetOpts().FeatureMap;
  }
}
14983
14984static SYCLKernelInfo BuildSYCLKernelInfo(ASTContext &Context,
14985 CanQualType KernelNameType,
14986 const FunctionDecl *FD) {
14987 // Host and device compilation may use different ABIs and different ABIs
14988 // may allocate name mangling discriminators differently. A discriminator
14989 // override is used to ensure consistent discriminator allocation across
14990 // host and device compilation.
14991 auto DeviceDiscriminatorOverrider =
14992 [](ASTContext &Ctx, const NamedDecl *ND) -> UnsignedOrNone {
14993 if (const auto *RD = dyn_cast<CXXRecordDecl>(Val: ND))
14994 if (RD->isLambda())
14995 return RD->getDeviceLambdaManglingNumber();
14996 return std::nullopt;
14997 };
14998 std::unique_ptr<MangleContext> MC{ItaniumMangleContext::create(
14999 Context, Diags&: Context.getDiagnostics(), Discriminator: DeviceDiscriminatorOverrider)};
15000
15001 // Construct a mangled name for the SYCL kernel caller offload entry point.
15002 // FIXME: The Itanium typeinfo mangling (_ZTS<type>) is currently used to
15003 // name the SYCL kernel caller offload entry point function. This mangling
15004 // does not suffice to clearly identify symbols that correspond to SYCL
15005 // kernel caller functions, nor is this mangling natural for targets that
15006 // use a non-Itanium ABI.
15007 std::string Buffer;
15008 Buffer.reserve(res_arg: 128);
15009 llvm::raw_string_ostream Out(Buffer);
15010 MC->mangleCanonicalTypeName(T: KernelNameType, Out);
15011 std::string KernelName = Out.str();
15012
15013 return {KernelNameType, FD, KernelName};
15014}
15015
15016void ASTContext::registerSYCLEntryPointFunction(FunctionDecl *FD) {
15017 // If the function declaration to register is invalid or dependent, the
15018 // registration attempt is ignored.
15019 if (FD->isInvalidDecl() || FD->isTemplated())
15020 return;
15021
15022 const auto *SKEPAttr = FD->getAttr<SYCLKernelEntryPointAttr>();
15023 assert(SKEPAttr && "Missing sycl_kernel_entry_point attribute");
15024
15025 // Be tolerant of multiple registration attempts so long as each attempt
15026 // is for the same entity. Callers are obligated to detect and diagnose
15027 // conflicting kernel names prior to calling this function.
15028 CanQualType KernelNameType = getCanonicalType(T: SKEPAttr->getKernelName());
15029 auto IT = SYCLKernels.find(Val: KernelNameType);
15030 assert((IT == SYCLKernels.end() ||
15031 declaresSameEntity(FD, IT->second.getKernelEntryPointDecl())) &&
15032 "SYCL kernel name conflict");
15033 (void)IT;
15034 SYCLKernels.insert(KV: std::make_pair(
15035 x&: KernelNameType, y: BuildSYCLKernelInfo(Context&: *this, KernelNameType, FD)));
15036}
15037
15038const SYCLKernelInfo &ASTContext::getSYCLKernelInfo(QualType T) const {
15039 CanQualType KernelNameType = getCanonicalType(T);
15040 return SYCLKernels.at(Val: KernelNameType);
15041}
15042
15043const SYCLKernelInfo *ASTContext::findSYCLKernelInfo(QualType T) const {
15044 CanQualType KernelNameType = getCanonicalType(T);
15045 auto IT = SYCLKernels.find(Val: KernelNameType);
15046 if (IT != SYCLKernels.end())
15047 return &IT->second;
15048 return nullptr;
15049}
15050
/// Allocate a fresh OMPTraitInfo owned by this ASTContext; it lives for the
/// lifetime of the context.
OMPTraitInfo &ASTContext::getNewOMPTraitInfo() {
  OMPTraitInfoVector.emplace_back(Args: new OMPTraitInfo());
  return *OMPTraitInfoVector.back();
}
15055
15056const StreamingDiagnostic &clang::
15057operator<<(const StreamingDiagnostic &DB,
15058 const ASTContext::SectionInfo &Section) {
15059 if (Section.Decl)
15060 return DB << Section.Decl;
15061 return DB << "a prior #pragma section";
15062}
15063
15064bool ASTContext::mayExternalize(const Decl *D) const {
15065 bool IsInternalVar =
15066 isa<VarDecl>(Val: D) &&
15067 basicGVALinkageForVariable(Context: *this, VD: cast<VarDecl>(Val: D)) == GVA_Internal;
15068 bool IsExplicitDeviceVar = (D->hasAttr<CUDADeviceAttr>() &&
15069 !D->getAttr<CUDADeviceAttr>()->isImplicit()) ||
15070 (D->hasAttr<CUDAConstantAttr>() &&
15071 !D->getAttr<CUDAConstantAttr>()->isImplicit());
15072 // CUDA/HIP: managed variables need to be externalized since it is
15073 // a declaration in IR, therefore cannot have internal linkage. Kernels in
15074 // anonymous name space needs to be externalized to avoid duplicate symbols.
15075 return (IsInternalVar &&
15076 (D->hasAttr<HIPManagedAttr>() || IsExplicitDeviceVar)) ||
15077 (D->hasAttr<CUDAGlobalAttr>() &&
15078 basicGVALinkageForFunction(Context: *this, FD: cast<FunctionDecl>(Val: D)) ==
15079 GVA_Internal);
15080}
15081
15082bool ASTContext::shouldExternalize(const Decl *D) const {
15083 return mayExternalize(D) &&
15084 (D->hasAttr<HIPManagedAttr>() || D->hasAttr<CUDAGlobalAttr>() ||
15085 CUDADeviceVarODRUsedByHost.count(V: cast<VarDecl>(Val: D)));
15086}
15087
15088StringRef ASTContext::getCUIDHash() const {
15089 if (!CUIDHash.empty())
15090 return CUIDHash;
15091 if (LangOpts.CUID.empty())
15092 return StringRef();
15093 CUIDHash = llvm::utohexstr(X: llvm::MD5Hash(Str: LangOpts.CUID), /*LowerCase=*/true);
15094 return CUIDHash;
15095}
15096
15097const CXXRecordDecl *
15098ASTContext::baseForVTableAuthentication(const CXXRecordDecl *ThisClass) {
15099 assert(ThisClass);
15100 assert(ThisClass->isPolymorphic());
15101 const CXXRecordDecl *PrimaryBase = ThisClass;
15102 while (1) {
15103 assert(PrimaryBase);
15104 assert(PrimaryBase->isPolymorphic());
15105 auto &Layout = getASTRecordLayout(D: PrimaryBase);
15106 auto Base = Layout.getPrimaryBase();
15107 if (!Base || Base == PrimaryBase || !Base->isPolymorphic())
15108 break;
15109 PrimaryBase = Base;
15110 }
15111 return PrimaryBase;
15112}
15113
/// Decide whether the thunk with \p MangledName for \p VirtualMethodDecl may
/// use the abbreviated (override-info-elided) mangling. For each elided name,
/// only one of the full thunk names that collapse to it (the
/// lexicographically smallest) is allowed to abbreviate; the result is cached
/// per method in ThunksToBeAbbreviated.
bool ASTContext::useAbbreviatedThunkName(GlobalDecl VirtualMethodDecl,
                                         StringRef MangledName) {
  auto *Method = cast<CXXMethodDecl>(Val: VirtualMethodDecl.getDecl());
  assert(Method->isVirtual());
  bool DefaultIncludesPointerAuth =
      LangOpts.PointerAuthCalls || LangOpts.PointerAuthIntrinsics;

  // Without pointer auth the mangling never carries override info, so the
  // abbreviated name is always usable.
  if (!DefaultIncludesPointerAuth)
    return true;

  // Use the cached answer if this method was already analyzed.
  auto Existing = ThunksToBeAbbreviated.find(Val: VirtualMethodDecl);
  if (Existing != ThunksToBeAbbreviated.end())
    return Existing->second.contains(key: MangledName.str());

  std::unique_ptr<MangleContext> Mangler(createMangleContext());
  llvm::StringMap<llvm::SmallVector<std::string, 2>> Thunks;
  auto VtableContext = getVTableContext();
  if (const auto *ThunkInfos = VtableContext->getThunkInfo(GD: VirtualMethodDecl)) {
    auto *Destructor = dyn_cast<CXXDestructorDecl>(Val: Method);
    for (const auto &Thunk : *ThunkInfos) {
      // Mangle each thunk twice: once with override info elided and once in
      // full, grouping the full names under their elided form.
      SmallString<256> ElidedName;
      llvm::raw_svector_ostream ElidedNameStream(ElidedName);
      if (Destructor)
        Mangler->mangleCXXDtorThunk(DD: Destructor, Type: VirtualMethodDecl.getDtorType(),
                                    Thunk, /* elideOverrideInfo */ ElideOverrideInfo: true,
                                    ElidedNameStream);
      else
        Mangler->mangleThunk(MD: Method, Thunk, /* elideOverrideInfo */ ElideOverrideInfo: true,
                             ElidedNameStream);
      SmallString<256> MangledName;
      llvm::raw_svector_ostream mangledNameStream(MangledName);
      if (Destructor)
        Mangler->mangleCXXDtorThunk(DD: Destructor, Type: VirtualMethodDecl.getDtorType(),
                                    Thunk, /* elideOverrideInfo */ ElideOverrideInfo: false,
                                    mangledNameStream);
      else
        Mangler->mangleThunk(MD: Method, Thunk, /* elideOverrideInfo */ ElideOverrideInfo: false,
                             mangledNameStream);

      Thunks[ElidedName].push_back(Elt: std::string(MangledName));
    }
  }
  // For each elided name, pick the smallest full name as the one allowed to
  // abbreviate, and cache the resulting set.
  llvm::StringSet<> SimplifiedThunkNames;
  for (auto &ThunkList : Thunks) {
    llvm::sort(C&: ThunkList.second);
    SimplifiedThunkNames.insert(key: ThunkList.second[0]);
  }
  bool Result = SimplifiedThunkNames.contains(key: MangledName);
  ThunksToBeAbbreviated[VirtualMethodDecl] = std::move(SimplifiedThunkNames);
  return Result;
}
15165