1//===- ASTContext.cpp - Context to hold long-lived AST nodes --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the ASTContext interface.
10//
11//===----------------------------------------------------------------------===//
12
13#include "clang/AST/ASTContext.h"
14#include "ByteCode/Context.h"
15#include "CXXABI.h"
16#include "clang/AST/APValue.h"
17#include "clang/AST/ASTConcept.h"
18#include "clang/AST/ASTMutationListener.h"
19#include "clang/AST/ASTStructuralEquivalence.h"
20#include "clang/AST/ASTTypeTraits.h"
21#include "clang/AST/Attr.h"
22#include "clang/AST/AttrIterator.h"
23#include "clang/AST/CharUnits.h"
24#include "clang/AST/Comment.h"
25#include "clang/AST/Decl.h"
26#include "clang/AST/DeclBase.h"
27#include "clang/AST/DeclCXX.h"
28#include "clang/AST/DeclContextInternals.h"
29#include "clang/AST/DeclObjC.h"
30#include "clang/AST/DeclOpenMP.h"
31#include "clang/AST/DeclTemplate.h"
32#include "clang/AST/DeclarationName.h"
33#include "clang/AST/DependenceFlags.h"
34#include "clang/AST/Expr.h"
35#include "clang/AST/ExprCXX.h"
36#include "clang/AST/ExternalASTSource.h"
37#include "clang/AST/Mangle.h"
38#include "clang/AST/MangleNumberingContext.h"
39#include "clang/AST/NestedNameSpecifier.h"
40#include "clang/AST/ParentMapContext.h"
41#include "clang/AST/RawCommentList.h"
42#include "clang/AST/RecordLayout.h"
43#include "clang/AST/Stmt.h"
44#include "clang/AST/TemplateBase.h"
45#include "clang/AST/TemplateName.h"
46#include "clang/AST/Type.h"
47#include "clang/AST/TypeLoc.h"
48#include "clang/AST/UnresolvedSet.h"
49#include "clang/AST/VTableBuilder.h"
50#include "clang/Basic/AddressSpaces.h"
51#include "clang/Basic/Builtins.h"
52#include "clang/Basic/CommentOptions.h"
53#include "clang/Basic/ExceptionSpecificationType.h"
54#include "clang/Basic/IdentifierTable.h"
55#include "clang/Basic/LLVM.h"
56#include "clang/Basic/LangOptions.h"
57#include "clang/Basic/Linkage.h"
58#include "clang/Basic/Module.h"
59#include "clang/Basic/NoSanitizeList.h"
60#include "clang/Basic/ObjCRuntime.h"
61#include "clang/Basic/ProfileList.h"
62#include "clang/Basic/SourceLocation.h"
63#include "clang/Basic/SourceManager.h"
64#include "clang/Basic/Specifiers.h"
65#include "clang/Basic/TargetCXXABI.h"
66#include "clang/Basic/TargetInfo.h"
67#include "clang/Basic/XRayLists.h"
68#include "llvm/ADT/APFixedPoint.h"
69#include "llvm/ADT/APInt.h"
70#include "llvm/ADT/APSInt.h"
71#include "llvm/ADT/ArrayRef.h"
72#include "llvm/ADT/DenseMap.h"
73#include "llvm/ADT/DenseSet.h"
74#include "llvm/ADT/FoldingSet.h"
75#include "llvm/ADT/PointerUnion.h"
76#include "llvm/ADT/STLExtras.h"
77#include "llvm/ADT/SmallPtrSet.h"
78#include "llvm/ADT/SmallVector.h"
79#include "llvm/ADT/StringExtras.h"
80#include "llvm/ADT/StringRef.h"
81#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
82#include "llvm/Support/Capacity.h"
83#include "llvm/Support/Casting.h"
84#include "llvm/Support/Compiler.h"
85#include "llvm/Support/ErrorHandling.h"
86#include "llvm/Support/MD5.h"
87#include "llvm/Support/MathExtras.h"
88#include "llvm/Support/SipHash.h"
89#include "llvm/Support/raw_ostream.h"
90#include "llvm/TargetParser/AArch64TargetParser.h"
91#include "llvm/TargetParser/Triple.h"
92#include <algorithm>
93#include <cassert>
94#include <cstddef>
95#include <cstdint>
96#include <cstdlib>
97#include <map>
98#include <memory>
99#include <optional>
100#include <string>
101#include <tuple>
102#include <utility>
103
104using namespace clang;
105
/// Ranks used to order floating-point types by conversion rank.
/// NOTE(review): the relative order of BFloat16/Float16/Half presumably
/// mirrors what getFloatingRank() assigns — confirm against that function
/// before reordering.
enum FloatingRank {
  BFloat16Rank,
  Float16Rank,
  HalfRank,
  FloatRank,
  DoubleRank,
  LongDoubleRank,
  Float128Rank,
  Ibm128Rank
};
116
117/// \returns The locations that are relevant when searching for Doc comments
118/// related to \p D.
/// \returns The locations that are relevant when searching for Doc comments
/// related to \p D.
///
/// Usually a single location; for declarations produced by a macro expansion
/// up to two candidates are returned (the expansion site first, then the
/// spelling location inside the macro). An empty result means \p D can never
/// carry documentation (implicit declarations, implicit instantiations,
/// function parameters, template parameters, ...).
static SmallVector<SourceLocation, 2>
getDeclLocsForCommentSearch(const Decl *D, SourceManager &SourceMgr) {
  assert(D);

  // User can not attach documentation to implicit declarations.
  if (D->isImplicit())
    return {};

  // User can not attach documentation to implicit instantiations.
  if (const auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
    if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *VD = dyn_cast<VarDecl>(Val: D)) {
    if (VD->isStaticDataMember() &&
        VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CRD = dyn_cast<CXXRecordDecl>(Val: D)) {
    if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(Val: D)) {
    TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
    if (TSK == TSK_ImplicitInstantiation ||
        TSK == TSK_Undeclared)
      return {};
  }

  if (const auto *ED = dyn_cast<EnumDecl>(Val: D)) {
    if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }
  if (const auto *TD = dyn_cast<TagDecl>(Val: D)) {
    // When tag declaration (but not definition!) is part of the
    // decl-specifier-seq of some other declaration, it doesn't get comment
    if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
      return {};
  }
  // TODO: handle comments for function parameters properly.
  if (isa<ParmVarDecl>(Val: D))
    return {};

  // TODO: we could look up template parameter documentation in the template
  // documentation.
  if (isa<TemplateTypeParmDecl>(Val: D) ||
      isa<NonTypeTemplateParmDecl>(Val: D) ||
      isa<TemplateTemplateParmDecl>(Val: D))
    return {};

  SmallVector<SourceLocation, 2> Locations;
  // Find declaration location.
  // For Objective-C declarations we generally don't expect to have multiple
  // declarators, thus use declaration starting location as the "declaration
  // location".
  // For all other declarations multiple declarators are used quite frequently,
  // so we use the location of the identifier as the "declaration location".
  SourceLocation BaseLocation;
  if (isa<ObjCMethodDecl>(Val: D) || isa<ObjCContainerDecl>(Val: D) ||
      isa<ObjCPropertyDecl>(Val: D) || isa<RedeclarableTemplateDecl>(Val: D) ||
      isa<ClassTemplateSpecializationDecl>(Val: D) ||
      // Allow association with Y across {} in `typedef struct X {} Y`.
      isa<TypedefDecl>(Val: D))
    BaseLocation = D->getBeginLoc();
  else
    BaseLocation = D->getLocation();

  if (!D->getLocation().isMacroID()) {
    Locations.emplace_back(Args&: BaseLocation);
  } else {
    const auto *DeclCtx = D->getDeclContext();

    // When encountering definitions generated from a macro (that are not
    // contained by another declaration in the macro) we need to try and find
    // the comment at the location of the expansion but if there is no comment
    // there we should retry to see if there is a comment inside the macro as
    // well. To this end we return first BaseLocation to first look at the
    // expansion site, the second value is the spelling location of the
    // beginning of the declaration defined inside the macro.
    if (!(DeclCtx &&
          Decl::castFromDeclContext(DeclCtx)->getLocation().isMacroID())) {
      Locations.emplace_back(Args: SourceMgr.getExpansionLoc(Loc: BaseLocation));
    }

    // We use Decl::getBeginLoc() and not just BaseLocation here to ensure that
    // we don't refer to the macro argument location at the expansion site (this
    // can happen if the name's spelling is provided via macro argument), and
    // always to the declaration itself.
    Locations.emplace_back(Args: SourceMgr.getSpellingLoc(Loc: D->getBeginLoc()));
  }

  return Locations;
}
215
/// Searches one file's comments for the doc comment belonging to \p D.
///
/// \param RepresentativeLocForDecl one of the candidate locations produced by
///        getDeclLocsForCommentSearch().
/// \param CommentsInTheFile map from file offset to raw comment for the file
///        containing \p RepresentativeLocForDecl.
/// \returns either a trailing comment on the declaration's own line, or the
///          doc comment immediately preceding the declaration (with nothing
///          but whitespace between them), or nullptr.
RawComment *ASTContext::getRawCommentForDeclNoCacheImpl(
    const Decl *D, const SourceLocation RepresentativeLocForDecl,
    const std::map<unsigned, RawComment *> &CommentsInTheFile) const {
  // If the declaration doesn't map directly to a location in a file, we
  // can't find the comment.
  if (RepresentativeLocForDecl.isInvalid() ||
      !RepresentativeLocForDecl.isFileID())
    return nullptr;

  // If there are no comments anywhere, we won't find anything.
  if (CommentsInTheFile.empty())
    return nullptr;

  // Decompose the location for the declaration and find the beginning of the
  // file buffer.
  const FileIDAndOffset DeclLocDecomp =
      SourceMgr.getDecomposedLoc(Loc: RepresentativeLocForDecl);

  // Slow path.
  auto OffsetCommentBehindDecl =
      CommentsInTheFile.lower_bound(x: DeclLocDecomp.second);

  // First check whether we have a trailing comment.
  if (OffsetCommentBehindDecl != CommentsInTheFile.end()) {
    RawComment *CommentBehindDecl = OffsetCommentBehindDecl->second;
    // Trailing comments are only attached to declaration kinds that commonly
    // sit alone on a line (fields, enumerators, variables, ObjC members).
    if ((CommentBehindDecl->isDocumentation() ||
         LangOpts.CommentOpts.ParseAllComments) &&
        CommentBehindDecl->isTrailingComment() &&
        (isa<FieldDecl>(Val: D) || isa<EnumConstantDecl>(Val: D) || isa<VarDecl>(Val: D) ||
         isa<ObjCMethodDecl>(Val: D) || isa<ObjCPropertyDecl>(Val: D))) {

      // Check that Doxygen trailing comment comes after the declaration, starts
      // on the same line and in the same file as the declaration.
      if (SourceMgr.getLineNumber(FID: DeclLocDecomp.first, FilePos: DeclLocDecomp.second) ==
          Comments.getCommentBeginLine(C: CommentBehindDecl, File: DeclLocDecomp.first,
                                       Offset: OffsetCommentBehindDecl->first)) {
        return CommentBehindDecl;
      }
    }
  }

  // The comment just after the declaration was not a trailing comment.
  // Let's look at the previous comment.
  if (OffsetCommentBehindDecl == CommentsInTheFile.begin())
    return nullptr;

  auto OffsetCommentBeforeDecl = --OffsetCommentBehindDecl;
  RawComment *CommentBeforeDecl = OffsetCommentBeforeDecl->second;

  // Check that we actually have a non-member Doxygen comment.
  if (!(CommentBeforeDecl->isDocumentation() ||
        LangOpts.CommentOpts.ParseAllComments) ||
      CommentBeforeDecl->isTrailingComment())
    return nullptr;

  // Decompose the end of the comment.
  const unsigned CommentEndOffset =
      Comments.getCommentEndOffset(C: CommentBeforeDecl);

  // Get the corresponding buffer.
  bool Invalid = false;
  const char *Buffer = SourceMgr.getBufferData(FID: DeclLocDecomp.first,
                                               Invalid: &Invalid).data();
  if (Invalid)
    return nullptr;

  // Extract text between the comment and declaration.
  StringRef Text(Buffer + CommentEndOffset,
                 DeclLocDecomp.second - CommentEndOffset);

  // There should be no other declarations or preprocessor directives between
  // comment and declaration.
  if (Text.find_last_of(Chars: ";{}#@") != StringRef::npos)
    return nullptr;

  return CommentBeforeDecl;
}
293
/// Finds the doc comment for \p D without consulting the decl->comment cache.
/// Tries each candidate location from getDeclLocsForCommentSearch() in order,
/// loading externally-stored comments on first use.
RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
  const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr);

  for (const auto DeclLoc : DeclLocs) {
    // If the declaration doesn't map directly to a location in a file, we
    // can't find the comment.
    if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
      continue;

    // Pull in comments from an AST file the first time any are needed.
    if (ExternalSource && !CommentsLoaded) {
      ExternalSource->ReadComments();
      CommentsLoaded = true;
    }

    if (Comments.empty())
      continue;

    const FileID File = SourceMgr.getDecomposedLoc(Loc: DeclLoc).first;
    if (!File.isValid())
      continue;

    const auto CommentsInThisFile = Comments.getCommentsInFile(File);
    if (!CommentsInThisFile || CommentsInThisFile->empty())
      continue;

    if (RawComment *Comment =
            getRawCommentForDeclNoCacheImpl(D, RepresentativeLocForDecl: DeclLoc, CommentsInTheFile: *CommentsInThisFile))
      return Comment;
  }

  return nullptr;
}
326
/// Records a comment encountered during parsing. Callers are expected to have
/// filtered out system-header comments unless
/// -fretain-comments-from-system-headers is in effect.
void ASTContext::addComment(const RawComment &RC) {
  assert(LangOpts.RetainCommentsFromSystemHeaders ||
         !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin()));
  Comments.addComment(RC, CommentOpts: LangOpts.CommentOpts, Allocator&: BumpAlloc);
}
332
333const RawComment *ASTContext::getRawCommentForAnyRedecl(
334 const Decl *D,
335 const Decl **OriginalDecl) const {
336 if (!D) {
337 if (OriginalDecl)
338 OriginalDecl = nullptr;
339 return nullptr;
340 }
341
342 D = &adjustDeclToTemplate(D: *D);
343
344 // Any comment directly attached to D?
345 {
346 auto DeclComment = DeclRawComments.find(Val: D);
347 if (DeclComment != DeclRawComments.end()) {
348 if (OriginalDecl)
349 *OriginalDecl = D;
350 return DeclComment->second;
351 }
352 }
353
354 // Any comment attached to any redeclaration of D?
355 const Decl *CanonicalD = D->getCanonicalDecl();
356 if (!CanonicalD)
357 return nullptr;
358
359 {
360 auto RedeclComment = RedeclChainComments.find(Val: CanonicalD);
361 if (RedeclComment != RedeclChainComments.end()) {
362 if (OriginalDecl)
363 *OriginalDecl = RedeclComment->second;
364 auto CommentAtRedecl = DeclRawComments.find(Val: RedeclComment->second);
365 assert(CommentAtRedecl != DeclRawComments.end() &&
366 "This decl is supposed to have comment attached.");
367 return CommentAtRedecl->second;
368 }
369 }
370
371 // Any redeclarations of D that we haven't checked for comments yet?
372 const Decl *LastCheckedRedecl = [&]() {
373 const Decl *LastChecked = CommentlessRedeclChains.lookup(Val: CanonicalD);
374 bool CanUseCommentlessCache = false;
375 if (LastChecked) {
376 for (auto *Redecl : CanonicalD->redecls()) {
377 if (Redecl == D) {
378 CanUseCommentlessCache = true;
379 break;
380 }
381 if (Redecl == LastChecked)
382 break;
383 }
384 }
385 // FIXME: This could be improved so that even if CanUseCommentlessCache
386 // is false, once we've traversed past CanonicalD we still skip ahead
387 // LastChecked.
388 return CanUseCommentlessCache ? LastChecked : nullptr;
389 }();
390
391 for (const Decl *Redecl : D->redecls()) {
392 assert(Redecl);
393 // Skip all redeclarations that have been checked previously.
394 if (LastCheckedRedecl) {
395 if (LastCheckedRedecl == Redecl) {
396 LastCheckedRedecl = nullptr;
397 }
398 continue;
399 }
400 const RawComment *RedeclComment = getRawCommentForDeclNoCache(D: Redecl);
401 if (RedeclComment) {
402 cacheRawCommentForDecl(OriginalD: *Redecl, Comment: *RedeclComment);
403 if (OriginalDecl)
404 *OriginalDecl = Redecl;
405 return RedeclComment;
406 }
407 CommentlessRedeclChains[CanonicalD] = Redecl;
408 }
409
410 if (OriginalDecl)
411 *OriginalDecl = nullptr;
412 return nullptr;
413}
414
/// Records that \p Comment is the documentation attached to \p OriginalD.
/// Also marks the whole redeclaration chain as having a comment and drops any
/// stale "commentless chain" cache entry for it.
void ASTContext::cacheRawCommentForDecl(const Decl &OriginalD,
                                        const RawComment &Comment) const {
  assert(Comment.isDocumentation() || LangOpts.CommentOpts.ParseAllComments);
  DeclRawComments.try_emplace(Key: &OriginalD, Args: &Comment);
  const Decl *const CanonicalDecl = OriginalD.getCanonicalDecl();
  RedeclChainComments.try_emplace(Key: CanonicalDecl, Args: &OriginalD);
  CommentlessRedeclChains.erase(Val: CanonicalDecl);
}
423
424static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
425 SmallVectorImpl<const NamedDecl *> &Redeclared) {
426 const DeclContext *DC = ObjCMethod->getDeclContext();
427 if (const auto *IMD = dyn_cast<ObjCImplDecl>(Val: DC)) {
428 const ObjCInterfaceDecl *ID = IMD->getClassInterface();
429 if (!ID)
430 return;
431 // Add redeclared method here.
432 for (const auto *Ext : ID->known_extensions()) {
433 if (ObjCMethodDecl *RedeclaredMethod =
434 Ext->getMethod(Sel: ObjCMethod->getSelector(),
435 isInstance: ObjCMethod->isInstanceMethod()))
436 Redeclared.push_back(Elt: RedeclaredMethod);
437 }
438 }
439}
440
/// Tries to attach any not-yet-attached comments in the file containing
/// \p Decls to those declarations, parsing and caching each match.
/// All of \p Decls are assumed to live in the same file.
/// \param PP preprocessor used when parsing the comment text; may be null.
void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls,
                                                 const Preprocessor *PP) {
  if (Comments.empty() || Decls.empty())
    return;

  FileID File;
  for (const Decl *D : Decls) {
    if (D->isInvalidDecl())
      continue;

    D = &adjustDeclToTemplate(D: *D);
    SourceLocation Loc = D->getLocation();
    if (Loc.isValid()) {
      // See if there are any new comments that are not attached to a decl.
      // The location doesn't have to be precise - we care only about the file.
      File = SourceMgr.getDecomposedLoc(Loc).first;
      break;
    }
  }

  if (File.isInvalid())
    return;

  // If the last comment in the file is already attached, there is nothing
  // new to hand out.
  auto CommentsInThisFile = Comments.getCommentsInFile(File);
  if (!CommentsInThisFile || CommentsInThisFile->empty() ||
      CommentsInThisFile->rbegin()->second->isAttached())
    return;

  // There is at least one comment not attached to a decl.
  // Maybe it should be attached to one of Decls?
  //
  // Note that this way we pick up not only comments that precede the
  // declaration, but also comments that *follow* the declaration -- thanks to
  // the lookahead in the lexer: we've consumed the semicolon and looked
  // ahead through comments.
  for (const Decl *D : Decls) {
    assert(D);
    if (D->isInvalidDecl())
      continue;

    D = &adjustDeclToTemplate(D: *D);

    // Skip declarations that already have a comment attached.
    if (DeclRawComments.count(Val: D) > 0)
      continue;

    const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr);

    for (const auto DeclLoc : DeclLocs) {
      if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
        continue;

      if (RawComment *const DocComment = getRawCommentForDeclNoCacheImpl(
              D, RepresentativeLocForDecl: DeclLoc, CommentsInTheFile: *CommentsInThisFile)) {
        cacheRawCommentForDecl(OriginalD: *D, Comment: *DocComment);
        comments::FullComment *FC = DocComment->parse(Context: *this, PP, D);
        ParsedComments[D->getCanonicalDecl()] = FC;
        break;
      }
    }
  }
}
502
503comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
504 const Decl *D) const {
505 auto *ThisDeclInfo = new (*this) comments::DeclInfo;
506 ThisDeclInfo->CommentDecl = D;
507 ThisDeclInfo->IsFilled = false;
508 ThisDeclInfo->fill();
509 ThisDeclInfo->CommentDecl = FC->getDecl();
510 if (!ThisDeclInfo->TemplateParameters)
511 ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
512 comments::FullComment *CFC =
513 new (*this) comments::FullComment(FC->getBlocks(),
514 ThisDeclInfo);
515 return CFC;
516}
517
518comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const {
519 const RawComment *RC = getRawCommentForDeclNoCache(D);
520 return RC ? RC->parse(Context: *this, PP: nullptr, D) : nullptr;
521}
522
/// Returns the parsed documentation for \p D, parsing and caching on demand.
///
/// When \p D itself has no comment, documentation is inherited from related
/// declarations: the property for ObjC accessors, overridden/redeclared
/// methods, the underlying tag type for typedefs, ObjC superclasses and
/// category interfaces, and public bases for C++ classes. Inherited comments
/// are cloned so they are attributed to \p D.
comments::FullComment *ASTContext::getCommentForDecl(
                                              const Decl *D,
                                              const Preprocessor *PP) const {
  if (!D || D->isInvalidDecl())
    return nullptr;
  D = &adjustDeclToTemplate(D: *D);

  const Decl *Canonical = D->getCanonicalDecl();
  llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
      ParsedComments.find(Val: Canonical);

  if (Pos != ParsedComments.end()) {
    // A cached comment for another redeclaration must be re-attributed to D.
    if (Canonical != D) {
      comments::FullComment *FC = Pos->second;
      comments::FullComment *CFC = cloneFullComment(FC, D);
      return CFC;
    }
    return Pos->second;
  }

  const Decl *OriginalDecl = nullptr;

  const RawComment *RC = getRawCommentForAnyRedecl(D, OriginalDecl: &OriginalDecl);
  if (!RC) {
    // No comment on any redeclaration: try to inherit one.
    if (isa<ObjCMethodDecl>(Val: D) || isa<FunctionDecl>(Val: D)) {
      SmallVector<const NamedDecl*, 8> Overridden;
      const auto *OMD = dyn_cast<ObjCMethodDecl>(Val: D);
      if (OMD && OMD->isPropertyAccessor())
        if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
          if (comments::FullComment *FC = getCommentForDecl(D: PDecl, PP))
            return cloneFullComment(FC, D);
      if (OMD)
        addRedeclaredMethods(ObjCMethod: OMD, Redeclared&: Overridden);
      getOverriddenMethods(Method: dyn_cast<NamedDecl>(Val: D), Overridden);
      for (unsigned i = 0, e = Overridden.size(); i < e; i++)
        if (comments::FullComment *FC = getCommentForDecl(D: Overridden[i], PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *TD = dyn_cast<TypedefNameDecl>(Val: D)) {
      // Attach any tag type's documentation to its typedef if latter
      // does not have one of its own.
      QualType QT = TD->getUnderlyingType();
      if (const auto *TT = QT->getAs<TagType>())
        if (comments::FullComment *FC = getCommentForDecl(D: TT->getDecl(), PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(Val: D)) {
      // Walk up the superclass chain until a documented class is found.
      while (IC->getSuperClass()) {
        IC = IC->getSuperClass();
        if (comments::FullComment *FC = getCommentForDecl(D: IC, PP))
          return cloneFullComment(FC, D);
      }
    }
    else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(Val: D)) {
      if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
        if (comments::FullComment *FC = getCommentForDecl(D: IC, PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *RD = dyn_cast<CXXRecordDecl>(Val: D)) {
      if (!(RD = RD->getDefinition()))
        return nullptr;
      // Check non-virtual bases.
      for (const auto &I : RD->bases()) {
        if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(NonVirtualBase= NonVirtualBase->getDefinition()))
            continue;

          if (comments::FullComment *FC = getCommentForDecl(D: (NonVirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
      // Check virtual bases.
      for (const auto &I : RD->vbases()) {
        if (I.getAccessSpecifier() != AS_public)
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(VirtualBase= VirtualBase->getDefinition()))
            continue;
          if (comments::FullComment *FC = getCommentForDecl(D: (VirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
    }
    return nullptr;
  }

  // If the RawComment was attached to other redeclaration of this Decl, we
  // should parse the comment in context of that other Decl. This is important
  // because comments can contain references to parameter names which can be
  // different across redeclarations.
  if (D != OriginalDecl && OriginalDecl)
    return getCommentForDecl(D: OriginalDecl, PP);

  comments::FullComment *FC = RC->parse(Context: *this, PP, D);
  ParsedComments[Canonical] = FC;
  return FC;
}
628
/// Profiles a template template parameter for the canonical-parameter folding
/// set: its depth, position, pack-ness, kind, and a structural description of
/// each parameter in its template parameter list. The integer tags 0/1/2
/// discriminate type / non-type / template template parameters so that
/// structurally different lists never collide.
void ASTContext::CanonicalTemplateTemplateParm::Profile(
    llvm::FoldingSetNodeID &ID, const ASTContext &C,
    TemplateTemplateParmDecl *Parm) {
  ID.AddInteger(I: Parm->getDepth());
  ID.AddInteger(I: Parm->getPosition());
  ID.AddBoolean(B: Parm->isParameterPack());
  ID.AddInteger(I: Parm->templateParameterKind());

  TemplateParameterList *Params = Parm->getTemplateParameters();
  ID.AddInteger(I: Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                          PEnd = Params->end();
       P != PEnd; ++P) {
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Val: *P)) {
      ID.AddInteger(I: 0);
      ID.AddBoolean(B: TTP->isParameterPack());
      ID.AddInteger(
          I: TTP->getNumExpansionParameters().toInternalRepresentation());
      continue;
    }

    if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Val: *P)) {
      ID.AddInteger(I: 1);
      ID.AddBoolean(B: NTTP->isParameterPack());
      // Profile by the unconstrained canonical type so that constraints are
      // ignored for equivalence, per C++20 [temp.over.link].
      ID.AddPointer(Ptr: C.getUnconstrainedType(T: C.getCanonicalType(T: NTTP->getType()))
                        .getAsOpaquePtr());
      if (NTTP->isExpandedParameterPack()) {
        ID.AddBoolean(B: true);
        ID.AddInteger(I: NTTP->getNumExpansionTypes());
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          QualType T = NTTP->getExpansionType(I);
          ID.AddPointer(Ptr: T.getCanonicalType().getAsOpaquePtr());
        }
      } else
        ID.AddBoolean(B: false);
      continue;
    }

    // Template template parameters recurse into this same profile.
    auto *TTP = cast<TemplateTemplateParmDecl>(Val: *P);
    ID.AddInteger(I: 2);
    Profile(ID, C, Parm: TTP);
  }
}
672
/// Returns the canonical declaration for the template template parameter
/// \p TTP, building one (with a fully canonicalized template parameter list)
/// and caching it in CanonTemplateTemplateParms if it does not exist yet.
TemplateTemplateParmDecl *
ASTContext::getCanonicalTemplateTemplateParmDecl(
                                          TemplateTemplateParmDecl *TTP) const {
  // Check if we already have a canonical template template parameter.
  llvm::FoldingSetNodeID ID;
  CanonicalTemplateTemplateParm::Profile(ID, C: *this, Parm: TTP);
  void *InsertPos = nullptr;
  CanonicalTemplateTemplateParm *Canonical
    = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  if (Canonical)
    return Canonical->getParam();

  // Build a canonical template parameter list.
  TemplateParameterList *Params = TTP->getTemplateParameters();
  SmallVector<NamedDecl *, 4> CanonParams;
  CanonParams.reserve(N: Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                          PEnd = Params->end();
       P != PEnd; ++P) {
    // Note that, per C++20 [temp.over.link]/6, when determining whether
    // template-parameters are equivalent, constraints are ignored.
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Val: *P)) {
      TemplateTypeParmDecl *NewTTP = TemplateTypeParmDecl::Create(
          C: *this, DC: getTranslationUnitDecl(), KeyLoc: SourceLocation(), NameLoc: SourceLocation(),
          D: TTP->getDepth(), P: TTP->getIndex(), Id: nullptr, Typename: false,
          ParameterPack: TTP->isParameterPack(), /*HasTypeConstraint=*/false,
          NumExpanded: TTP->getNumExpansionParameters());
      CanonParams.push_back(Elt: NewTTP);
    } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Val: *P)) {
      // Non-type parameters are canonicalized on their unconstrained
      // canonical type; expanded packs canonicalize each expansion type.
      QualType T = getUnconstrainedType(T: getCanonicalType(T: NTTP->getType()));
      TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
      NonTypeTemplateParmDecl *Param;
      if (NTTP->isExpandedParameterPack()) {
        SmallVector<QualType, 2> ExpandedTypes;
        SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          ExpandedTypes.push_back(Elt: getCanonicalType(T: NTTP->getExpansionType(I)));
          ExpandedTInfos.push_back(
              Elt: getTrivialTypeSourceInfo(T: ExpandedTypes.back()));
        }

        Param = NonTypeTemplateParmDecl::Create(C: *this, DC: getTranslationUnitDecl(),
                                                StartLoc: SourceLocation(),
                                                IdLoc: SourceLocation(),
                                                D: NTTP->getDepth(),
                                                P: NTTP->getPosition(), Id: nullptr,
                                                T,
                                                TInfo,
                                                ExpandedTypes,
                                                ExpandedTInfos);
      } else {
        Param = NonTypeTemplateParmDecl::Create(C: *this, DC: getTranslationUnitDecl(),
                                                StartLoc: SourceLocation(),
                                                IdLoc: SourceLocation(),
                                                D: NTTP->getDepth(),
                                                P: NTTP->getPosition(), Id: nullptr,
                                                T,
                                                ParameterPack: NTTP->isParameterPack(),
                                                TInfo);
      }
      CanonParams.push_back(Elt: Param);
    } else
      CanonParams.push_back(Elt: getCanonicalTemplateTemplateParmDecl(
                                           TTP: cast<TemplateTemplateParmDecl>(Val: *P)));
  }

  TemplateTemplateParmDecl *CanonTTP = TemplateTemplateParmDecl::Create(
      C: *this, DC: getTranslationUnitDecl(), L: SourceLocation(), D: TTP->getDepth(),
      P: TTP->getPosition(), ParameterPack: TTP->isParameterPack(), Id: nullptr,
      ParameterKind: TTP->templateParameterKind(),
      /*Typename=*/false,
      Params: TemplateParameterList::Create(C: *this, TemplateLoc: SourceLocation(), LAngleLoc: SourceLocation(),
                                    Params: CanonParams, RAngleLoc: SourceLocation(),
                                    /*RequiresClause=*/nullptr));

  // Get the new insert position for the node we care about.
  Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  assert(!Canonical && "Shouldn't be in the map!");
  (void)Canonical;

  // Create the canonical template template parameter entry.
  Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
  CanonTemplateTemplateParms.InsertNode(N: Canonical, InsertPos);
  return CanonTTP;
}
758
759TemplateTemplateParmDecl *
760ASTContext::findCanonicalTemplateTemplateParmDeclInternal(
761 TemplateTemplateParmDecl *TTP) const {
762 llvm::FoldingSetNodeID ID;
763 CanonicalTemplateTemplateParm::Profile(ID, C: *this, Parm: TTP);
764 void *InsertPos = nullptr;
765 CanonicalTemplateTemplateParm *Canonical =
766 CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
767 return Canonical ? Canonical->getParam() : nullptr;
768}
769
770TemplateTemplateParmDecl *
771ASTContext::insertCanonicalTemplateTemplateParmDeclInternal(
772 TemplateTemplateParmDecl *CanonTTP) const {
773 llvm::FoldingSetNodeID ID;
774 CanonicalTemplateTemplateParm::Profile(ID, C: *this, Parm: CanonTTP);
775 void *InsertPos = nullptr;
776 if (auto *Existing =
777 CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos))
778 return Existing->getParam();
779 CanonTemplateTemplateParms.InsertNode(
780 N: new (*this) CanonicalTemplateTemplateParm(CanonTTP), InsertPos);
781 return CanonTTP;
782}
783
784/// Check if a type can have its sanitizer instrumentation elided based on its
785/// presence within an ignorelist.
786bool ASTContext::isTypeIgnoredBySanitizer(const SanitizerMask &Mask,
787 const QualType &Ty) const {
788 std::string TyName = Ty.getUnqualifiedType().getAsString(Policy: getPrintingPolicy());
789 return NoSanitizeL->containsType(Mask, MangledTypeName: TyName);
790}
791
792TargetCXXABI::Kind ASTContext::getCXXABIKind() const {
793 auto Kind = getTargetInfo().getCXXABI().getKind();
794 return getLangOpts().CXXABI.value_or(u&: Kind);
795}
796
/// Creates the C++ ABI object used for ABI-specific semantic analysis,
/// selected by getCXXABIKind(). Returns null when not compiling C++.
/// NOTE(review): the TargetInfo parameter \p T is unused here — presumably
/// kept for signature compatibility; confirm before removing.
CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
  if (!LangOpts.CPlusPlus) return nullptr;

  switch (getCXXABIKind()) {
  // All of these ABIs behave like Itanium at this level of abstraction.
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericARM: // Same as Itanium at this level
  case TargetCXXABI::iOS:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::XL:
    return CreateItaniumCXXABI(Ctx&: *this);
  case TargetCXXABI::Microsoft:
    return CreateMicrosoftCXXABI(Ctx&: *this);
  }
  llvm_unreachable("Invalid CXXABI type!");
}
817
818interp::Context &ASTContext::getInterpContext() {
819 if (!InterpContext) {
820 InterpContext.reset(p: new interp::Context(*this));
821 }
822 return *InterpContext;
823}
824
825ParentMapContext &ASTContext::getParentMapContext() {
826 if (!ParentMapCtx)
827 ParentMapCtx.reset(p: new ParentMapContext(*this));
828 return *ParentMapCtx;
829}
830
831static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
832 const LangOptions &LangOpts) {
833 switch (LangOpts.getAddressSpaceMapMangling()) {
834 case LangOptions::ASMM_Target:
835 return TI.useAddressSpaceMapMangling();
836 case LangOptions::ASMM_On:
837 return true;
838 case LangOptions::ASMM_Off:
839 return false;
840 }
841 llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
842}
843
/// Constructs an ASTContext over the given source manager, tables, and
/// builtin context. The dependent-type folding sets are seeded with this_()
/// so they can canonicalize through this context; the sanitizer ignore-list,
/// XRay filter, and profile list are built up front from the file lists in
/// the language options. Finishes by creating the translation-unit decl.
ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
                       IdentifierTable &idents, SelectorTable &sels,
                       Builtin::Context &builtins, TranslationUnitKind TUKind)
    : ConstantArrayTypes(this_(), ConstantArrayTypesLog2InitSize),
      DependentSizedArrayTypes(this_()), DependentSizedExtVectorTypes(this_()),
      DependentAddressSpaceTypes(this_()), DependentVectorTypes(this_()),
      DependentSizedMatrixTypes(this_()),
      FunctionProtoTypes(this_(), FunctionProtoTypesLog2InitSize),
      DependentTypeOfExprTypes(this_()), DependentDecltypeTypes(this_()),
      DependentPackIndexingTypes(this_()), TemplateSpecializationTypes(this_()),
      DependentBitIntTypes(this_()), SubstTemplateTemplateParmPacks(this_()),
      DeducedTemplates(this_()), ArrayParameterTypes(this_()),
      CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts),
      NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)),
      XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
                                        LangOpts.XRayNeverInstrumentFiles,
                                        LangOpts.XRayAttrListFiles, SM)),
      ProfList(new ProfileList(LangOpts.ProfileListFiles, SM)),
      PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
      BuiltinInfo(builtins), TUKind(TUKind), DeclarationNames(*this),
      Comments(SM), CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
      CompCategories(this_()), LastSDM(nullptr, 0) {
  addTranslationUnitDecl();
}
868
/// Tear down the context's side tables.  Most AST nodes live on the bump
/// allocator and are never individually freed; only objects that own heap
/// memory of their own (record layouts, attribute vectors, module
/// initializer lists) need their destructors run explicitly here.
void ASTContext::cleanup() {
  // Release the DenseMaps associated with DeclContext objects.
  // FIXME: Is this the ideal solution?
  ReleaseDeclContextMaps();

  // Call all of the deallocation functions on all of their targets.
  for (auto &Pair : Deallocations)
    (Pair.first)(Pair.second);
  Deallocations.clear();

  // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
  // because they can contain DenseMaps.
  for (llvm::DenseMap<const ObjCInterfaceDecl *,
                      const ASTRecordLayout *>::iterator
           I = ObjCLayouts.begin(),
           E = ObjCLayouts.end();
       I != E;)
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(Ctx&: *this);
  ObjCLayouts.clear();

  for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
       I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(Ctx&: *this);
  }
  ASTRecordLayouts.clear();

  // AttrVecs are placement-constructed in the bump allocator, so only their
  // destructors are run; the storage itself is reclaimed with BumpAlloc.
  for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
                                                       AEnd = DeclAttrs.end();
       A != AEnd; ++A)
    A->second->~AttrVec();
  DeclAttrs.clear();

  // Same placement-destruction dance for per-module initializer lists.
  for (const auto &Value : ModuleInitializers)
    Value.second->~PerModuleInitializers();
  ModuleInitializers.clear();

  XRayFilter.reset();
  NoSanitizeL.reset();
}
912
913ASTContext::~ASTContext() { cleanup(); }
914
915void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
916 TraversalScope = TopLevelDecls;
917 getParentMapContext().clear();
918}
919
920void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const {
921 Deallocations.push_back(Elt: {Callback, Data});
922}
923
924void
925ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) {
926 ExternalSource = std::move(Source);
927}
928
/// Dump statistics about the AST to stderr: a per-type-class node count and
/// byte total, counts of implicitly declared vs. created special member
/// functions, and any stats from the external source and bump allocator.
void ASTContext::PrintStats() const {
  llvm::errs() << "\n*** AST Context Stats:\n";
  llvm::errs() << "  " << Types.size() << " types total.\n";

  // One counter slot per concrete type class, generated from TypeNodes.inc.
  unsigned counts[] = {
#define TYPE(Name, Parent) 0,
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"
      0 // Extra
  };

  // Tally every type node by its type class.
  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
    Type *T = Types[i];
    counts[(unsigned)T->getTypeClass()]++;
  }

  unsigned Idx = 0;
  unsigned TotalBytes = 0;
  // Walk the counters in the same TypeNodes.inc order used to fill them.
#define TYPE(Name, Parent)                                                     \
  if (counts[Idx])                                                             \
    llvm::errs() << "    " << counts[Idx] << " " << #Name                      \
                 << " types, " << sizeof(Name##Type) << " each "               \
                 << "(" << counts[Idx] * sizeof(Name##Type)                    \
                 << " bytes)\n";                                               \
  TotalBytes += counts[Idx] * sizeof(Name##Type);                              \
  ++Idx;
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"

  llvm::errs() << "Total bytes = " << TotalBytes << "\n";

  // Implicit special member functions.
  llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
               << NumImplicitDefaultConstructors
               << " implicit default constructors created\n";
  llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
               << NumImplicitCopyConstructors
               << " implicit copy constructors created\n";
  // Move operations only exist in C++ mode.
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
                 << NumImplicitMoveConstructors
                 << " implicit move constructors created\n";
  llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
               << NumImplicitCopyAssignmentOperators
               << " implicit copy assignment operators created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
                 << NumImplicitMoveAssignmentOperators
                 << " implicit move assignment operators created\n";
  llvm::errs() << NumImplicitDestructorsDeclared << "/"
               << NumImplicitDestructors
               << " implicit destructors created\n";

  if (ExternalSource) {
    llvm::errs() << "\n";
    ExternalSource->PrintStats();
  }

  BumpAlloc.PrintStats();
}
989
990void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M,
991 bool NotifyListeners) {
992 if (NotifyListeners)
993 if (auto *Listener = getASTMutationListener();
994 Listener && !ND->isUnconditionallyVisible())
995 Listener->RedefinedHiddenDefinition(D: ND, M);
996
997 MergedDefModules[cast<NamedDecl>(Val: ND->getCanonicalDecl())].push_back(NewVal: M);
998}
999
1000void ASTContext::deduplicateMergedDefinitionsFor(NamedDecl *ND) {
1001 auto It = MergedDefModules.find(Val: cast<NamedDecl>(Val: ND->getCanonicalDecl()));
1002 if (It == MergedDefModules.end())
1003 return;
1004
1005 auto &Merged = It->second;
1006 llvm::DenseSet<Module*> Found;
1007 for (Module *&M : Merged)
1008 if (!Found.insert(V: M).second)
1009 M = nullptr;
1010 llvm::erase(C&: Merged, V: nullptr);
1011}
1012
1013ArrayRef<Module *>
1014ASTContext::getModulesWithMergedDefinition(const NamedDecl *Def) {
1015 auto MergedIt =
1016 MergedDefModules.find(Val: cast<NamedDecl>(Val: Def->getCanonicalDecl()));
1017 if (MergedIt == MergedDefModules.end())
1018 return {};
1019 return MergedIt->second;
1020}
1021
1022void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
1023 if (LazyInitializers.empty())
1024 return;
1025
1026 auto *Source = Ctx.getExternalSource();
1027 assert(Source && "lazy initializers but no external source");
1028
1029 auto LazyInits = std::move(LazyInitializers);
1030 LazyInitializers.clear();
1031
1032 for (auto ID : LazyInits)
1033 Initializers.push_back(Elt: Source->GetExternalDecl(ID));
1034
1035 assert(LazyInitializers.empty() &&
1036 "GetExternalDecl for lazy module initializer added more inits");
1037}
1038
/// Record D as an initializer for module M.  ImportDecl initializers are
/// collapsed where possible so a chain of single-import modules does not
/// build up nested indirections.
void ASTContext::addModuleInitializer(Module *M, Decl *D) {
  // One special case: if we add a module initializer that imports another
  // module, and that module's only initializer is an ImportDecl, simplify.
  if (const auto *ID = dyn_cast<ImportDecl>(Val: D)) {
    auto It = ModuleInitializers.find(Val: ID->getImportedModule());

    // Maybe the ImportDecl does nothing at all. (Common case.)
    if (It == ModuleInitializers.end())
      return;

    // Maybe the ImportDecl only imports another ImportDecl.
    auto &Imported = *It->second;
    if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) {
      // Force lazy initializers to be materialized so we can inspect the
      // single remaining one.
      Imported.resolve(Ctx&: *this);
      auto *OnlyDecl = Imported.Initializers.front();
      if (isa<ImportDecl>(Val: OnlyDecl))
        D = OnlyDecl;
    }
  }

  // Lazily create the per-module bucket; it lives in the bump allocator and
  // its destructor is run from cleanup().
  auto *&Inits = ModuleInitializers[M];
  if (!Inits)
    Inits = new (*this) PerModuleInitializers;
  Inits->Initializers.push_back(Elt: D);
}
1064
1065void ASTContext::addLazyModuleInitializers(Module *M,
1066 ArrayRef<GlobalDeclID> IDs) {
1067 auto *&Inits = ModuleInitializers[M];
1068 if (!Inits)
1069 Inits = new (*this) PerModuleInitializers;
1070 Inits->LazyInitializers.insert(I: Inits->LazyInitializers.end(),
1071 From: IDs.begin(), To: IDs.end());
1072}
1073
1074ArrayRef<Decl *> ASTContext::getModuleInitializers(Module *M) {
1075 auto It = ModuleInitializers.find(Val: M);
1076 if (It == ModuleInitializers.end())
1077 return {};
1078
1079 auto *Inits = It->second;
1080 Inits->resolve(Ctx&: *this);
1081 return Inits->Initializers;
1082}
1083
1084void ASTContext::setCurrentNamedModule(Module *M) {
1085 assert(M->isNamedModule());
1086 assert(!CurrentCXXNamedModule &&
1087 "We should set named module for ASTContext for only once");
1088 CurrentCXXNamedModule = M;
1089}
1090
/// Return true if M1 and M2 are module units of the same primary module.
/// Two null modules compare equal; a null and a non-null module do not.
bool ASTContext::isInSameModule(const Module *M1, const Module *M2) const {
  // Exactly one of the two being null means they cannot match.
  if (!M1 != !M2)
    return false;

  /// Get the representative module for M. The representative module is the
  /// first module unit for a specific primary module name. So that the module
  /// units have the same representative module belongs to the same module.
  ///
  /// The process is helpful to reduce the expensive string operations.
  auto GetRepresentativeModule = [this](const Module *M) {
    // Fast path: per-module cache of already-computed representatives.
    auto Iter = SameModuleLookupSet.find(Val: M);
    if (Iter != SameModuleLookupSet.end())
      return Iter->second;

    // Slow path: the first module seen for a primary-module-name wins and
    // becomes the representative for all later units of that module.
    const Module *RepresentativeModule =
        PrimaryModuleNameMap.try_emplace(Key: M->getPrimaryModuleInterfaceName(), Args&: M)
            .first->second;
    SameModuleLookupSet[M] = RepresentativeModule;
    return RepresentativeModule;
  };

  assert(M1 && "Shouldn't call `isInSameModule` if both M1 and M2 are none.");
  return GetRepresentativeModule(M1) == GetRepresentativeModule(M2);
}
1115
1116ExternCContextDecl *ASTContext::getExternCContextDecl() const {
1117 if (!ExternCContext)
1118 ExternCContext = ExternCContextDecl::Create(C: *this, TU: getTranslationUnitDecl());
1119
1120 return ExternCContext;
1121}
1122
1123BuiltinTemplateDecl *
1124ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK,
1125 const IdentifierInfo *II) const {
1126 auto *BuiltinTemplate =
1127 BuiltinTemplateDecl::Create(C: *this, DC: getTranslationUnitDecl(), Name: II, BTK);
1128 BuiltinTemplate->setImplicit();
1129 getTranslationUnitDecl()->addDecl(D: BuiltinTemplate);
1130
1131 return BuiltinTemplate;
1132}
1133
// X-macro over clang/Basic/BuiltinTemplates.inc: for every builtin template
// this expands into a lazy accessor (get<Name>Decl) that builds and caches
// the corresponding BuiltinTemplateDecl on first use.
#define BuiltinTemplate(BTName)                                                \
  BuiltinTemplateDecl *ASTContext::get##BTName##Decl() const {                 \
    if (!Decl##BTName)                                                         \
      Decl##BTName =                                                           \
          buildBuiltinTemplateDecl(BTK##BTName, get##BTName##Name());          \
    return Decl##BTName;                                                       \
  }
#include "clang/Basic/BuiltinTemplates.inc"
1142
1143RecordDecl *ASTContext::buildImplicitRecord(StringRef Name,
1144 RecordDecl::TagKind TK) const {
1145 SourceLocation Loc;
1146 RecordDecl *NewDecl;
1147 if (getLangOpts().CPlusPlus)
1148 NewDecl = CXXRecordDecl::Create(C: *this, TK, DC: getTranslationUnitDecl(), StartLoc: Loc,
1149 IdLoc: Loc, Id: &Idents.get(Name));
1150 else
1151 NewDecl = RecordDecl::Create(C: *this, TK, DC: getTranslationUnitDecl(), StartLoc: Loc, IdLoc: Loc,
1152 Id: &Idents.get(Name));
1153 NewDecl->setImplicit();
1154 NewDecl->addAttr(A: TypeVisibilityAttr::CreateImplicit(
1155 Ctx&: const_cast<ASTContext &>(*this), Visibility: TypeVisibilityAttr::Default));
1156 return NewDecl;
1157}
1158
1159TypedefDecl *ASTContext::buildImplicitTypedef(QualType T,
1160 StringRef Name) const {
1161 TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
1162 TypedefDecl *NewDecl = TypedefDecl::Create(
1163 C&: const_cast<ASTContext &>(*this), DC: getTranslationUnitDecl(),
1164 StartLoc: SourceLocation(), IdLoc: SourceLocation(), Id: &Idents.get(Name), TInfo);
1165 NewDecl->setImplicit();
1166 return NewDecl;
1167}
1168
1169TypedefDecl *ASTContext::getInt128Decl() const {
1170 if (!Int128Decl)
1171 Int128Decl = buildImplicitTypedef(T: Int128Ty, Name: "__int128_t");
1172 return Int128Decl;
1173}
1174
1175TypedefDecl *ASTContext::getUInt128Decl() const {
1176 if (!UInt128Decl)
1177 UInt128Decl = buildImplicitTypedef(T: UnsignedInt128Ty, Name: "__uint128_t");
1178 return UInt128Decl;
1179}
1180
1181void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
1182 auto *Ty = new (*this, alignof(BuiltinType)) BuiltinType(K);
1183 R = CanQualType::CreateUnsafe(Other: QualType(Ty, 0));
1184 Types.push_back(Elt: Ty);
1185}
1186
/// Create all of the built-in types for the given primary target (and
/// optional auxiliary target, e.g. the host during device compilation).
/// Must be called exactly once per context after target selection; the
/// VoidTy.isNull() assert enforces single initialization.
void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
                                  const TargetInfo *AuxTarget) {
  assert((!this->Target || this->Target == &Target) &&
         "Incorrect target reinitialization");
  assert(VoidTy.isNull() && "Context reinitialized?");

  this->Target = &Target;
  this->AuxTarget = AuxTarget;

  // The C++ ABI object and address-space mangling mode depend on the target,
  // so they are created here rather than in the constructor.
  ABI.reset(p: createCXXABI(T: Target));
  AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(TI: Target, LangOpts);

  // C99 6.2.5p19.
  InitBuiltinType(R&: VoidTy, K: BuiltinType::Void);

  // C99 6.2.5p2.
  InitBuiltinType(R&: BoolTy, K: BuiltinType::Bool);
  // C99 6.2.5p3.
  if (LangOpts.CharIsSigned)
    InitBuiltinType(R&: CharTy, K: BuiltinType::Char_S);
  else
    InitBuiltinType(R&: CharTy, K: BuiltinType::Char_U);
  // C99 6.2.5p4.
  InitBuiltinType(R&: SignedCharTy, K: BuiltinType::SChar);
  InitBuiltinType(R&: ShortTy, K: BuiltinType::Short);
  InitBuiltinType(R&: IntTy, K: BuiltinType::Int);
  InitBuiltinType(R&: LongTy, K: BuiltinType::Long);
  InitBuiltinType(R&: LongLongTy, K: BuiltinType::LongLong);

  // C99 6.2.5p6.
  InitBuiltinType(R&: UnsignedCharTy, K: BuiltinType::UChar);
  InitBuiltinType(R&: UnsignedShortTy, K: BuiltinType::UShort);
  InitBuiltinType(R&: UnsignedIntTy, K: BuiltinType::UInt);
  InitBuiltinType(R&: UnsignedLongTy, K: BuiltinType::ULong);
  InitBuiltinType(R&: UnsignedLongLongTy, K: BuiltinType::ULongLong);

  // C99 6.2.5p10.
  InitBuiltinType(R&: FloatTy, K: BuiltinType::Float);
  InitBuiltinType(R&: DoubleTy, K: BuiltinType::Double);
  InitBuiltinType(R&: LongDoubleTy, K: BuiltinType::LongDouble);

  // GNU extension, __float128 for IEEE quadruple precision
  InitBuiltinType(R&: Float128Ty, K: BuiltinType::Float128);

  // __ibm128 for IBM extended precision
  InitBuiltinType(R&: Ibm128Ty, K: BuiltinType::Ibm128);

  // C11 extension ISO/IEC TS 18661-3
  InitBuiltinType(R&: Float16Ty, K: BuiltinType::Float16);

  // ISO/IEC JTC1 SC22 WG14 N1169 Extension (fixed-point types)
  InitBuiltinType(R&: ShortAccumTy, K: BuiltinType::ShortAccum);
  InitBuiltinType(R&: AccumTy, K: BuiltinType::Accum);
  InitBuiltinType(R&: LongAccumTy, K: BuiltinType::LongAccum);
  InitBuiltinType(R&: UnsignedShortAccumTy, K: BuiltinType::UShortAccum);
  InitBuiltinType(R&: UnsignedAccumTy, K: BuiltinType::UAccum);
  InitBuiltinType(R&: UnsignedLongAccumTy, K: BuiltinType::ULongAccum);
  InitBuiltinType(R&: ShortFractTy, K: BuiltinType::ShortFract);
  InitBuiltinType(R&: FractTy, K: BuiltinType::Fract);
  InitBuiltinType(R&: LongFractTy, K: BuiltinType::LongFract);
  InitBuiltinType(R&: UnsignedShortFractTy, K: BuiltinType::UShortFract);
  InitBuiltinType(R&: UnsignedFractTy, K: BuiltinType::UFract);
  InitBuiltinType(R&: UnsignedLongFractTy, K: BuiltinType::ULongFract);
  InitBuiltinType(R&: SatShortAccumTy, K: BuiltinType::SatShortAccum);
  InitBuiltinType(R&: SatAccumTy, K: BuiltinType::SatAccum);
  InitBuiltinType(R&: SatLongAccumTy, K: BuiltinType::SatLongAccum);
  InitBuiltinType(R&: SatUnsignedShortAccumTy, K: BuiltinType::SatUShortAccum);
  InitBuiltinType(R&: SatUnsignedAccumTy, K: BuiltinType::SatUAccum);
  InitBuiltinType(R&: SatUnsignedLongAccumTy, K: BuiltinType::SatULongAccum);
  InitBuiltinType(R&: SatShortFractTy, K: BuiltinType::SatShortFract);
  InitBuiltinType(R&: SatFractTy, K: BuiltinType::SatFract);
  InitBuiltinType(R&: SatLongFractTy, K: BuiltinType::SatLongFract);
  InitBuiltinType(R&: SatUnsignedShortFractTy, K: BuiltinType::SatUShortFract);
  InitBuiltinType(R&: SatUnsignedFractTy, K: BuiltinType::SatUFract);
  InitBuiltinType(R&: SatUnsignedLongFractTy, K: BuiltinType::SatULongFract);

  // GNU extension, 128-bit integers.
  InitBuiltinType(R&: Int128Ty, K: BuiltinType::Int128);
  InitBuiltinType(R&: UnsignedInt128Ty, K: BuiltinType::UInt128);

  // C++ 3.9.1p5
  if (TargetInfo::isTypeSigned(T: Target.getWCharType()))
    InitBuiltinType(R&: WCharTy, K: BuiltinType::WChar_S);
  else // -fshort-wchar makes wchar_t be unsigned.
    InitBuiltinType(R&: WCharTy, K: BuiltinType::WChar_U);
  if (LangOpts.CPlusPlus && LangOpts.WChar)
    WideCharTy = WCharTy;
  else {
    // C99 (or C++ using -fno-wchar).
    WideCharTy = getFromTargetType(Type: Target.getWCharType());
  }

  WIntTy = getFromTargetType(Type: Target.getWIntType());

  // C++20 (proposed)
  InitBuiltinType(R&: Char8Ty, K: BuiltinType::Char8);

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(R&: Char16Ty, K: BuiltinType::Char16);
  else // C99
    Char16Ty = getFromTargetType(Type: Target.getChar16Type());

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(R&: Char32Ty, K: BuiltinType::Char32);
  else // C99
    Char32Ty = getFromTargetType(Type: Target.getChar32Type());

  // Placeholder type for type-dependent expressions whose type is
  // completely unknown. No code should ever check a type against
  // DependentTy and users should never see it; however, it is here to
  // help diagnose failures to properly check for type-dependent
  // expressions.
  InitBuiltinType(R&: DependentTy, K: BuiltinType::Dependent);

  // Placeholder type for functions.
  InitBuiltinType(R&: OverloadTy, K: BuiltinType::Overload);

  // Placeholder type for bound members.
  InitBuiltinType(R&: BoundMemberTy, K: BuiltinType::BoundMember);

  // Placeholder type for unresolved templates.
  InitBuiltinType(R&: UnresolvedTemplateTy, K: BuiltinType::UnresolvedTemplate);

  // Placeholder type for pseudo-objects.
  InitBuiltinType(R&: PseudoObjectTy, K: BuiltinType::PseudoObject);

  // "any" type; useful for debugger-like clients.
  InitBuiltinType(R&: UnknownAnyTy, K: BuiltinType::UnknownAny);

  // Placeholder type for unbridged ARC casts.
  InitBuiltinType(R&: ARCUnbridgedCastTy, K: BuiltinType::ARCUnbridgedCast);

  // Placeholder type for builtin functions.
  InitBuiltinType(R&: BuiltinFnTy, K: BuiltinType::BuiltinFn);

  // Placeholder type for OMP array sections.
  if (LangOpts.OpenMP) {
    InitBuiltinType(R&: ArraySectionTy, K: BuiltinType::ArraySection);
    InitBuiltinType(R&: OMPArrayShapingTy, K: BuiltinType::OMPArrayShaping);
    InitBuiltinType(R&: OMPIteratorTy, K: BuiltinType::OMPIterator);
  }
  // Placeholder type for OpenACC array sections, if we are ALSO in OMP mode,
  // don't bother, as we're just using the same type as OMP.
  if (LangOpts.OpenACC && !LangOpts.OpenMP) {
    InitBuiltinType(R&: ArraySectionTy, K: BuiltinType::ArraySection);
  }
  if (LangOpts.MatrixTypes)
    InitBuiltinType(R&: IncompleteMatrixIdxTy, K: BuiltinType::IncompleteMatrixIdx);

  // Builtin types for 'id', 'Class', and 'SEL'.
  InitBuiltinType(R&: ObjCBuiltinIdTy, K: BuiltinType::ObjCId);
  InitBuiltinType(R&: ObjCBuiltinClassTy, K: BuiltinType::ObjCClass);
  InitBuiltinType(R&: ObjCBuiltinSelTy, K: BuiltinType::ObjCSel);

  // Target- and language-specific opaque builtin types, each driven by an
  // X-macro .def file.
  if (LangOpts.OpenCL) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
  InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/OpenCLImageTypes.def"

    InitBuiltinType(R&: OCLSamplerTy, K: BuiltinType::OCLSampler);
    InitBuiltinType(R&: OCLEventTy, K: BuiltinType::OCLEvent);
    InitBuiltinType(R&: OCLClkEventTy, K: BuiltinType::OCLClkEvent);
    InitBuiltinType(R&: OCLQueueTy, K: BuiltinType::OCLQueue);
    InitBuiltinType(R&: OCLReserveIDTy, K: BuiltinType::OCLReserveID);

#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
  InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/OpenCLExtensionTypes.def"
  }

  if (LangOpts.HLSL) {
#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) \
  InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/HLSLIntangibleTypes.def"
  }

  if (Target.hasAArch64ACLETypes() ||
      (AuxTarget && AuxTarget->hasAArch64ACLETypes())) {
#define SVE_TYPE(Name, Id, SingletonId) \
  InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/AArch64ACLETypes.def"
  }

  if (Target.getTriple().isPPC64()) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
  InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
  InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
  }

  if (Target.hasRISCVVTypes()) {
#define RVV_TYPE(Name, Id, SingletonId) \
  InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/RISCVVTypes.def"
  }

  if (Target.getTriple().isWasm() && Target.hasFeature(Feature: "reference-types")) {
#define WASM_TYPE(Name, Id, SingletonId) \
  InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/WebAssemblyReferenceTypes.def"
  }

  if (Target.getTriple().isAMDGPU() ||
      (AuxTarget && AuxTarget->getTriple().isAMDGPU())) {
#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) \
  InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/AMDGPUTypes.def"
  }

  // Builtin type for __objc_yes and __objc_no
  ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
                       SignedCharTy : BoolTy);

  // Start these out as null; they are filled in on demand.
  ObjCConstantStringType = QualType();

  ObjCSuperType = QualType();

  // void * type
  if (LangOpts.OpenCLGenericAddressSpace) {
    // In OpenCL with generic address space, void* points into the generic
    // address space rather than the default one.
    auto Q = VoidTy.getQualifiers();
    Q.setAddressSpace(LangAS::opencl_generic);
    VoidPtrTy = getPointerType(T: getCanonicalType(
        T: getQualifiedType(T: VoidTy.getUnqualifiedType(), Qs: Q)));
  } else {
    VoidPtrTy = getPointerType(T: VoidTy);
  }

  // nullptr type (C++0x 2.14.7)
  InitBuiltinType(R&: NullPtrTy, K: BuiltinType::NullPtr);

  // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
  InitBuiltinType(R&: HalfTy, K: BuiltinType::Half);

  InitBuiltinType(R&: BFloat16Ty, K: BuiltinType::BFloat16);

  // Builtin type used to help define __builtin_va_list.
  VaListTagDecl = nullptr;

  // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls.
  if (LangOpts.MicrosoftExt || LangOpts.Borland) {
    MSGuidTagDecl = buildImplicitRecord(Name: "_GUID");
    getTranslationUnitDecl()->addDecl(D: MSGuidTagDecl);
  }
}
1433
1434DiagnosticsEngine &ASTContext::getDiagnostics() const {
1435 return SourceMgr.getDiagnostics();
1436}
1437
1438AttrVec& ASTContext::getDeclAttrs(const Decl *D) {
1439 AttrVec *&Result = DeclAttrs[D];
1440 if (!Result) {
1441 void *Mem = Allocate(Size: sizeof(AttrVec));
1442 Result = new (Mem) AttrVec;
1443 }
1444
1445 return *Result;
1446}
1447
1448/// Erase the attributes corresponding to the given declaration.
1449void ASTContext::eraseDeclAttrs(const Decl *D) {
1450 llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(Val: D);
1451 if (Pos != DeclAttrs.end()) {
1452 Pos->second->~AttrVec();
1453 DeclAttrs.erase(I: Pos);
1454 }
1455}
1456
1457// FIXME: Remove ?
1458MemberSpecializationInfo *
1459ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
1460 assert(Var->isStaticDataMember() && "Not a static data member");
1461 return getTemplateOrSpecializationInfo(Var)
1462 .dyn_cast<MemberSpecializationInfo *>();
1463}
1464
1465ASTContext::TemplateOrSpecializationInfo
1466ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) {
1467 llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
1468 TemplateOrInstantiation.find(Val: Var);
1469 if (Pos == TemplateOrInstantiation.end())
1470 return {};
1471
1472 return Pos->second;
1473}
1474
1475void
1476ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
1477 TemplateSpecializationKind TSK,
1478 SourceLocation PointOfInstantiation) {
1479 assert(Inst->isStaticDataMember() && "Not a static data member");
1480 assert(Tmpl->isStaticDataMember() && "Not a static data member");
1481 setTemplateOrSpecializationInfo(Inst, TSI: new (*this) MemberSpecializationInfo(
1482 Tmpl, TSK, PointOfInstantiation));
1483}
1484
1485void
1486ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst,
1487 TemplateOrSpecializationInfo TSI) {
1488 assert(!TemplateOrInstantiation[Inst] &&
1489 "Already noted what the variable was instantiated from");
1490 TemplateOrInstantiation[Inst] = TSI;
1491}
1492
1493NamedDecl *
1494ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) {
1495 return InstantiatedFromUsingDecl.lookup(Val: UUD);
1496}
1497
1498void
1499ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) {
1500 assert((isa<UsingDecl>(Pattern) ||
1501 isa<UnresolvedUsingValueDecl>(Pattern) ||
1502 isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
1503 "pattern decl is not a using decl");
1504 assert((isa<UsingDecl>(Inst) ||
1505 isa<UnresolvedUsingValueDecl>(Inst) ||
1506 isa<UnresolvedUsingTypenameDecl>(Inst)) &&
1507 "instantiation did not produce a using decl");
1508 assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
1509 InstantiatedFromUsingDecl[Inst] = Pattern;
1510}
1511
1512UsingEnumDecl *
1513ASTContext::getInstantiatedFromUsingEnumDecl(UsingEnumDecl *UUD) {
1514 return InstantiatedFromUsingEnumDecl.lookup(Val: UUD);
1515}
1516
1517void ASTContext::setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst,
1518 UsingEnumDecl *Pattern) {
1519 assert(!InstantiatedFromUsingEnumDecl[Inst] && "pattern already exists");
1520 InstantiatedFromUsingEnumDecl[Inst] = Pattern;
1521}
1522
1523UsingShadowDecl *
1524ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) {
1525 return InstantiatedFromUsingShadowDecl.lookup(Val: Inst);
1526}
1527
1528void
1529ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
1530 UsingShadowDecl *Pattern) {
1531 assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
1532 InstantiatedFromUsingShadowDecl[Inst] = Pattern;
1533}
1534
1535FieldDecl *
1536ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) const {
1537 return InstantiatedFromUnnamedFieldDecl.lookup(Val: Field);
1538}
1539
1540void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst,
1541 FieldDecl *Tmpl) {
1542 assert((!Inst->getDeclName() || Inst->isPlaceholderVar(getLangOpts())) &&
1543 "Instantiated field decl is not unnamed");
1544 assert((!Inst->getDeclName() || Inst->isPlaceholderVar(getLangOpts())) &&
1545 "Template field decl is not unnamed");
1546 assert(!InstantiatedFromUnnamedFieldDecl[Inst] &&
1547 "Already noted what unnamed field was instantiated from");
1548
1549 InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl;
1550}
1551
1552ASTContext::overridden_cxx_method_iterator
1553ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const {
1554 return overridden_methods(Method).begin();
1555}
1556
1557ASTContext::overridden_cxx_method_iterator
1558ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const {
1559 return overridden_methods(Method).end();
1560}
1561
1562unsigned
1563ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const {
1564 auto Range = overridden_methods(Method);
1565 return Range.end() - Range.begin();
1566}
1567
1568ASTContext::overridden_method_range
1569ASTContext::overridden_methods(const CXXMethodDecl *Method) const {
1570 llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos =
1571 OverriddenMethods.find(Val: Method->getCanonicalDecl());
1572 if (Pos == OverriddenMethods.end())
1573 return overridden_method_range(nullptr, nullptr);
1574 return overridden_method_range(Pos->second.begin(), Pos->second.end());
1575}
1576
1577void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method,
1578 const CXXMethodDecl *Overridden) {
1579 assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl());
1580 OverriddenMethods[Method].push_back(NewVal: Overridden);
1581}
1582
1583void ASTContext::getOverriddenMethods(
1584 const NamedDecl *D,
1585 SmallVectorImpl<const NamedDecl *> &Overridden) const {
1586 assert(D);
1587
1588 if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(Val: D)) {
1589 Overridden.append(in_start: overridden_methods_begin(Method: CXXMethod),
1590 in_end: overridden_methods_end(Method: CXXMethod));
1591 return;
1592 }
1593
1594 const auto *Method = dyn_cast<ObjCMethodDecl>(Val: D);
1595 if (!Method)
1596 return;
1597
1598 SmallVector<const ObjCMethodDecl *, 8> OverDecls;
1599 Method->getOverriddenMethods(Overridden&: OverDecls);
1600 Overridden.append(in_start: OverDecls.begin(), in_end: OverDecls.end());
1601}
1602
1603std::optional<ASTContext::CXXRecordDeclRelocationInfo>
1604ASTContext::getRelocationInfoForCXXRecord(const CXXRecordDecl *RD) const {
1605 assert(RD);
1606 CXXRecordDecl *D = RD->getDefinition();
1607 auto it = RelocatableClasses.find(Val: D);
1608 if (it != RelocatableClasses.end())
1609 return it->getSecond();
1610 return std::nullopt;
1611}
1612
1613void ASTContext::setRelocationInfoForCXXRecord(
1614 const CXXRecordDecl *RD, CXXRecordDeclRelocationInfo Info) {
1615 assert(RD);
1616 CXXRecordDecl *D = RD->getDefinition();
1617 assert(RelocatableClasses.find(D) == RelocatableClasses.end());
1618 RelocatableClasses.insert(KV: {D, Info});
1619}
1620
1621static bool primaryBaseHaseAddressDiscriminatedVTableAuthentication(
1622 const ASTContext &Context, const CXXRecordDecl *Class) {
1623 if (!Class->isPolymorphic())
1624 return false;
1625 const CXXRecordDecl *BaseType = Context.baseForVTableAuthentication(ThisClass: Class);
1626 using AuthAttr = VTablePointerAuthenticationAttr;
1627 const AuthAttr *ExplicitAuth = BaseType->getAttr<AuthAttr>();
1628 if (!ExplicitAuth)
1629 return Context.getLangOpts().PointerAuthVTPtrAddressDiscrimination;
1630 AuthAttr::AddressDiscriminationMode AddressDiscrimination =
1631 ExplicitAuth->getAddressDiscrimination();
1632 if (AddressDiscrimination == AuthAttr::DefaultAddressDiscrimination)
1633 return Context.getLangOpts().PointerAuthVTPtrAddressDiscrimination;
1634 return AddressDiscrimination == AuthAttr::AddressDiscrimination;
1635}
1636
/// Determine whether T (transitively, through bases and fields) contains
/// address-discriminated pointer-authentication content, and of which kind.
/// Results for record types are cached in
/// RecordContainsAddressDiscriminatedPointerAuth.
ASTContext::PointerAuthContent
ASTContext::findPointerAuthContent(QualType T) const {
  assert(isPointerAuthenticationAvailable());

  T = T.getCanonicalType();
  if (T->isDependentType())
    return PointerAuthContent::None;

  // A directly address-discriminated type is the strongest answer possible.
  if (T.hasAddressDiscriminatedPointerAuth())
    return PointerAuthContent::AddressDiscriminatedData;
  const RecordDecl *RD = T->getAsRecordDecl();
  if (!RD)
    return PointerAuthContent::None;

  if (RD->isInvalidDecl())
    return PointerAuthContent::None;

  // Fast path: reuse a previously computed answer for this record.
  if (auto Existing = RecordContainsAddressDiscriminatedPointerAuth.find(Val: RD);
      Existing != RecordContainsAddressDiscriminatedPointerAuth.end())
    return Existing->second;

  PointerAuthContent Result = PointerAuthContent::None;

  // Cache Result for RD on every exit path; the assert catches a recursive
  // visit having already inserted an entry for the same record.
  auto SaveResultAndReturn = [&]() -> PointerAuthContent {
    auto [ResultIter, DidAdd] =
        RecordContainsAddressDiscriminatedPointerAuth.try_emplace(Key: RD, Args&: Result);
    (void)ResultIter;
    (void)DidAdd;
    assert(DidAdd);
    return Result;
  };
  // Merge NewResult into Result using the None < VTable < Data ordering and
  // report whether scanning should continue (Data is already maximal).
  auto ShouldContinueAfterUpdate = [&](PointerAuthContent NewResult) {
    static_assert(PointerAuthContent::None <
                  PointerAuthContent::AddressDiscriminatedVTable);
    static_assert(PointerAuthContent::AddressDiscriminatedVTable <
                  PointerAuthContent::AddressDiscriminatedData);
    if (NewResult > Result)
      Result = NewResult;
    return Result != PointerAuthContent::AddressDiscriminatedData;
  };
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD)) {
    if (primaryBaseHaseAddressDiscriminatedVTableAuthentication(Context: *this, Class: CXXRD) &&
        !ShouldContinueAfterUpdate(
            PointerAuthContent::AddressDiscriminatedVTable))
      return SaveResultAndReturn();
    for (auto Base : CXXRD->bases()) {
      if (!ShouldContinueAfterUpdate(findPointerAuthContent(T: Base.getType())))
        return SaveResultAndReturn();
    }
  }
  for (auto *FieldDecl : RD->fields()) {
    if (!ShouldContinueAfterUpdate(
            findPointerAuthContent(T: FieldDecl->getType())))
      return SaveResultAndReturn();
  }
  return SaveResultAndReturn();
}
1694
1695void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
1696 assert(!Import->getNextLocalImport() &&
1697 "Import declaration already in the chain");
1698 assert(!Import->isFromASTFile() && "Non-local import declaration");
1699 if (!FirstLocalImport) {
1700 FirstLocalImport = Import;
1701 LastLocalImport = Import;
1702 return;
1703 }
1704
1705 LastLocalImport->setNextLocalImport(Import);
1706 LastLocalImport = Import;
1707}
1708
1709//===----------------------------------------------------------------------===//
1710// Type Sizing and Analysis
1711//===----------------------------------------------------------------------===//
1712
1713/// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
1714/// scalar floating point type.
1715const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
1716 switch (T->castAs<BuiltinType>()->getKind()) {
1717 default:
1718 llvm_unreachable("Not a floating point type!");
1719 case BuiltinType::BFloat16:
1720 return Target->getBFloat16Format();
1721 case BuiltinType::Float16:
1722 return Target->getHalfFormat();
1723 case BuiltinType::Half:
1724 return Target->getHalfFormat();
1725 case BuiltinType::Float: return Target->getFloatFormat();
1726 case BuiltinType::Double: return Target->getDoubleFormat();
1727 case BuiltinType::Ibm128:
1728 return Target->getIbm128Format();
1729 case BuiltinType::LongDouble:
1730 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
1731 return AuxTarget->getLongDoubleFormat();
1732 return Target->getLongDoubleFormat();
1733 case BuiltinType::Float128:
1734 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
1735 return AuxTarget->getFloat128Format();
1736 return Target->getFloat128Format();
1737 }
1738}
1739
/// getDeclAlign - Return the alignment of the declaration \p D, in character
/// units. Starts from the target's char width, is overridden by any alignment
/// attribute, and is otherwise derived from the declaration's type, with
/// adjustments for arrays, record fields, and global variables. When
/// \p ForAlignof is true, alignof-style semantics apply: references report
/// their referent's alignment and the target's large-array / global-variable
/// minimum alignments are not applied.
CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
  unsigned Align = Target->getCharWidth();

  // The largest alignment requested via attributes, or 0 if none.
  const unsigned AlignFromAttr = D->getMaxAlignment();
  if (AlignFromAttr)
    Align = AlignFromAttr;

  // __attribute__((aligned)) can increase or decrease alignment
  // *except* on a struct or struct member, where it only increases
  // alignment unless 'packed' is also specified.
  //
  // It is an error for alignas to decrease alignment, so we can
  // ignore that possibility; Sema should diagnose it.
  bool UseAlignAttrOnly;
  if (const FieldDecl *FD = dyn_cast<FieldDecl>(Val: D))
    UseAlignAttrOnly =
        FD->hasAttr<PackedAttr>() || FD->getParent()->hasAttr<PackedAttr>();
  else
    UseAlignAttrOnly = AlignFromAttr != 0;
  // If we're using the align attribute only, just ignore everything
  // else about the declaration and its type.
  if (UseAlignAttrOnly) {
    // do nothing
  } else if (const auto *VD = dyn_cast<ValueDecl>(Val: D)) {
    QualType T = VD->getType();
    if (const auto *RT = T->getAs<ReferenceType>()) {
      // For alignof, use the referent type directly; otherwise model the
      // reference as a pointer to the referent.
      if (ForAlignof)
        T = RT->getPointeeType();
      else
        T = getPointerType(T: RT->getPointeeType());
    }
    QualType BaseT = getBaseElementType(QT: T);
    if (T->isFunctionType())
      Align = getTypeInfoImpl(T: T.getTypePtr()).Align;
    else if (!BaseT->isIncompleteType()) {
      // Adjust alignments of declarations with array type by the
      // large-array alignment on the target.
      if (const ArrayType *arrayType = getAsArrayType(T)) {
        unsigned MinWidth = Target->getLargeArrayMinWidth();
        if (!ForAlignof && MinWidth) {
          if (isa<VariableArrayType>(Val: arrayType))
            Align = std::max(a: Align, b: Target->getLargeArrayAlign());
          else if (isa<ConstantArrayType>(Val: arrayType) &&
                   MinWidth <= getTypeSize(T: cast<ConstantArrayType>(Val: arrayType)))
            Align = std::max(a: Align, b: Target->getLargeArrayAlign());
        }
      }
      Align = std::max(a: Align, b: getPreferredTypeAlign(T: T.getTypePtr()));
      // The __unaligned qualifier drops alignment back to a single char.
      if (BaseT.getQualifiers().hasUnaligned())
        Align = Target->getCharWidth();
    }

    // Ensure minimum alignment for global variables.
    if (const auto *VD = dyn_cast<VarDecl>(Val: D))
      if (VD->hasGlobalStorage() && !ForAlignof) {
        uint64_t TypeSize =
            !BaseT->isIncompleteType() ? getTypeSize(T: T.getTypePtr()) : 0;
        Align = std::max(a: Align, b: getMinGlobalAlignOfVar(Size: TypeSize, VD));
      }

    // Fields can be subject to extra alignment constraints, like if
    // the field is packed, the struct is packed, or the struct has a
    // max-field-alignment constraint (#pragma pack). So calculate
    // the actual alignment of the field within the struct, and then
    // (as we're expected to) constrain that by the alignment of the type.
    if (const auto *Field = dyn_cast<FieldDecl>(Val: VD)) {
      const RecordDecl *Parent = Field->getParent();
      // We can only produce a sensible answer if the record is valid.
      if (!Parent->isInvalidDecl()) {
        const ASTRecordLayout &Layout = getASTRecordLayout(D: Parent);

        // Start with the record's overall alignment.
        unsigned FieldAlign = toBits(CharSize: Layout.getAlignment());

        // Use the GCD of that and the offset within the record.
        uint64_t Offset = Layout.getFieldOffset(FieldNo: Field->getFieldIndex());
        if (Offset > 0) {
          // Alignment is always a power of 2, so the GCD will be a power of 2,
          // which means we get to do this crazy thing instead of Euclid's.
          uint64_t LowBitOfOffset = Offset & (~Offset + 1);
          if (LowBitOfOffset < FieldAlign)
            FieldAlign = static_cast<unsigned>(LowBitOfOffset);
        }

        Align = std::min(a: Align, b: FieldAlign);
      }
    }
  }

  // Some targets have hard limitation on the maximum requestable alignment in
  // aligned attribute for static variables.
  const unsigned MaxAlignedAttr = getTargetInfo().getMaxAlignedAttribute();
  const auto *VD = dyn_cast<VarDecl>(Val: D);
  if (MaxAlignedAttr && VD && VD->getStorageClass() == SC_Static)
    Align = std::min(a: Align, b: MaxAlignedAttr);

  return toCharUnitsFromBits(BitSize: Align);
}
1838
1839CharUnits ASTContext::getExnObjectAlignment() const {
1840 return toCharUnitsFromBits(BitSize: Target->getExnObjectAlignment());
1841}
1842
1843// getTypeInfoDataSizeInChars - Return the size of a type, in
1844// chars. If the type is a record, its data size is returned. This is
1845// the size of the memcpy that's performed when assigning this type
1846// using a trivial copy/move assignment operator.
1847TypeInfoChars ASTContext::getTypeInfoDataSizeInChars(QualType T) const {
1848 TypeInfoChars Info = getTypeInfoInChars(T);
1849
1850 // In C++, objects can sometimes be allocated into the tail padding
1851 // of a base-class subobject. We decide whether that's possible
1852 // during class layout, so here we can just trust the layout results.
1853 if (getLangOpts().CPlusPlus) {
1854 if (const auto *RD = T->getAsCXXRecordDecl(); RD && !RD->isInvalidDecl()) {
1855 const ASTRecordLayout &layout = getASTRecordLayout(D: RD);
1856 Info.Width = layout.getDataSize();
1857 }
1858 }
1859
1860 return Info;
1861}
1862
1863/// getConstantArrayInfoInChars - Performing the computation in CharUnits
1864/// instead of in bits prevents overflowing the uint64_t for some large arrays.
1865TypeInfoChars
1866static getConstantArrayInfoInChars(const ASTContext &Context,
1867 const ConstantArrayType *CAT) {
1868 TypeInfoChars EltInfo = Context.getTypeInfoInChars(T: CAT->getElementType());
1869 uint64_t Size = CAT->getZExtSize();
1870 assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <=
1871 (uint64_t)(-1)/Size) &&
1872 "Overflow in array type char size evaluation");
1873 uint64_t Width = EltInfo.Width.getQuantity() * Size;
1874 unsigned Align = EltInfo.Align.getQuantity();
1875 if (!Context.getTargetInfo().getCXXABI().isMicrosoft() ||
1876 Context.getTargetInfo().getPointerWidth(AddrSpace: LangAS::Default) == 64)
1877 Width = llvm::alignTo(Value: Width, Align);
1878 return TypeInfoChars(CharUnits::fromQuantity(Quantity: Width),
1879 CharUnits::fromQuantity(Quantity: Align),
1880 EltInfo.AlignRequirement);
1881}
1882
1883TypeInfoChars ASTContext::getTypeInfoInChars(const Type *T) const {
1884 if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: T))
1885 return getConstantArrayInfoInChars(Context: *this, CAT);
1886 TypeInfo Info = getTypeInfo(T);
1887 return TypeInfoChars(toCharUnitsFromBits(BitSize: Info.Width),
1888 toCharUnitsFromBits(BitSize: Info.Align), Info.AlignRequirement);
1889}
1890
1891TypeInfoChars ASTContext::getTypeInfoInChars(QualType T) const {
1892 return getTypeInfoInChars(T: T.getTypePtr());
1893}
1894
1895bool ASTContext::isPromotableIntegerType(QualType T) const {
1896 // HLSL doesn't promote all small integer types to int, it
1897 // just uses the rank-based promotion rules for all types.
1898 if (getLangOpts().HLSL)
1899 return false;
1900
1901 if (const auto *BT = T->getAs<BuiltinType>())
1902 switch (BT->getKind()) {
1903 case BuiltinType::Bool:
1904 case BuiltinType::Char_S:
1905 case BuiltinType::Char_U:
1906 case BuiltinType::SChar:
1907 case BuiltinType::UChar:
1908 case BuiltinType::Short:
1909 case BuiltinType::UShort:
1910 case BuiltinType::WChar_S:
1911 case BuiltinType::WChar_U:
1912 case BuiltinType::Char8:
1913 case BuiltinType::Char16:
1914 case BuiltinType::Char32:
1915 return true;
1916 default:
1917 return false;
1918 }
1919
1920 // Enumerated types are promotable to their compatible integer types
1921 // (C99 6.3.1.1) a.k.a. its underlying type (C++ [conv.prom]p2).
1922 if (const auto *ED = T->getAsEnumDecl()) {
1923 if (T->isDependentType() || ED->getPromotionType().isNull() ||
1924 ED->isScoped())
1925 return false;
1926
1927 return true;
1928 }
1929
1930 return false;
1931}
1932
1933bool ASTContext::isAlignmentRequired(const Type *T) const {
1934 return getTypeInfo(T).AlignRequirement != AlignRequirementKind::None;
1935}
1936
1937bool ASTContext::isAlignmentRequired(QualType T) const {
1938 return isAlignmentRequired(T: T.getTypePtr());
1939}
1940
1941unsigned ASTContext::getTypeAlignIfKnown(QualType T,
1942 bool NeedsPreferredAlignment) const {
1943 // An alignment on a typedef overrides anything else.
1944 if (const auto *TT = T->getAs<TypedefType>())
1945 if (unsigned Align = TT->getDecl()->getMaxAlignment())
1946 return Align;
1947
1948 // If we have an (array of) complete type, we're done.
1949 T = getBaseElementType(QT: T);
1950 if (!T->isIncompleteType())
1951 return NeedsPreferredAlignment ? getPreferredTypeAlign(T) : getTypeAlign(T);
1952
1953 // If we had an array type, its element type might be a typedef
1954 // type with an alignment attribute.
1955 if (const auto *TT = T->getAs<TypedefType>())
1956 if (unsigned Align = TT->getDecl()->getMaxAlignment())
1957 return Align;
1958
1959 // Otherwise, see if the declaration of the type had an attribute.
1960 if (const auto *TD = T->getAsTagDecl())
1961 return TD->getMaxAlignment();
1962
1963 return 0;
1964}
1965
/// getTypeInfo - Memoizing wrapper around getTypeInfoImpl: return the cached
/// width/alignment information for \p T, computing and caching it on first
/// use.
TypeInfo ASTContext::getTypeInfo(const Type *T) const {
  TypeInfoMap::iterator I = MemoizedTypeInfo.find(Val: T);
  if (I != MemoizedTypeInfo.end())
    return I->second;

  // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup.
  TypeInfo TI = getTypeInfoImpl(T);
  MemoizedTypeInfo[T] = TI;
  return TI;
}
1976
/// getTypeInfoImpl - Return the size of the specified type, in bits. This
/// method does not work on incomplete types.
///
/// FIXME: Pointers into different addr spaces could have different sizes and
/// alignment requirements: getPointerInfo should take an AddrSpace, this
/// should take a QualType, &c.
TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
  // Defaults: zero width, byte alignment, no attribute-imposed alignment.
  uint64_t Width = 0;
  unsigned Align = 8;
  AlignRequirementKind AlignRequirement = AlignRequirementKind::None;
  LangAS AS = LangAS::Default;
  switch (T->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)                       \
  case Type::Class:                                                            \
    assert(!T->isDependentType() && "should not see dependent types here");    \
    return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr());
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Should not see dependent types");

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // GCC extension: alignof(function) = 32 bits
    Width = 0;
    Align = 32;
    break;

  case Type::IncompleteArray:
  case Type::VariableArray:
  case Type::ConstantArray:
  case Type::ArrayParameter: {
    // Model non-constant sized arrays as size zero, but track the alignment.
    uint64_t Size = 0;
    if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: T))
      Size = CAT->getZExtSize();

    TypeInfo EltInfo = getTypeInfo(T: cast<ArrayType>(Val: T)->getElementType());
    assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) &&
           "Overflow in array type bit size evaluation");
    Width = EltInfo.Width * Size;
    Align = EltInfo.Align;
    AlignRequirement = EltInfo.AlignRequirement;
    // Except on 32-bit MSVC targets, pad the array size up to a multiple of
    // the element alignment.
    if (!getTargetInfo().getCXXABI().isMicrosoft() ||
        getTargetInfo().getPointerWidth(AddrSpace: LangAS::Default) == 64)
      Width = llvm::alignTo(Value: Width, Align);
    break;
  }

  case Type::ExtVector:
  case Type::Vector: {
    const auto *VT = cast<VectorType>(Val: T);
    TypeInfo EltInfo = getTypeInfo(T: VT->getElementType());
    // Packed boolean vectors use one bit per element.
    Width = VT->isPackedVectorBoolType(ctx: *this)
                ? VT->getNumElements()
                : EltInfo.Width * VT->getNumElements();
    // Enforce at least byte size and alignment.
    Width = std::max<unsigned>(a: 8, b: Width);
    Align = std::max<unsigned>(a: 8, b: Width);

    // If the alignment is not a power of 2, round up to the next power of 2.
    // This happens for non-power-of-2 length vectors.
    if (Align & (Align-1)) {
      Align = llvm::bit_ceil(Value: Align);
      Width = llvm::alignTo(Value: Width, Align);
    }
    // Adjust the alignment based on the target max.
    uint64_t TargetVectorAlign = Target->getMaxVectorAlign();
    if (TargetVectorAlign && TargetVectorAlign < Align)
      Align = TargetVectorAlign;
    if (VT->getVectorKind() == VectorKind::SveFixedLengthData)
      // Adjust the alignment for fixed-length SVE vectors. This is important
      // for non-power-of-2 vector lengths.
      Align = 128;
    else if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
      // Adjust the alignment for fixed-length SVE predicates.
      Align = 16;
    else if (VT->getVectorKind() == VectorKind::RVVFixedLengthData ||
             VT->getVectorKind() == VectorKind::RVVFixedLengthMask ||
             VT->getVectorKind() == VectorKind::RVVFixedLengthMask_1 ||
             VT->getVectorKind() == VectorKind::RVVFixedLengthMask_2 ||
             VT->getVectorKind() == VectorKind::RVVFixedLengthMask_4)
      // Adjust the alignment for fixed-length RVV vectors.
      Align = std::min<unsigned>(a: 64, b: Width);
    break;
  }

  case Type::ConstantMatrix: {
    const auto *MT = cast<ConstantMatrixType>(Val: T);
    TypeInfo ElementInfo = getTypeInfo(T: MT->getElementType());
    // The internal layout of a matrix value is implementation defined.
    // Initially be ABI compatible with arrays with respect to alignment and
    // size.
    Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns();
    Align = ElementInfo.Align;
    break;
  }

  case Type::Builtin:
    switch (cast<BuiltinType>(Val: T)->getKind()) {
    default: llvm_unreachable("Unknown builtin type!");
    case BuiltinType::Void:
      // GCC extension: alignof(void) = 8 bits.
      Width = 0;
      Align = 8;
      break;
    case BuiltinType::Bool:
      Width = Target->getBoolWidth();
      Align = Target->getBoolAlign();
      break;
    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::UChar:
    case BuiltinType::SChar:
    case BuiltinType::Char8:
      Width = Target->getCharWidth();
      Align = Target->getCharAlign();
      break;
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
      Width = Target->getWCharWidth();
      Align = Target->getWCharAlign();
      break;
    case BuiltinType::Char16:
      Width = Target->getChar16Width();
      Align = Target->getChar16Align();
      break;
    case BuiltinType::Char32:
      Width = Target->getChar32Width();
      Align = Target->getChar32Align();
      break;
    case BuiltinType::UShort:
    case BuiltinType::Short:
      Width = Target->getShortWidth();
      Align = Target->getShortAlign();
      break;
    case BuiltinType::UInt:
    case BuiltinType::Int:
      Width = Target->getIntWidth();
      Align = Target->getIntAlign();
      break;
    case BuiltinType::ULong:
    case BuiltinType::Long:
      Width = Target->getLongWidth();
      Align = Target->getLongAlign();
      break;
    case BuiltinType::ULongLong:
    case BuiltinType::LongLong:
      Width = Target->getLongLongWidth();
      Align = Target->getLongLongAlign();
      break;
    case BuiltinType::Int128:
    case BuiltinType::UInt128:
      Width = 128;
      Align = Target->getInt128Align();
      break;
    case BuiltinType::ShortAccum:
    case BuiltinType::UShortAccum:
    case BuiltinType::SatShortAccum:
    case BuiltinType::SatUShortAccum:
      Width = Target->getShortAccumWidth();
      Align = Target->getShortAccumAlign();
      break;
    case BuiltinType::Accum:
    case BuiltinType::UAccum:
    case BuiltinType::SatAccum:
    case BuiltinType::SatUAccum:
      Width = Target->getAccumWidth();
      Align = Target->getAccumAlign();
      break;
    case BuiltinType::LongAccum:
    case BuiltinType::ULongAccum:
    case BuiltinType::SatLongAccum:
    case BuiltinType::SatULongAccum:
      Width = Target->getLongAccumWidth();
      Align = Target->getLongAccumAlign();
      break;
    case BuiltinType::ShortFract:
    case BuiltinType::UShortFract:
    case BuiltinType::SatShortFract:
    case BuiltinType::SatUShortFract:
      Width = Target->getShortFractWidth();
      Align = Target->getShortFractAlign();
      break;
    case BuiltinType::Fract:
    case BuiltinType::UFract:
    case BuiltinType::SatFract:
    case BuiltinType::SatUFract:
      Width = Target->getFractWidth();
      Align = Target->getFractAlign();
      break;
    case BuiltinType::LongFract:
    case BuiltinType::ULongFract:
    case BuiltinType::SatLongFract:
    case BuiltinType::SatULongFract:
      Width = Target->getLongFractWidth();
      Align = Target->getLongFractAlign();
      break;
    case BuiltinType::BFloat16:
      // If the main target lacks bfloat16, fall back to the aux (host)
      // target's format in SYCL/OpenMP device compilations.
      if (Target->hasBFloat16Type()) {
        Width = Target->getBFloat16Width();
        Align = Target->getBFloat16Align();
      } else if ((getLangOpts().SYCLIsDevice ||
                  (getLangOpts().OpenMP &&
                   getLangOpts().OpenMPIsTargetDevice)) &&
                 AuxTarget->hasBFloat16Type()) {
        Width = AuxTarget->getBFloat16Width();
        Align = AuxTarget->getBFloat16Align();
      }
      break;
    case BuiltinType::Float16:
    case BuiltinType::Half:
      if (Target->hasFloat16Type() || !getLangOpts().OpenMP ||
          !getLangOpts().OpenMPIsTargetDevice) {
        Width = Target->getHalfWidth();
        Align = Target->getHalfAlign();
      } else {
        assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
               "Expected OpenMP device compilation.");
        Width = AuxTarget->getHalfWidth();
        Align = AuxTarget->getHalfAlign();
      }
      break;
    case BuiltinType::Float:
      Width = Target->getFloatWidth();
      Align = Target->getFloatAlign();
      break;
    case BuiltinType::Double:
      Width = Target->getDoubleWidth();
      Align = Target->getDoubleAlign();
      break;
    case BuiltinType::Ibm128:
      Width = Target->getIbm128Width();
      Align = Target->getIbm128Align();
      break;
    case BuiltinType::LongDouble:
      // OpenMP device compilations use the host's long double layout when
      // it differs, so both halves of the compilation agree.
      if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
          (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() ||
           Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) {
        Width = AuxTarget->getLongDoubleWidth();
        Align = AuxTarget->getLongDoubleAlign();
      } else {
        Width = Target->getLongDoubleWidth();
        Align = Target->getLongDoubleAlign();
      }
      break;
    case BuiltinType::Float128:
      if (Target->hasFloat128Type() || !getLangOpts().OpenMP ||
          !getLangOpts().OpenMPIsTargetDevice) {
        Width = Target->getFloat128Width();
        Align = Target->getFloat128Align();
      } else {
        assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
               "Expected OpenMP device compilation.");
        Width = AuxTarget->getFloat128Width();
        Align = AuxTarget->getFloat128Align();
      }
      break;
    case BuiltinType::NullPtr:
      // C++ 3.9.1p11: sizeof(nullptr_t) == sizeof(void*)
      Width = Target->getPointerWidth(AddrSpace: LangAS::Default);
      Align = Target->getPointerAlign(AddrSpace: LangAS::Default);
      break;
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      Width = Target->getPointerWidth(AddrSpace: LangAS::Default);
      Align = Target->getPointerAlign(AddrSpace: LangAS::Default);
      break;
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix)                   \
  case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext)                                      \
  case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
      // OpenCL opaque types are laid out like pointers in their target
      // address space.
      AS = Target->getOpenCLTypeAddrSpace(TK: getOpenCLTypeKind(T));
      Width = Target->getPointerWidth(AddrSpace: AS);
      Align = Target->getPointerAlign(AddrSpace: AS);
      break;
    // The SVE types are effectively target-specific. The length of an
    // SVE_VECTOR_TYPE is only known at runtime, but it is always a multiple
    // of 128 bits. There is one predicate bit for each vector byte, so the
    // length of an SVE_PREDICATE_TYPE is always a multiple of 16 bits.
    //
    // Because the length is only known at runtime, we use a dummy value
    // of 0 for the static length. The alignment values are those defined
    // by the Procedure Call Standard for the Arm Architecture.
#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId)                    \
  case BuiltinType::Id:                                                        \
    Width = 0;                                                                 \
    Align = 128;                                                               \
    break;
#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId)                 \
  case BuiltinType::Id:                                                        \
    Width = 0;                                                                 \
    Align = 16;                                                                \
    break;
#define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingletonId)                    \
  case BuiltinType::Id:                                                        \
    Width = 0;                                                                 \
    Align = 16;                                                                \
    break;
#define SVE_SCALAR_TYPE(Name, MangledName, Id, SingletonId, Bits)              \
  case BuiltinType::Id:                                                        \
    Width = Bits;                                                              \
    Align = Bits;                                                              \
    break;
#include "clang/Basic/AArch64ACLETypes.def"
#define PPC_VECTOR_TYPE(Name, Id, Size)                                        \
  case BuiltinType::Id:                                                        \
    Width = Size;                                                              \
    Align = Size;                                                              \
    break;
#include "clang/Basic/PPCTypes.def"
#define RVV_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, NF, IsSigned,   \
                        IsFP, IsBF)                                            \
  case BuiltinType::Id:                                                        \
    Width = 0;                                                                 \
    Align = ElBits;                                                            \
    break;
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, ElKind)                      \
  case BuiltinType::Id:                                                        \
    Width = 0;                                                                 \
    Align = 8;                                                                 \
    break;
#include "clang/Basic/RISCVVTypes.def"
#define WASM_TYPE(Name, Id, SingletonId)                                       \
  case BuiltinType::Id:                                                        \
    Width = 0;                                                                 \
    Align = 8;                                                                 \
    break;
#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define AMDGPU_TYPE(NAME, ID, SINGLETONID, WIDTH, ALIGN)                       \
  case BuiltinType::ID:                                                        \
    Width = WIDTH;                                                             \
    Align = ALIGN;                                                             \
    break;
#include "clang/Basic/AMDGPUTypes.def"
#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/HLSLIntangibleTypes.def"
      Width = Target->getPointerWidth(AddrSpace: LangAS::Default);
      Align = Target->getPointerAlign(AddrSpace: LangAS::Default);
      break;
    }
    break;
  case Type::ObjCObjectPointer:
    Width = Target->getPointerWidth(AddrSpace: LangAS::Default);
    Align = Target->getPointerAlign(AddrSpace: LangAS::Default);
    break;
  case Type::BlockPointer:
    AS = cast<BlockPointerType>(Val: T)->getPointeeType().getAddressSpace();
    Width = Target->getPointerWidth(AddrSpace: AS);
    Align = Target->getPointerAlign(AddrSpace: AS);
    break;
  case Type::LValueReference:
  case Type::RValueReference:
    // alignof and sizeof should never enter this code path here, so we go
    // the pointer route.
    AS = cast<ReferenceType>(Val: T)->getPointeeType().getAddressSpace();
    Width = Target->getPointerWidth(AddrSpace: AS);
    Align = Target->getPointerAlign(AddrSpace: AS);
    break;
  case Type::Pointer:
    AS = cast<PointerType>(Val: T)->getPointeeType().getAddressSpace();
    Width = Target->getPointerWidth(AddrSpace: AS);
    Align = Target->getPointerAlign(AddrSpace: AS);
    break;
  case Type::MemberPointer: {
    // Member pointer layout is ABI-specific; defer to the C++ ABI object.
    const auto *MPT = cast<MemberPointerType>(Val: T);
    CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT);
    Width = MPI.Width;
    Align = MPI.Align;
    break;
  }
  case Type::Complex: {
    // Complex types have the same alignment as their elements, but twice the
    // size.
    TypeInfo EltInfo = getTypeInfo(T: cast<ComplexType>(Val: T)->getElementType());
    Width = EltInfo.Width * 2;
    Align = EltInfo.Align;
    break;
  }
  case Type::ObjCObject:
    return getTypeInfo(T: cast<ObjCObjectType>(Val: T)->getBaseType().getTypePtr());
  case Type::Adjusted:
  case Type::Decayed:
    return getTypeInfo(T: cast<AdjustedType>(Val: T)->getAdjustedType().getTypePtr());
  case Type::ObjCInterface: {
    const auto *ObjCI = cast<ObjCInterfaceType>(Val: T);
    if (ObjCI->getDecl()->isInvalidDecl()) {
      Width = 8;
      Align = 8;
      break;
    }
    const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(D: ObjCI->getDecl());
    Width = toBits(CharSize: Layout.getSize());
    Align = toBits(CharSize: Layout.getAlignment());
    break;
  }
  case Type::BitInt: {
    const auto *EIT = cast<BitIntType>(Val: T);
    Align = Target->getBitIntAlign(NumBits: EIT->getNumBits());
    Width = Target->getBitIntWidth(NumBits: EIT->getNumBits());
    break;
  }
  case Type::Record:
  case Type::Enum: {
    const auto *TT = cast<TagType>(Val: T);
    const TagDecl *TD = TT->getDecl()->getDefinitionOrSelf();

    // Invalid declarations get a harmless fallback layout of one byte.
    if (TD->isInvalidDecl()) {
      Width = 8;
      Align = 8;
      break;
    }

    if (isa<EnumType>(Val: TT)) {
      const EnumDecl *ED = cast<EnumDecl>(Val: TD);
      TypeInfo Info =
          getTypeInfo(T: ED->getIntegerType()->getUnqualifiedDesugaredType());
      if (unsigned AttrAlign = ED->getMaxAlignment()) {
        Info.Align = AttrAlign;
        Info.AlignRequirement = AlignRequirementKind::RequiredByEnum;
      }
      return Info;
    }

    const auto *RD = cast<RecordDecl>(Val: TD);
    const ASTRecordLayout &Layout = getASTRecordLayout(D: RD);
    Width = toBits(CharSize: Layout.getSize());
    Align = toBits(CharSize: Layout.getAlignment());
    AlignRequirement = RD->hasAttr<AlignedAttr>()
                           ? AlignRequirementKind::RequiredByRecord
                           : AlignRequirementKind::None;
    break;
  }

  case Type::SubstTemplateTypeParm:
    return getTypeInfo(T: cast<SubstTemplateTypeParmType>(Val: T)->
                       getReplacementType().getTypePtr());

  case Type::Auto:
  case Type::DeducedTemplateSpecialization: {
    const auto *A = cast<DeducedType>(Val: T);
    assert(!A->getDeducedType().isNull() &&
           "cannot request the size of an undeduced or dependent auto type");
    return getTypeInfo(T: A->getDeducedType().getTypePtr());
  }

  case Type::Paren:
    return getTypeInfo(T: cast<ParenType>(Val: T)->getInnerType().getTypePtr());

  case Type::MacroQualified:
    return getTypeInfo(
        T: cast<MacroQualifiedType>(Val: T)->getUnderlyingType().getTypePtr());

  case Type::ObjCTypeParam:
    return getTypeInfo(T: cast<ObjCTypeParamType>(Val: T)->desugar().getTypePtr());

  case Type::Using:
    return getTypeInfo(T: cast<UsingType>(Val: T)->desugar().getTypePtr());

  case Type::Typedef: {
    const auto *TT = cast<TypedefType>(Val: T);
    TypeInfo Info = getTypeInfo(T: TT->desugar().getTypePtr());
    // If the typedef has an aligned attribute on it, it overrides any computed
    // alignment we have. This violates the GCC documentation (which says that
    // attribute(aligned) can only round up) but matches its implementation.
    if (unsigned AttrAlign = TT->getDecl()->getMaxAlignment()) {
      Align = AttrAlign;
      AlignRequirement = AlignRequirementKind::RequiredByTypedef;
    } else {
      Align = Info.Align;
      AlignRequirement = Info.AlignRequirement;
    }
    Width = Info.Width;
    break;
  }

  case Type::Attributed:
    return getTypeInfo(
        T: cast<AttributedType>(Val: T)->getEquivalentType().getTypePtr());

  case Type::CountAttributed:
    return getTypeInfo(T: cast<CountAttributedType>(Val: T)->desugar().getTypePtr());

  case Type::BTFTagAttributed:
    return getTypeInfo(
        T: cast<BTFTagAttributedType>(Val: T)->getWrappedType().getTypePtr());

  case Type::HLSLAttributedResource:
    return getTypeInfo(
        T: cast<HLSLAttributedResourceType>(Val: T)->getWrappedType().getTypePtr());

  case Type::HLSLInlineSpirv: {
    const auto *ST = cast<HLSLInlineSpirvType>(Val: T);
    // Size is specified in bytes, convert to bits
    Width = ST->getSize() * 8;
    Align = ST->getAlignment();
    if (Width == 0 && Align == 0) {
      // We are defaulting to laying out opaque SPIR-V types as 32-bit ints.
      Width = 32;
      Align = 32;
    }
    break;
  }

  case Type::Atomic: {
    // Start with the base type information.
    TypeInfo Info = getTypeInfo(T: cast<AtomicType>(Val: T)->getValueType());
    Width = Info.Width;
    Align = Info.Align;

    if (!Width) {
      // An otherwise zero-sized type should still generate an
      // atomic operation.
      Width = Target->getCharWidth();
      assert(Align);
    } else if (Width <= Target->getMaxAtomicPromoteWidth()) {
      // If the size of the type doesn't exceed the platform's max
      // atomic promotion width, make the size and alignment more
      // favorable to atomic operations:

      // Round the size up to a power of 2.
      Width = llvm::bit_ceil(Value: Width);

      // Set the alignment equal to the size.
      Align = static_cast<unsigned>(Width);
    }
  }
  break;

  case Type::PredefinedSugar:
    return getTypeInfo(T: cast<PredefinedSugarType>(Val: T)->desugar().getTypePtr());

  case Type::Pipe:
    Width = Target->getPointerWidth(AddrSpace: LangAS::opencl_global);
    Align = Target->getPointerAlign(AddrSpace: LangAS::opencl_global);
    break;
  }

  assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
  return TypeInfo(Width, Align, AlignRequirement);
}
2528
2529unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
2530 UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(Val: T);
2531 if (I != MemoizedUnadjustedAlign.end())
2532 return I->second;
2533
2534 unsigned UnadjustedAlign;
2535 if (const auto *RT = T->getAsCanonical<RecordType>()) {
2536 const ASTRecordLayout &Layout = getASTRecordLayout(D: RT->getDecl());
2537 UnadjustedAlign = toBits(CharSize: Layout.getUnadjustedAlignment());
2538 } else if (const auto *ObjCI = T->getAsCanonical<ObjCInterfaceType>()) {
2539 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(D: ObjCI->getDecl());
2540 UnadjustedAlign = toBits(CharSize: Layout.getUnadjustedAlignment());
2541 } else {
2542 UnadjustedAlign = getTypeAlign(T: T->getUnqualifiedDesugaredType());
2543 }
2544
2545 MemoizedUnadjustedAlign[T] = UnadjustedAlign;
2546 return UnadjustedAlign;
2547}
2548
2549unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const {
2550 unsigned SimdAlign = llvm::OpenMPIRBuilder::getOpenMPDefaultSimdAlign(
2551 TargetTriple: getTargetInfo().getTriple(), Features: Target->getTargetOpts().FeatureMap);
2552 return SimdAlign;
2553}
2554
2555/// toCharUnitsFromBits - Convert a size in bits to a size in characters.
2556CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
2557 return CharUnits::fromQuantity(Quantity: BitSize / getCharWidth());
2558}
2559
/// toBits - Convert a size in characters to a size in bits.
int64_t ASTContext::toBits(CharUnits CharSize) const {
  return CharSize.getQuantity() * getCharWidth();
}
2564
2565/// getTypeSizeInChars - Return the size of the specified type, in characters.
2566/// This method does not work on incomplete types.
2567CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
2568 return getTypeInfoInChars(T).Width;
2569}
2570CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
2571 return getTypeInfoInChars(T).Width;
2572}
2573
2574/// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
2575/// characters. This method does not work on incomplete types.
2576CharUnits ASTContext::getTypeAlignInChars(QualType T) const {
2577 return toCharUnitsFromBits(BitSize: getTypeAlign(T));
2578}
2579CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
2580 return toCharUnitsFromBits(BitSize: getTypeAlign(T));
2581}
2582
2583/// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a
2584/// type, in characters, before alignment adjustments. This method does
2585/// not work on incomplete types.
2586CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const {
2587 return toCharUnitsFromBits(BitSize: getTypeUnadjustedAlign(T));
2588}
2589CharUnits ASTContext::getTypeUnadjustedAlignInChars(const Type *T) const {
2590 return toCharUnitsFromBits(BitSize: getTypeUnadjustedAlign(T));
2591}
2592
/// getPreferredTypeAlign - Return the "preferred" alignment of the specified
/// type for the current target in bits. This can be different than the ABI
/// alignment in cases where it is beneficial for performance or backwards
/// compatibility preserving to overalign a data type. (Note: despite the name,
/// the preferred alignment is ABI-impacting, and not an optimization.)
unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
  TypeInfo TI = getTypeInfo(T);
  unsigned ABIAlign = TI.Align;

  // Arrays take the preferred alignment of their element type.
  T = T->getBaseElementTypeUnsafe();

  // The preferred alignment of member pointers is that of a pointer.
  if (T->isMemberPointerType())
    return getPreferredTypeAlign(T: getPointerDiffType().getTypePtr());

  // Some targets never overalign beyond the ABI alignment.
  if (!Target->allowsLargerPreferedTypeAlignment())
    return ABIAlign;

  if (const auto *RD = T->getAsRecordDecl()) {
    // When used as part of a typedef, or together with a 'packed' attribute,
    // the 'aligned' attribute can be used to decrease alignment. Note that the
    // 'packed' case is already taken into consideration when computing the
    // alignment, we only need to handle the typedef case here.
    if (TI.AlignRequirement == AlignRequirementKind::RequiredByTypedef ||
        RD->isInvalidDecl())
      return ABIAlign;

    // Otherwise use the preferred alignment computed by the record layout.
    unsigned PreferredAlign = static_cast<unsigned>(
        toBits(CharSize: getASTRecordLayout(D: RD).PreferredAlignment));
    assert(PreferredAlign >= ABIAlign &&
           "PreferredAlign should be at least as large as ABIAlign.");
    return PreferredAlign;
  }

  // Double (and, for targets supporting AIX `power` alignment, long double) and
  // long long should be naturally aligned (despite requiring less alignment) if
  // possible.
  if (const auto *CT = T->getAs<ComplexType>())
    T = CT->getElementType().getTypePtr();
  if (const auto *ED = T->getAsEnumDecl())
    T = ED->getIntegerType().getTypePtr();
  if (T->isSpecificBuiltinType(K: BuiltinType::Double) ||
      T->isSpecificBuiltinType(K: BuiltinType::LongLong) ||
      T->isSpecificBuiltinType(K: BuiltinType::ULongLong) ||
      (T->isSpecificBuiltinType(K: BuiltinType::LongDouble) &&
       Target->defaultsToAIXPowerAlignment()))
    // Don't increase the alignment if an alignment attribute was specified on a
    // typedef declaration.
    if (!TI.isAlignRequired())
      return std::max(a: ABIAlign, b: (unsigned)getTypeSize(T));

  return ABIAlign;
}
2646
2647/// getTargetDefaultAlignForAttributeAligned - Return the default alignment
2648/// for __attribute__((aligned)) on this target, to be used if no alignment
2649/// value is specified.
2650unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const {
2651 return getTargetInfo().getDefaultAlignForAttributeAligned();
2652}
2653
2654/// getAlignOfGlobalVar - Return the alignment in bits that should be given
2655/// to a global variable of the specified type.
2656unsigned ASTContext::getAlignOfGlobalVar(QualType T, const VarDecl *VD) const {
2657 uint64_t TypeSize = getTypeSize(T: T.getTypePtr());
2658 return std::max(a: getPreferredTypeAlign(T),
2659 b: getMinGlobalAlignOfVar(Size: TypeSize, VD));
2660}
2661
2662/// getAlignOfGlobalVarInChars - Return the alignment in characters that
2663/// should be given to a global variable of the specified type.
2664CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T,
2665 const VarDecl *VD) const {
2666 return toCharUnitsFromBits(BitSize: getAlignOfGlobalVar(T, VD));
2667}
2668
2669unsigned ASTContext::getMinGlobalAlignOfVar(uint64_t Size,
2670 const VarDecl *VD) const {
2671 // Make the default handling as that of a non-weak definition in the
2672 // current translation unit.
2673 bool HasNonWeakDef = !VD || (VD->hasDefinition() && !VD->isWeak());
2674 return getTargetInfo().getMinGlobalAlign(Size, HasNonWeakDef);
2675}
2676
2677CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const {
2678 CharUnits Offset = CharUnits::Zero();
2679 const ASTRecordLayout *Layout = &getASTRecordLayout(D: RD);
2680 while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) {
2681 Offset += Layout->getBaseClassOffset(Base);
2682 Layout = &getASTRecordLayout(D: Base);
2683 }
2684 return Offset;
2685}
2686
2687CharUnits ASTContext::getMemberPointerPathAdjustment(const APValue &MP) const {
2688 const ValueDecl *MPD = MP.getMemberPointerDecl();
2689 CharUnits ThisAdjustment = CharUnits::Zero();
2690 ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath();
2691 bool DerivedMember = MP.isMemberPointerToDerivedMember();
2692 const CXXRecordDecl *RD = cast<CXXRecordDecl>(Val: MPD->getDeclContext());
2693 for (unsigned I = 0, N = Path.size(); I != N; ++I) {
2694 const CXXRecordDecl *Base = RD;
2695 const CXXRecordDecl *Derived = Path[I];
2696 if (DerivedMember)
2697 std::swap(a&: Base, b&: Derived);
2698 ThisAdjustment += getASTRecordLayout(D: Derived).getBaseClassOffset(Base);
2699 RD = Path[I];
2700 }
2701 if (DerivedMember)
2702 ThisAdjustment = -ThisAdjustment;
2703 return ThisAdjustment;
2704}
2705
2706/// DeepCollectObjCIvars -
2707/// This routine first collects all declared, but not synthesized, ivars in
2708/// super class and then collects all ivars, including those synthesized for
2709/// current class. This routine is used for implementation of current class
2710/// when all ivars, declared and synthesized are known.
2711void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI,
2712 bool leafClass,
2713 SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const {
2714 if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
2715 DeepCollectObjCIvars(OI: SuperClass, leafClass: false, Ivars);
2716 if (!leafClass) {
2717 llvm::append_range(C&: Ivars, R: OI->ivars());
2718 } else {
2719 auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
2720 for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
2721 Iv= Iv->getNextIvar())
2722 Ivars.push_back(Elt: Iv);
2723 }
2724}
2725
2726/// CollectInheritedProtocols - Collect all protocols in current class and
2727/// those inherited by it.
2728void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
2729 llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) {
2730 if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(Val: CDecl)) {
2731 // We can use protocol_iterator here instead of
2732 // all_referenced_protocol_iterator since we are walking all categories.
2733 for (auto *Proto : OI->all_referenced_protocols()) {
2734 CollectInheritedProtocols(CDecl: Proto, Protocols);
2735 }
2736
2737 // Categories of this Interface.
2738 for (const auto *Cat : OI->visible_categories())
2739 CollectInheritedProtocols(CDecl: Cat, Protocols);
2740
2741 if (ObjCInterfaceDecl *SD = OI->getSuperClass())
2742 while (SD) {
2743 CollectInheritedProtocols(CDecl: SD, Protocols);
2744 SD = SD->getSuperClass();
2745 }
2746 } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(Val: CDecl)) {
2747 for (auto *Proto : OC->protocols()) {
2748 CollectInheritedProtocols(CDecl: Proto, Protocols);
2749 }
2750 } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(Val: CDecl)) {
2751 // Insert the protocol.
2752 if (!Protocols.insert(
2753 Ptr: const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second)
2754 return;
2755
2756 for (auto *Proto : OP->protocols())
2757 CollectInheritedProtocols(CDecl: Proto, Protocols);
2758 }
2759}
2760
2761static bool unionHasUniqueObjectRepresentations(const ASTContext &Context,
2762 const RecordDecl *RD,
2763 bool CheckIfTriviallyCopyable) {
2764 assert(RD->isUnion() && "Must be union type");
2765 CharUnits UnionSize =
2766 Context.getTypeSizeInChars(T: Context.getCanonicalTagType(TD: RD));
2767
2768 for (const auto *Field : RD->fields()) {
2769 if (!Context.hasUniqueObjectRepresentations(Ty: Field->getType(),
2770 CheckIfTriviallyCopyable))
2771 return false;
2772 CharUnits FieldSize = Context.getTypeSizeInChars(T: Field->getType());
2773 if (FieldSize != UnionSize)
2774 return false;
2775 }
2776 return !RD->field_empty();
2777}
2778
// Returns the bit offset of a field subobject within its containing record.
static int64_t getSubobjectOffset(const FieldDecl *Field,
                                  const ASTContext &Context,
                                  const clang::ASTRecordLayout & /*Layout*/) {
  return Context.getFieldOffset(FD: Field);
}

// Returns the bit offset of a base-class subobject within the given layout.
static int64_t getSubobjectOffset(const CXXRecordDecl *RD,
                                  const ASTContext &Context,
                                  const clang::ASTRecordLayout &Layout) {
  return Context.toBits(CharSize: Layout.getBaseClassOffset(Base: RD));
}
2790
2791static std::optional<int64_t>
2792structHasUniqueObjectRepresentations(const ASTContext &Context,
2793 const RecordDecl *RD,
2794 bool CheckIfTriviallyCopyable);
2795
// Returns the number of bits this field contributes to the containing
// object's representation, or std::nullopt if the field's type does not have
// unique object representations.
static std::optional<int64_t>
getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context,
                       bool CheckIfTriviallyCopyable) {
  // Non-union record fields recurse so member offsets can be padding-checked.
  if (const auto *RD = Field->getType()->getAsRecordDecl();
      RD && !RD->isUnion())
    return structHasUniqueObjectRepresentations(Context, RD,
                                                CheckIfTriviallyCopyable);

  // A _BitInt type may not be unique if it has padding bits
  // but if it is a bitfield the padding bits are not used.
  bool IsBitIntType = Field->getType()->isBitIntType();
  if (!Field->getType()->isReferenceType() && !IsBitIntType &&
      !Context.hasUniqueObjectRepresentations(Ty: Field->getType(),
                                              CheckIfTriviallyCopyable))
    return std::nullopt;

  int64_t FieldSizeInBits =
      Context.toBits(CharSize: Context.getTypeSizeInChars(T: Field->getType()));
  if (Field->isBitField()) {
    // If we have explicit padding bits, they don't contribute bits
    // to the actual object representation, so return 0.
    if (Field->isUnnamedBitField())
      return 0;

    // A bit-field contributes only its declared width.
    int64_t BitfieldSize = Field->getBitWidthValue();
    if (IsBitIntType) {
      // A _BitInt bit-field wider than the underlying type is not unique.
      if ((unsigned)BitfieldSize >
          cast<BitIntType>(Val: Field->getType())->getNumBits())
        return std::nullopt;
    } else if (BitfieldSize > FieldSizeInBits) {
      return std::nullopt;
    }
    FieldSizeInBits = BitfieldSize;
  } else if (IsBitIntType && !Context.hasUniqueObjectRepresentations(
                                 Ty: Field->getType(), CheckIfTriviallyCopyable)) {
    // Non-bit-field _BitInt: its padding bits participate in the object
    // representation, so uniqueness of the type itself is required.
    return std::nullopt;
  }
  return FieldSizeInBits;
}
2835
2836static std::optional<int64_t>
2837getSubobjectSizeInBits(const CXXRecordDecl *RD, const ASTContext &Context,
2838 bool CheckIfTriviallyCopyable) {
2839 return structHasUniqueObjectRepresentations(Context, RD,
2840 CheckIfTriviallyCopyable);
2841}
2842
2843template <typename RangeT>
2844static std::optional<int64_t> structSubobjectsHaveUniqueObjectRepresentations(
2845 const RangeT &Subobjects, int64_t CurOffsetInBits,
2846 const ASTContext &Context, const clang::ASTRecordLayout &Layout,
2847 bool CheckIfTriviallyCopyable) {
2848 for (const auto *Subobject : Subobjects) {
2849 std::optional<int64_t> SizeInBits =
2850 getSubobjectSizeInBits(Subobject, Context, CheckIfTriviallyCopyable);
2851 if (!SizeInBits)
2852 return std::nullopt;
2853 if (*SizeInBits != 0) {
2854 int64_t Offset = getSubobjectOffset(Subobject, Context, Layout);
2855 if (Offset != CurOffsetInBits)
2856 return std::nullopt;
2857 CurOffsetInBits += *SizeInBits;
2858 }
2859 }
2860 return CurOffsetInBits;
2861}
2862
// Returns the total number of representation bits covered by RD's subobjects
// (base classes in layout order, then fields), or std::nullopt if any
// subobject lacks unique object representations or internal padding exists.
static std::optional<int64_t>
structHasUniqueObjectRepresentations(const ASTContext &Context,
                                     const RecordDecl *RD,
                                     bool CheckIfTriviallyCopyable) {
  assert(!RD->isUnion() && "Must be struct/class type");
  const auto &Layout = Context.getASTRecordLayout(D: RD);

  int64_t CurOffsetInBits = 0;
  if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(Val: RD)) {
    // A vtable or virtual-base pointer is not part of the value
    // representation, so dynamic classes are never unique.
    if (ClassDecl->isDynamicClass())
      return std::nullopt;

    SmallVector<CXXRecordDecl *, 4> Bases;
    for (const auto &Base : ClassDecl->bases()) {
      // Empty types can be inherited from, and non-empty types can potentially
      // have tail padding, so just make sure there isn't an error.
      Bases.emplace_back(Args: Base.getType()->getAsCXXRecordDecl());
    }

    // Check bases in layout order, not declaration order.
    llvm::sort(C&: Bases, Comp: [&](const CXXRecordDecl *L, const CXXRecordDecl *R) {
      return Layout.getBaseClassOffset(Base: L) < Layout.getBaseClassOffset(Base: R);
    });

    std::optional<int64_t> OffsetAfterBases =
        structSubobjectsHaveUniqueObjectRepresentations(
            Subobjects: Bases, CurOffsetInBits, Context, Layout, CheckIfTriviallyCopyable);
    if (!OffsetAfterBases)
      return std::nullopt;
    CurOffsetInBits = *OffsetAfterBases;
  }

  // Fields continue where the bases left off.
  std::optional<int64_t> OffsetAfterFields =
      structSubobjectsHaveUniqueObjectRepresentations(
          Subobjects: RD->fields(), CurOffsetInBits, Context, Layout,
          CheckIfTriviallyCopyable);
  if (!OffsetAfterFields)
    return std::nullopt;
  CurOffsetInBits = *OffsetAfterFields;

  return CurOffsetInBits;
}
2904
bool ASTContext::hasUniqueObjectRepresentations(
    QualType Ty, bool CheckIfTriviallyCopyable) const {
  // C++17 [meta.unary.prop]:
  //   The predicate condition for a template specialization
  //   has_unique_object_representations<T> shall be satisfied if and only if:
  //     (9.1) - T is trivially copyable, and
  //     (9.2) - any two objects of type T with the same value have the same
  //     object representation, where:
  //     - two objects of array or non-union class type are considered to have
  //       the same value if their respective sequences of direct subobjects
  //       have the same values, and
  //     - two objects of union type are considered to have the same value if
  //       they have the same active member and the corresponding members have
  //       the same value.
  //   The set of scalar types for which this condition holds is
  //   implementation-defined. [ Note: If a type has padding bits, the condition
  //   does not hold; otherwise, the condition holds true for unsigned integral
  //   types. -- end note ]
  assert(!Ty.isNull() && "Null QualType sent to unique object rep check");

  // Arrays are unique only if their element type is unique.
  if (Ty->isArrayType())
    return hasUniqueObjectRepresentations(Ty: getBaseElementType(QT: Ty),
                                          CheckIfTriviallyCopyable);

  assert((Ty->isVoidType() || !Ty->isIncompleteType()) &&
         "hasUniqueObjectRepresentations should not be called with an "
         "incomplete type");

  // (9.1) - T is trivially copyable...
  if (CheckIfTriviallyCopyable && !Ty.isTriviallyCopyableType(Context: *this))
    return false;

  // All integrals and enums are unique.
  if (Ty->isIntegralOrEnumerationType()) {
    // Address discriminated integer types are not unique.
    if (Ty.hasAddressDiscriminatedPointerAuth())
      return false;
    // Except _BitInt types that have padding bits.
    if (const auto *BIT = Ty->getAs<BitIntType>())
      return getTypeSize(T: BIT) == BIT->getNumBits();

    return true;
  }

  // All other pointers are unique.
  if (Ty->isPointerType())
    return !Ty.hasAddressDiscriminatedPointerAuth();

  // Member pointers are unique only when the ABI representation has no
  // padding (e.g. some MS-ABI member function pointers do).
  if (const auto *MPT = Ty->getAs<MemberPointerType>())
    return !ABI->getMemberPointerInfo(MPT).HasPadding;

  if (const auto *Record = Ty->getAsRecordDecl()) {
    if (Record->isInvalidDecl())
      return false;

    if (Record->isUnion())
      return unionHasUniqueObjectRepresentations(Context: *this, RD: Record,
                                                 CheckIfTriviallyCopyable);

    // A struct is unique iff its subobjects cover every bit of the object:
    // the accumulated subobject size must equal the full type size, which
    // rules out any tail padding.
    std::optional<int64_t> StructSize = structHasUniqueObjectRepresentations(
        Context: *this, RD: Record, CheckIfTriviallyCopyable);

    return StructSize && *StructSize == static_cast<int64_t>(getTypeSize(T: Ty));
  }

  // FIXME: More cases to handle here (list by rsmith):
  // vectors (careful about, eg, vector of 3 foo)
  // _Complex int and friends
  // _Atomic T
  // Obj-C block pointers
  // Obj-C object pointers
  // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t,
  // clk_event_t, queue_t, reserve_id_t)
  // There're also Obj-C class types and the Obj-C selector type, but I think it
  // makes sense for those to return false here.

  return false;
}
2984
2985unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const {
2986 unsigned count = 0;
2987 // Count ivars declared in class extension.
2988 for (const auto *Ext : OI->known_extensions())
2989 count += Ext->ivar_size();
2990
2991 // Count ivar defined in this class's implementation. This
2992 // includes synthesized ivars.
2993 if (ObjCImplementationDecl *ImplDecl = OI->getImplementation())
2994 count += ImplDecl->ivar_size();
2995
2996 return count;
2997}
2998
2999bool ASTContext::isSentinelNullExpr(const Expr *E) {
3000 if (!E)
3001 return false;
3002
3003 // nullptr_t is always treated as null.
3004 if (E->getType()->isNullPtrType()) return true;
3005
3006 if (E->getType()->isAnyPointerType() &&
3007 E->IgnoreParenCasts()->isNullPointerConstant(Ctx&: *this,
3008 NPC: Expr::NPC_ValueDependentIsNull))
3009 return true;
3010
3011 // Unfortunately, __null has type 'int'.
3012 if (isa<GNUNullExpr>(Val: E)) return true;
3013
3014 return false;
3015}
3016
3017/// Get the implementation of ObjCInterfaceDecl, or nullptr if none
3018/// exists.
3019ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) {
3020 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
3021 I = ObjCImpls.find(Val: D);
3022 if (I != ObjCImpls.end())
3023 return cast<ObjCImplementationDecl>(Val: I->second);
3024 return nullptr;
3025}
3026
3027/// Get the implementation of ObjCCategoryDecl, or nullptr if none
3028/// exists.
3029ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) {
3030 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
3031 I = ObjCImpls.find(Val: D);
3032 if (I != ObjCImpls.end())
3033 return cast<ObjCCategoryImplDecl>(Val: I->second);
3034 return nullptr;
3035}
3036
3037/// Set the implementation of ObjCInterfaceDecl.
3038void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD,
3039 ObjCImplementationDecl *ImplD) {
3040 assert(IFaceD && ImplD && "Passed null params");
3041 ObjCImpls[IFaceD] = ImplD;
3042}
3043
3044/// Set the implementation of ObjCCategoryDecl.
3045void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD,
3046 ObjCCategoryImplDecl *ImplD) {
3047 assert(CatD && ImplD && "Passed null params");
3048 ObjCImpls[CatD] = ImplD;
3049}
3050
3051const ObjCMethodDecl *
3052ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const {
3053 return ObjCMethodRedecls.lookup(Val: MD);
3054}
3055
3056void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD,
3057 const ObjCMethodDecl *Redecl) {
3058 assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration");
3059 ObjCMethodRedecls[MD] = Redecl;
3060}
3061
3062const ObjCInterfaceDecl *ASTContext::getObjContainingInterface(
3063 const NamedDecl *ND) const {
3064 if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(Val: ND->getDeclContext()))
3065 return ID;
3066 if (const auto *CD = dyn_cast<ObjCCategoryDecl>(Val: ND->getDeclContext()))
3067 return CD->getClassInterface();
3068 if (const auto *IMD = dyn_cast<ObjCImplDecl>(Val: ND->getDeclContext()))
3069 return IMD->getClassInterface();
3070
3071 return nullptr;
3072}
3073
3074/// Get the copy initialization expression of VarDecl, or nullptr if
3075/// none exists.
3076BlockVarCopyInit ASTContext::getBlockVarCopyInit(const VarDecl *VD) const {
3077 assert(VD && "Passed null params");
3078 assert(VD->hasAttr<BlocksAttr>() &&
3079 "getBlockVarCopyInits - not __block var");
3080 auto I = BlockVarCopyInits.find(Val: VD);
3081 if (I != BlockVarCopyInits.end())
3082 return I->second;
3083 return {nullptr, false};
3084}
3085
3086/// Set the copy initialization expression of a block var decl.
3087void ASTContext::setBlockVarCopyInit(const VarDecl*VD, Expr *CopyExpr,
3088 bool CanThrow) {
3089 assert(VD && CopyExpr && "Passed null params");
3090 assert(VD->hasAttr<BlocksAttr>() &&
3091 "setBlockVarCopyInits - not __block var");
3092 BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow);
3093}
3094
3095TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T,
3096 unsigned DataSize) const {
3097 if (!DataSize)
3098 DataSize = TypeLoc::getFullDataSizeForType(Ty: T);
3099 else
3100 assert(DataSize == TypeLoc::getFullDataSizeForType(T) &&
3101 "incorrect data size provided to CreateTypeSourceInfo!");
3102
3103 auto *TInfo =
3104 (TypeSourceInfo*)BumpAlloc.Allocate(Size: sizeof(TypeSourceInfo) + DataSize, Alignment: 8);
3105 new (TInfo) TypeSourceInfo(T, DataSize);
3106 return TInfo;
3107}
3108
3109TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T,
3110 SourceLocation L) const {
3111 TypeSourceInfo *TSI = CreateTypeSourceInfo(T);
3112 TSI->getTypeLoc().initialize(Context&: const_cast<ASTContext &>(*this), Loc: L);
3113 return TSI;
3114}
3115
// Thin public wrapper over getObjCLayout for interface layout queries.
const ASTRecordLayout &
ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const {
  return getObjCLayout(D);
}
3120
3121static auto getCanonicalTemplateArguments(const ASTContext &C,
3122 ArrayRef<TemplateArgument> Args,
3123 bool &AnyNonCanonArgs) {
3124 SmallVector<TemplateArgument, 16> CanonArgs(Args);
3125 AnyNonCanonArgs |= C.canonicalizeTemplateArguments(Args: CanonArgs);
3126 return CanonArgs;
3127}
3128
3129bool ASTContext::canonicalizeTemplateArguments(
3130 MutableArrayRef<TemplateArgument> Args) const {
3131 bool AnyNonCanonArgs = false;
3132 for (auto &Arg : Args) {
3133 TemplateArgument OrigArg = Arg;
3134 Arg = getCanonicalTemplateArgument(Arg);
3135 AnyNonCanonArgs |= !Arg.structurallyEquals(Other: OrigArg);
3136 }
3137 return AnyNonCanonArgs;
3138}
3139
3140//===----------------------------------------------------------------------===//
3141// Type creation/memoization methods
3142//===----------------------------------------------------------------------===//
3143
// Returns a QualType wrapping \p baseType with the given qualifiers. Non-fast
// qualifiers live in a uniqued ExtQuals node; fast qualifiers stay in the
// QualType's low pointer bits.
QualType
ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const {
  unsigned fastQuals = quals.getFastQualifiers();
  quals.removeFastQualifiers();

  // Check if we've already instantiated this type.
  llvm::FoldingSetNodeID ID;
  ExtQuals::Profile(ID, BaseType: baseType, Quals: quals);
  void *insertPos = nullptr;
  if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, InsertPos&: insertPos)) {
    assert(eq->getQualifiers() == quals);
    return QualType(eq, fastQuals);
  }

  // If the base type is not canonical, make the appropriate canonical type.
  QualType canon;
  if (!baseType->isCanonicalUnqualified()) {
    SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split();
    canonSplit.Quals.addConsistentQualifiers(qs: quals);
    canon = getExtQualType(baseType: canonSplit.Ty, quals: canonSplit.Quals);

    // Re-find the insert position.
    // (The recursive call above may have inserted into the fold set and
    // invalidated the previously computed position.)
    (void) ExtQualNodes.FindNodeOrInsertPos(ID, InsertPos&: insertPos);
  }

  auto *eq = new (*this, alignof(ExtQuals)) ExtQuals(baseType, canon, quals);
  ExtQualNodes.InsertNode(N: eq, InsertPos: insertPos);
  return QualType(eq, fastQuals);
}
3173
3174QualType ASTContext::getAddrSpaceQualType(QualType T,
3175 LangAS AddressSpace) const {
3176 QualType CanT = getCanonicalType(T);
3177 if (CanT.getAddressSpace() == AddressSpace)
3178 return T;
3179
3180 // If we are composing extended qualifiers together, merge together
3181 // into one ExtQuals node.
3182 QualifierCollector Quals;
3183 const Type *TypeNode = Quals.strip(type: T);
3184
3185 // If this type already has an address space specified, it cannot get
3186 // another one.
3187 assert(!Quals.hasAddressSpace() &&
3188 "Type cannot be in multiple addr spaces!");
3189 Quals.addAddressSpace(space: AddressSpace);
3190
3191 return getExtQualType(baseType: TypeNode, quals: Quals);
3192}
3193
// Returns \p T with its address-space qualifier removed, preserving all other
// qualifiers.
QualType ASTContext::removeAddrSpaceQualType(QualType T) const {
  // If the type is not qualified with an address space, just return it
  // immediately.
  if (!T.hasAddressSpace())
    return T;

  QualifierCollector Quals;
  const Type *TypeNode;
  // For arrays, strip the qualifier off the element type, then reconstruct the
  // array type
  if (T.getTypePtr()->isArrayType()) {
    T = getUnqualifiedArrayType(T, Quals);
    TypeNode = T.getTypePtr();
  } else {
    // If we are composing extended qualifiers together, merge together
    // into one ExtQuals node.
    while (T.hasAddressSpace()) {
      TypeNode = Quals.strip(type: T);

      // If the type no longer has an address space after stripping qualifiers,
      // jump out.
      if (!QualType(TypeNode, 0).hasAddressSpace())
        break;

      // There might be sugar in the way. Strip it and try again.
      // (The address space may be attached to a typedef or other sugar node
      // rather than directly to this level.)
      T = T.getSingleStepDesugaredType(Context: *this);
    }
  }

  Quals.removeAddressSpace();

  // Removal of the address space can mean there are no longer any
  // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts)
  // or required.
  if (Quals.hasNonFastQualifiers())
    return getExtQualType(baseType: TypeNode, quals: Quals);
  else
    return QualType(TypeNode, Quals.getFastQualifiers());
}
3233
3234uint16_t
3235ASTContext::getPointerAuthVTablePointerDiscriminator(const CXXRecordDecl *RD) {
3236 assert(RD->isPolymorphic() &&
3237 "Attempted to get vtable pointer discriminator on a monomorphic type");
3238 std::unique_ptr<MangleContext> MC(createMangleContext());
3239 SmallString<256> Str;
3240 llvm::raw_svector_ostream Out(Str);
3241 MC->mangleCXXVTable(RD, Out);
3242 return llvm::getPointerAuthStableSipHash(S: Str);
3243}
3244
3245/// Encode a function type for use in the discriminator of a function pointer
3246/// type. We can't use the itanium scheme for this since C has quite permissive
3247/// rules for type compatibility that we need to be compatible with.
3248///
3249/// Formally, this function associates every function pointer type T with an
3250/// encoded string E(T). Let the equivalence relation T1 ~ T2 be defined as
3251/// E(T1) == E(T2). E(T) is part of the ABI of values of type T. C type
3252/// compatibility requires equivalent treatment under the ABI, so
3253/// CCompatible(T1, T2) must imply E(T1) == E(T2), that is, CCompatible must be
3254/// a subset of ~. Crucially, however, it must be a proper subset because
3255/// CCompatible is not an equivalence relation: for example, int[] is compatible
3256/// with both int[1] and int[2], but the latter are not compatible with each
3257/// other. Therefore this encoding function must be careful to only distinguish
3258/// types if there is no third type with which they are both required to be
3259/// compatible.
3260static void encodeTypeForFunctionPointerAuth(const ASTContext &Ctx,
3261 raw_ostream &OS, QualType QT) {
3262 // FIXME: Consider address space qualifiers.
3263 const Type *T = QT.getCanonicalType().getTypePtr();
3264
3265 // FIXME: Consider using the C++ type mangling when we encounter a construct
3266 // that is incompatible with C.
3267
3268 switch (T->getTypeClass()) {
3269 case Type::Atomic:
3270 return encodeTypeForFunctionPointerAuth(
3271 Ctx, OS, QT: cast<AtomicType>(Val: T)->getValueType());
3272
3273 case Type::LValueReference:
3274 OS << "R";
3275 encodeTypeForFunctionPointerAuth(Ctx, OS,
3276 QT: cast<ReferenceType>(Val: T)->getPointeeType());
3277 return;
3278 case Type::RValueReference:
3279 OS << "O";
3280 encodeTypeForFunctionPointerAuth(Ctx, OS,
3281 QT: cast<ReferenceType>(Val: T)->getPointeeType());
3282 return;
3283
3284 case Type::Pointer:
3285 // C11 6.7.6.1p2:
3286 // For two pointer types to be compatible, both shall be identically
3287 // qualified and both shall be pointers to compatible types.
3288 // FIXME: we should also consider pointee types.
3289 OS << "P";
3290 return;
3291
3292 case Type::ObjCObjectPointer:
3293 case Type::BlockPointer:
3294 OS << "P";
3295 return;
3296
3297 case Type::Complex:
3298 OS << "C";
3299 return encodeTypeForFunctionPointerAuth(
3300 Ctx, OS, QT: cast<ComplexType>(Val: T)->getElementType());
3301
3302 case Type::VariableArray:
3303 case Type::ConstantArray:
3304 case Type::IncompleteArray:
3305 case Type::ArrayParameter:
3306 // C11 6.7.6.2p6:
3307 // For two array types to be compatible, both shall have compatible
3308 // element types, and if both size specifiers are present, and are integer
3309 // constant expressions, then both size specifiers shall have the same
3310 // constant value [...]
3311 //
3312 // So since ElemType[N] has to be compatible ElemType[], we can't encode the
3313 // width of the array.
3314 OS << "A";
3315 return encodeTypeForFunctionPointerAuth(
3316 Ctx, OS, QT: cast<ArrayType>(Val: T)->getElementType());
3317
3318 case Type::ObjCInterface:
3319 case Type::ObjCObject:
3320 OS << "<objc_object>";
3321 return;
3322
3323 case Type::Enum: {
3324 // C11 6.7.2.2p4:
3325 // Each enumerated type shall be compatible with char, a signed integer
3326 // type, or an unsigned integer type.
3327 //
3328 // So we have to treat enum types as integers.
3329 QualType UnderlyingType = T->castAsEnumDecl()->getIntegerType();
3330 return encodeTypeForFunctionPointerAuth(
3331 Ctx, OS, QT: UnderlyingType.isNull() ? Ctx.IntTy : UnderlyingType);
3332 }
3333
3334 case Type::FunctionNoProto:
3335 case Type::FunctionProto: {
3336 // C11 6.7.6.3p15:
3337 // For two function types to be compatible, both shall specify compatible
3338 // return types. Moreover, the parameter type lists, if both are present,
3339 // shall agree in the number of parameters and in the use of the ellipsis
3340 // terminator; corresponding parameters shall have compatible types.
3341 //
3342 // That paragraph goes on to describe how unprototyped functions are to be
3343 // handled, which we ignore here. Unprototyped function pointers are hashed
3344 // as though they were prototyped nullary functions since thats probably
3345 // what the user meant. This behavior is non-conforming.
3346 // FIXME: If we add a "custom discriminator" function type attribute we
3347 // should encode functions as their discriminators.
3348 OS << "F";
3349 const auto *FuncType = cast<FunctionType>(Val: T);
3350 encodeTypeForFunctionPointerAuth(Ctx, OS, QT: FuncType->getReturnType());
3351 if (const auto *FPT = dyn_cast<FunctionProtoType>(Val: FuncType)) {
3352 for (QualType Param : FPT->param_types()) {
3353 Param = Ctx.getSignatureParameterType(T: Param);
3354 encodeTypeForFunctionPointerAuth(Ctx, OS, QT: Param);
3355 }
3356 if (FPT->isVariadic())
3357 OS << "z";
3358 }
3359 OS << "E";
3360 return;
3361 }
3362
3363 case Type::MemberPointer: {
3364 OS << "M";
3365 const auto *MPT = T->castAs<MemberPointerType>();
3366 encodeTypeForFunctionPointerAuth(
3367 Ctx, OS, QT: QualType(MPT->getQualifier().getAsType(), 0));
3368 encodeTypeForFunctionPointerAuth(Ctx, OS, QT: MPT->getPointeeType());
3369 return;
3370 }
3371 case Type::ExtVector:
3372 case Type::Vector:
3373 OS << "Dv" << Ctx.getTypeSizeInChars(T).getQuantity();
3374 break;
3375
3376 // Don't bother discriminating based on these types.
3377 case Type::Pipe:
3378 case Type::BitInt:
3379 case Type::ConstantMatrix:
3380 OS << "?";
3381 return;
3382
3383 case Type::Builtin: {
3384 const auto *BTy = T->castAs<BuiltinType>();
3385 switch (BTy->getKind()) {
3386#define SIGNED_TYPE(Id, SingletonId) \
3387 case BuiltinType::Id: \
3388 OS << "i"; \
3389 return;
3390#define UNSIGNED_TYPE(Id, SingletonId) \
3391 case BuiltinType::Id: \
3392 OS << "i"; \
3393 return;
3394#define PLACEHOLDER_TYPE(Id, SingletonId) case BuiltinType::Id:
3395#define BUILTIN_TYPE(Id, SingletonId)
3396#include "clang/AST/BuiltinTypes.def"
3397 llvm_unreachable("placeholder types should not appear here.");
3398
3399 case BuiltinType::Half:
3400 OS << "Dh";
3401 return;
3402 case BuiltinType::Float:
3403 OS << "f";
3404 return;
3405 case BuiltinType::Double:
3406 OS << "d";
3407 return;
3408 case BuiltinType::LongDouble:
3409 OS << "e";
3410 return;
3411 case BuiltinType::Float16:
3412 OS << "DF16_";
3413 return;
3414 case BuiltinType::Float128:
3415 OS << "g";
3416 return;
3417
3418 case BuiltinType::Void:
3419 OS << "v";
3420 return;
3421
3422 case BuiltinType::ObjCId:
3423 case BuiltinType::ObjCClass:
3424 case BuiltinType::ObjCSel:
3425 case BuiltinType::NullPtr:
3426 OS << "P";
3427 return;
3428
3429 // Don't bother discriminating based on OpenCL types.
3430 case BuiltinType::OCLSampler:
3431 case BuiltinType::OCLEvent:
3432 case BuiltinType::OCLClkEvent:
3433 case BuiltinType::OCLQueue:
3434 case BuiltinType::OCLReserveID:
3435 case BuiltinType::BFloat16:
3436 case BuiltinType::VectorQuad:
3437 case BuiltinType::VectorPair:
3438 case BuiltinType::DMR1024:
3439 case BuiltinType::DMR2048:
3440 OS << "?";
3441 return;
3442
3443 // Don't bother discriminating based on these seldom-used types.
3444 case BuiltinType::Ibm128:
3445 return;
3446#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
3447 case BuiltinType::Id: \
3448 return;
3449#include "clang/Basic/OpenCLImageTypes.def"
3450#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
3451 case BuiltinType::Id: \
3452 return;
3453#include "clang/Basic/OpenCLExtensionTypes.def"
3454#define SVE_TYPE(Name, Id, SingletonId) \
3455 case BuiltinType::Id: \
3456 return;
3457#include "clang/Basic/AArch64ACLETypes.def"
3458#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) \
3459 case BuiltinType::Id: \
3460 return;
3461#include "clang/Basic/HLSLIntangibleTypes.def"
3462 case BuiltinType::Dependent:
3463 llvm_unreachable("should never get here");
3464#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) case BuiltinType::Id:
3465#include "clang/Basic/AMDGPUTypes.def"
3466 case BuiltinType::WasmExternRef:
3467#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
3468#include "clang/Basic/RISCVVTypes.def"
3469 llvm_unreachable("not yet implemented");
3470 }
3471 llvm_unreachable("should never get here");
3472 }
3473 case Type::Record: {
3474 const RecordDecl *RD = T->castAsCanonical<RecordType>()->getDecl();
3475 const IdentifierInfo *II = RD->getIdentifier();
3476
3477 // In C++, an immediate typedef of an anonymous struct or union
3478 // is considered to name it for ODR purposes, but C's specification
3479 // of type compatibility does not have a similar rule. Using the typedef
3480 // name in function type discriminators anyway, as we do here,
3481 // therefore technically violates the C standard: two function pointer
3482 // types defined in terms of two typedef'd anonymous structs with
3483 // different names are formally still compatible, but we are assigning
3484 // them different discriminators and therefore incompatible ABIs.
3485 //
3486 // This is a relatively minor violation that significantly improves
3487 // discrimination in some cases and has not caused problems in
3488 // practice. Regardless, it is now part of the ABI in places where
3489 // function type discrimination is used, and it can no longer be
3490 // changed except on new platforms.
3491
3492 if (!II)
3493 if (const TypedefNameDecl *Typedef = RD->getTypedefNameForAnonDecl())
3494 II = Typedef->getDeclName().getAsIdentifierInfo();
3495
3496 if (!II) {
3497 OS << "<anonymous_record>";
3498 return;
3499 }
3500 OS << II->getLength() << II->getName();
3501 return;
3502 }
3503 case Type::HLSLAttributedResource:
3504 case Type::HLSLInlineSpirv:
3505 llvm_unreachable("should never get here");
3506 break;
3507 case Type::DeducedTemplateSpecialization:
3508 case Type::Auto:
3509#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3510#define DEPENDENT_TYPE(Class, Base) case Type::Class:
3511#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3512#define ABSTRACT_TYPE(Class, Base)
3513#define TYPE(Class, Base)
3514#include "clang/AST/TypeNodes.inc"
3515 llvm_unreachable("unexpected non-canonical or dependent type!");
3516 return;
3517 }
3518}
3519
/// Compute the pointer-auth type discriminator for \p T.
///
/// The type is serialized to a string and hashed with the stable SipHash;
/// function types use the C-compatibility-based encoding above, while all
/// other types are mangled with the C++ mangler. The result feeds into
/// pointer-signing, so the encoding is ABI and must stay stable.
uint16_t ASTContext::getPointerAuthTypeDiscriminator(QualType T) {
  assert(!T->isDependentType() &&
         "cannot compute type discriminator of a dependent type");
  SmallString<256> Str;
  llvm::raw_svector_ostream Out(Str);

  // Discriminate based on the function type itself, not on the pointer or
  // reference wrapping it.
  if (T->isFunctionPointerType() || T->isFunctionReferenceType())
    T = T->getPointeeType();

  if (T->isFunctionType()) {
    // Use the compatibility-based encoding so that C-compatible function
    // types hash identically.
    encodeTypeForFunctionPointerAuth(Ctx: *this, OS&: Out, QT: T);
  } else {
    T = T.getUnqualifiedType();
    // Calls to member function pointers don't need to worry about
    // language interop or the laxness of the C type compatibility rules.
    // We just mangle the member pointer type directly, which is
    // implicitly much stricter about type matching. However, we do
    // strip any top-level exception specification before this mangling.
    // C++23 requires calls to work when the function type is convertible
    // to the pointer type by a function pointer conversion, which can
    // change the exception specification. This does not technically
    // require the exception specification to not affect representation,
    // because the function pointer conversion is still always a direct
    // value conversion and therefore an opportunity to resign the
    // pointer. (This is in contrast to e.g. qualification conversions,
    // which can be applied in nested pointer positions, effectively
    // requiring qualified and unqualified representations to match.)
    // However, it is pragmatic to ignore exception specifications
    // because it allows a certain amount of `noexcept` mismatching
    // to not become a visible ODR problem. This also leaves some
    // room for the committee to add laxness to function pointer
    // conversions in future standards.
    if (auto *MPT = T->getAs<MemberPointerType>())
      if (MPT->isMemberFunctionPointer()) {
        // Rebuild the member pointer with EST_None on the pointee, per the
        // rationale above.
        QualType PointeeType = MPT->getPointeeType();
        if (PointeeType->castAs<FunctionProtoType>()->getExceptionSpecType() !=
            EST_None) {
          QualType FT = getFunctionTypeWithExceptionSpec(Orig: PointeeType, ESI: EST_None);
          T = getMemberPointerType(T: FT, Qualifier: MPT->getQualifier(),
                                   Cls: MPT->getMostRecentCXXRecordDecl());
        }
      }
    std::unique_ptr<MangleContext> MC(createMangleContext());
    MC->mangleCanonicalTypeName(T, Out);
  }

  // Hash the serialized form down to the 16-bit discriminator.
  return llvm::getPointerAuthStableSipHash(S: Str);
}
3568
3569QualType ASTContext::getObjCGCQualType(QualType T,
3570 Qualifiers::GC GCAttr) const {
3571 QualType CanT = getCanonicalType(T);
3572 if (CanT.getObjCGCAttr() == GCAttr)
3573 return T;
3574
3575 if (const auto *ptr = T->getAs<PointerType>()) {
3576 QualType Pointee = ptr->getPointeeType();
3577 if (Pointee->isAnyPointerType()) {
3578 QualType ResultType = getObjCGCQualType(T: Pointee, GCAttr);
3579 return getPointerType(T: ResultType);
3580 }
3581 }
3582
3583 // If we are composing extended qualifiers together, merge together
3584 // into one ExtQuals node.
3585 QualifierCollector Quals;
3586 const Type *TypeNode = Quals.strip(type: T);
3587
3588 // If this type already has an ObjCGC specified, it cannot get
3589 // another one.
3590 assert(!Quals.hasObjCGCAttr() &&
3591 "Type cannot have multiple ObjCGCs!");
3592 Quals.addObjCGCAttr(type: GCAttr);
3593
3594 return getExtQualType(baseType: TypeNode, quals: Quals);
3595}
3596
3597QualType ASTContext::removePtrSizeAddrSpace(QualType T) const {
3598 if (const PointerType *Ptr = T->getAs<PointerType>()) {
3599 QualType Pointee = Ptr->getPointeeType();
3600 if (isPtrSizeAddressSpace(AS: Pointee.getAddressSpace())) {
3601 return getPointerType(T: removeAddrSpaceQualType(T: Pointee));
3602 }
3603 }
3604 return T;
3605}
3606
/// Return the uniqued count-attributed type wrapping \p WrappedTy with the
/// count expression \p CountExpr.
///
/// \param CountInBytes whether the count is measured in bytes.
/// \param OrNull whether the wrapped pointer may also be null.
/// \param DependentDecls declarations the count expression is coupled to;
///        these are tail-allocated into the node.
QualType ASTContext::getCountAttributedType(
    QualType WrappedTy, Expr *CountExpr, bool CountInBytes, bool OrNull,
    ArrayRef<TypeCoupledDeclRefInfo> DependentDecls) const {
  assert(WrappedTy->isPointerType() || WrappedTy->isArrayType());

  llvm::FoldingSetNodeID ID;
  CountAttributedType::Profile(ID, WrappedTy, CountExpr, CountInBytes, Nullable: OrNull);

  // Reuse an existing node if this exact type was already built.
  void *InsertPos = nullptr;
  CountAttributedType *CATy =
      CountAttributedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (CATy)
    return QualType(CATy, 0);

  QualType CanonTy = getCanonicalType(T: WrappedTy);
  // The node is variable-length: the dependent-decl list is tail-allocated.
  size_t Size = CountAttributedType::totalSizeToAlloc<TypeCoupledDeclRefInfo>(
      Counts: DependentDecls.size());
  CATy = (CountAttributedType *)Allocate(Size, Align: TypeAlignment);
  new (CATy) CountAttributedType(WrappedTy, CanonTy, CountExpr, CountInBytes,
                                 OrNull, DependentDecls);
  Types.push_back(Elt: CATy);
  CountAttributedTypes.InsertNode(N: CATy, InsertPos);

  return QualType(CATy, 0);
}
3632
3633QualType
3634ASTContext::adjustType(QualType Orig,
3635 llvm::function_ref<QualType(QualType)> Adjust) const {
3636 switch (Orig->getTypeClass()) {
3637 case Type::Attributed: {
3638 const auto *AT = cast<AttributedType>(Val&: Orig);
3639 return getAttributedType(attrKind: AT->getAttrKind(),
3640 modifiedType: adjustType(Orig: AT->getModifiedType(), Adjust),
3641 equivalentType: adjustType(Orig: AT->getEquivalentType(), Adjust),
3642 attr: AT->getAttr());
3643 }
3644
3645 case Type::BTFTagAttributed: {
3646 const auto *BTFT = dyn_cast<BTFTagAttributedType>(Val&: Orig);
3647 return getBTFTagAttributedType(BTFAttr: BTFT->getAttr(),
3648 Wrapped: adjustType(Orig: BTFT->getWrappedType(), Adjust));
3649 }
3650
3651 case Type::Paren:
3652 return getParenType(
3653 NamedType: adjustType(Orig: cast<ParenType>(Val&: Orig)->getInnerType(), Adjust));
3654
3655 case Type::Adjusted: {
3656 const auto *AT = cast<AdjustedType>(Val&: Orig);
3657 return getAdjustedType(Orig: AT->getOriginalType(),
3658 New: adjustType(Orig: AT->getAdjustedType(), Adjust));
3659 }
3660
3661 case Type::MacroQualified: {
3662 const auto *MQT = cast<MacroQualifiedType>(Val&: Orig);
3663 return getMacroQualifiedType(UnderlyingTy: adjustType(Orig: MQT->getUnderlyingType(), Adjust),
3664 MacroII: MQT->getMacroIdentifier());
3665 }
3666
3667 default:
3668 return Adjust(Orig);
3669 }
3670}
3671
3672const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T,
3673 FunctionType::ExtInfo Info) {
3674 if (T->getExtInfo() == Info)
3675 return T;
3676
3677 QualType Result;
3678 if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(Val: T)) {
3679 Result = getFunctionNoProtoType(ResultTy: FNPT->getReturnType(), Info);
3680 } else {
3681 const auto *FPT = cast<FunctionProtoType>(Val: T);
3682 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
3683 EPI.ExtInfo = Info;
3684 Result = getFunctionType(ResultTy: FPT->getReturnType(), Args: FPT->getParamTypes(), EPI);
3685 }
3686
3687 return cast<FunctionType>(Val: Result.getTypePtr());
3688}
3689
3690QualType ASTContext::adjustFunctionResultType(QualType FunctionType,
3691 QualType ResultType) {
3692 return adjustType(Orig: FunctionType, Adjust: [&](QualType Orig) {
3693 if (const auto *FNPT = Orig->getAs<FunctionNoProtoType>())
3694 return getFunctionNoProtoType(ResultTy: ResultType, Info: FNPT->getExtInfo());
3695
3696 const auto *FPT = Orig->castAs<FunctionProtoType>();
3697 return getFunctionType(ResultTy: ResultType, Args: FPT->getParamTypes(),
3698 EPI: FPT->getExtProtoInfo());
3699 });
3700}
3701
3702void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD,
3703 QualType ResultType) {
3704 FD = FD->getMostRecentDecl();
3705 while (true) {
3706 FD->setType(adjustFunctionResultType(FunctionType: FD->getType(), ResultType));
3707 if (FunctionDecl *Next = FD->getPreviousDecl())
3708 FD = Next;
3709 else
3710 break;
3711 }
3712 if (ASTMutationListener *L = getASTMutationListener())
3713 L->DeducedReturnType(FD, ReturnType: ResultType);
3714}
3715
3716/// Get a function type and produce the equivalent function type with the
3717/// specified exception specification. Type sugar that can be present on a
3718/// declaration of a function with an exception specification is permitted
3719/// and preserved. Other type sugar (for instance, typedefs) is not.
3720QualType ASTContext::getFunctionTypeWithExceptionSpec(
3721 QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const {
3722 return adjustType(Orig, Adjust: [&](QualType Ty) {
3723 const auto *Proto = Ty->castAs<FunctionProtoType>();
3724 return getFunctionType(ResultTy: Proto->getReturnType(), Args: Proto->getParamTypes(),
3725 EPI: Proto->getExtProtoInfo().withExceptionSpec(ESI));
3726 });
3727}
3728
3729bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T,
3730 QualType U) const {
3731 return hasSameType(T1: T, T2: U) ||
3732 (getLangOpts().CPlusPlus17 &&
3733 hasSameType(T1: getFunctionTypeWithExceptionSpec(Orig: T, ESI: EST_None),
3734 T2: getFunctionTypeWithExceptionSpec(Orig: U, ESI: EST_None)));
3735}
3736
3737QualType ASTContext::getFunctionTypeWithoutPtrSizes(QualType T) {
3738 if (const auto *Proto = T->getAs<FunctionProtoType>()) {
3739 QualType RetTy = removePtrSizeAddrSpace(T: Proto->getReturnType());
3740 SmallVector<QualType, 16> Args(Proto->param_types().size());
3741 for (unsigned i = 0, n = Args.size(); i != n; ++i)
3742 Args[i] = removePtrSizeAddrSpace(T: Proto->param_types()[i]);
3743 return getFunctionType(ResultTy: RetTy, Args, EPI: Proto->getExtProtoInfo());
3744 }
3745
3746 if (const FunctionNoProtoType *Proto = T->getAs<FunctionNoProtoType>()) {
3747 QualType RetTy = removePtrSizeAddrSpace(T: Proto->getReturnType());
3748 return getFunctionNoProtoType(ResultTy: RetTy, Info: Proto->getExtInfo());
3749 }
3750
3751 return T;
3752}
3753
3754bool ASTContext::hasSameFunctionTypeIgnoringPtrSizes(QualType T, QualType U) {
3755 return hasSameType(T1: T, T2: U) ||
3756 hasSameType(T1: getFunctionTypeWithoutPtrSizes(T),
3757 T2: getFunctionTypeWithoutPtrSizes(T: U));
3758}
3759
3760QualType ASTContext::getFunctionTypeWithoutParamABIs(QualType T) const {
3761 if (const auto *Proto = T->getAs<FunctionProtoType>()) {
3762 FunctionProtoType::ExtProtoInfo EPI = Proto->getExtProtoInfo();
3763 EPI.ExtParameterInfos = nullptr;
3764 return getFunctionType(ResultTy: Proto->getReturnType(), Args: Proto->param_types(), EPI);
3765 }
3766 return T;
3767}
3768
3769bool ASTContext::hasSameFunctionTypeIgnoringParamABI(QualType T,
3770 QualType U) const {
3771 return hasSameType(T1: T, T2: U) || hasSameType(T1: getFunctionTypeWithoutParamABIs(T),
3772 T2: getFunctionTypeWithoutParamABIs(T: U));
3773}
3774
/// Replace the exception specification on \p FD's type in place.
///
/// \param AsWritten if true, also patch the declaration's TypeSourceInfo so
///        the as-written type agrees with the new specification.
void ASTContext::adjustExceptionSpec(
    FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI,
    bool AsWritten) {
  // Update the type.
  QualType Updated =
      getFunctionTypeWithExceptionSpec(Orig: FD->getType(), ESI);
  FD->setType(Updated);

  if (!AsWritten)
    return;

  // Update the type in the type source information too.
  if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) {
    // If the type and the type-as-written differ, we may need to update
    // the type-as-written too.
    if (TSInfo->getType() != FD->getType())
      Updated = getFunctionTypeWithExceptionSpec(Orig: TSInfo->getType(), ESI);

    // FIXME: When we get proper type location information for exceptions,
    // we'll also have to rebuild the TypeSourceInfo. For now, we just patch
    // up the TypeSourceInfo;
    assert(TypeLoc::getFullDataSizeForType(Updated) ==
           TypeLoc::getFullDataSizeForType(TSInfo->getType()) &&
           "TypeLoc size mismatch from updating exception specification");
    TSInfo->overrideType(T: Updated);
  }
}
3802
/// getComplexType - Return the uniqued reference to the type for a complex
/// number with the specified element type.
QualType ASTContext::getComplexType(QualType T) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ComplexType::Profile(ID, Element: T);

  void *InsertPos = nullptr;
  if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(CT, 0);

  // If the pointee type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getComplexType(T: getCanonicalType(T));

    // Get the new insert position for the node we care about.
    // (The recursive call above may have inserted into ComplexTypes,
    // invalidating the previously obtained InsertPos.)
    ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, alignof(ComplexType)) ComplexType(T, Canonical);
  Types.push_back(Elt: New);
  ComplexTypes.InsertNode(N: New, InsertPos);
  return QualType(New, 0);
}
3830
/// getPointerType - Return the uniqued reference to the type for a pointer to
/// the specified type.
QualType ASTContext::getPointerType(QualType T) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  PointerType::Profile(ID, Pointee: T);

  void *InsertPos = nullptr;
  if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the pointee type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getPointerType(T: getCanonicalType(T));

    // Get the new insert position for the node we care about.
    // (The recursive call above may have inserted into PointerTypes,
    // invalidating the previously obtained InsertPos.)
    PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, alignof(PointerType)) PointerType(T, Canonical);
  Types.push_back(Elt: New);
  PointerTypes.InsertNode(N: New, InsertPos);
  return QualType(New, 0);
}
3858
/// Return the uniqued AdjustedType node recording that \p Orig was adjusted
/// to \p New, keeping the original spelling around for diagnostics.
QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const {
  llvm::FoldingSetNodeID ID;
  AdjustedType::Profile(ID, Orig, New);
  void *InsertPos = nullptr;
  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (AT)
    return QualType(AT, 0);

  // The canonical type of the node is the canonical form of the adjusted
  // ("new") type.
  QualType Canonical = getCanonicalType(T: New);

  // Get the new insert position for the node we care about.
  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  assert(!AT && "Shouldn't be in the map!");

  AT = new (*this, alignof(AdjustedType))
      AdjustedType(Type::Adjusted, Orig, New, Canonical);
  Types.push_back(Elt: AT);
  AdjustedTypes.InsertNode(N: AT, InsertPos);
  return QualType(AT, 0);
}
3879
/// Return the uniqued DecayedType node recording that \p Orig decayed to
/// \p Decayed. DecayedType nodes are uniqued in the same folding set as
/// plain AdjustedType nodes (they share the AdjustedType profile).
QualType ASTContext::getDecayedType(QualType Orig, QualType Decayed) const {
  llvm::FoldingSetNodeID ID;
  AdjustedType::Profile(ID, Orig, New: Decayed);
  void *InsertPos = nullptr;
  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (AT)
    return QualType(AT, 0);

  // The canonical type of the node is the canonical form of the decayed type.
  QualType Canonical = getCanonicalType(T: Decayed);

  // Get the new insert position for the node we care about.
  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  assert(!AT && "Shouldn't be in the map!");

  AT = new (*this, alignof(DecayedType)) DecayedType(Orig, Decayed, Canonical);
  Types.push_back(Elt: AT);
  AdjustedTypes.InsertNode(N: AT, InsertPos);
  return QualType(AT, 0);
}
3899
3900QualType ASTContext::getDecayedType(QualType T) const {
3901 assert((T->isArrayType() || T->isFunctionType()) && "T does not decay");
3902
3903 QualType Decayed;
3904
3905 // C99 6.7.5.3p7:
3906 // A declaration of a parameter as "array of type" shall be
3907 // adjusted to "qualified pointer to type", where the type
3908 // qualifiers (if any) are those specified within the [ and ] of
3909 // the array type derivation.
3910 if (T->isArrayType())
3911 Decayed = getArrayDecayedType(T);
3912
3913 // C99 6.7.5.3p8:
3914 // A declaration of a parameter as "function returning type"
3915 // shall be adjusted to "pointer to function returning type", as
3916 // in 6.3.2.1.
3917 if (T->isFunctionType())
3918 Decayed = getPointerType(T);
3919
3920 return getDecayedType(Orig: T, Decayed);
3921}
3922
/// Return the uniqued ArrayParameterType wrapping the constant array type
/// \p Ty; returns \p Ty unchanged if it is already an array-parameter type.
QualType ASTContext::getArrayParameterType(QualType Ty) const {
  if (Ty->isArrayParameterType())
    return Ty;
  assert(Ty->isConstantArrayType() && "Ty must be an array type.");
  // Profile against the desugared constant array so equivalent spellings
  // unique to the same node.
  QualType DTy = Ty.getDesugaredType(Context: *this);
  const auto *ATy = cast<ConstantArrayType>(Val&: DTy);
  llvm::FoldingSetNodeID ID;
  ATy->Profile(ID, Ctx: *this, ET: ATy->getElementType(), ArraySize: ATy->getZExtSize(),
               SizeExpr: ATy->getSizeExpr(), SizeMod: ATy->getSizeModifier(),
               TypeQuals: ATy->getIndexTypeQualifiers().getAsOpaqueValue());
  void *InsertPos = nullptr;
  ArrayParameterType *AT =
      ArrayParameterTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (AT)
    return QualType(AT, 0);

  // If the desugared array isn't canonical, this node isn't either; build
  // the canonical array-parameter type first.
  QualType Canonical;
  if (!DTy.isCanonical()) {
    Canonical = getArrayParameterType(Ty: getCanonicalType(T: Ty));

    // Get the new insert position for the node we care about.
    // (The recursive call above may have inserted into ArrayParameterTypes,
    // invalidating the previously obtained InsertPos.)
    AT = ArrayParameterTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!AT && "Shouldn't be in the map!");
  }

  AT = new (*this, alignof(ArrayParameterType))
      ArrayParameterType(ATy, Canonical);
  Types.push_back(Elt: AT);
  ArrayParameterTypes.InsertNode(N: AT, InsertPos);
  return QualType(AT, 0);
}
3954
/// getBlockPointerType - Return the uniqued reference to the type for
/// a pointer to the specified block.
QualType ASTContext::getBlockPointerType(QualType T) const {
  assert(T->isFunctionType() && "block of function types only");
  // Unique pointers, to guarantee there is only one block of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  BlockPointerType::Profile(ID, Pointee: T);

  void *InsertPos = nullptr;
  if (BlockPointerType *PT =
        BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the block pointee type isn't canonical, this won't be a canonical
  // type either so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getBlockPointerType(T: getCanonicalType(T));

    // Get the new insert position for the node we care about.
    // (The recursive call above may have inserted into BlockPointerTypes,
    // invalidating the previously obtained InsertPos.)
    BlockPointerType *NewIP =
      BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New =
      new (*this, alignof(BlockPointerType)) BlockPointerType(T, Canonical);
  Types.push_back(Elt: New);
  BlockPointerTypes.InsertNode(N: New, InsertPos);
  return QualType(New, 0);
}
3986
/// getLValueReferenceType - Return the uniqued reference to the type for an
/// lvalue reference to the specified type.
QualType
ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
  assert((!T->isPlaceholderType() ||
          T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
         "Unresolved placeholder type");

  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ReferenceType::Profile(ID, Referencee: T, SpelledAsLValue);

  void *InsertPos = nullptr;
  if (LValueReferenceType *RT =
        LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(RT, 0);

  // A reference-to-reference is collapsed in the canonical type below.
  const auto *InnerRef = T->getAs<ReferenceType>();

  // If the referencee type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field. The same applies when the
  // reference was not spelled as an lvalue reference, or when the referencee
  // is itself a reference (the canonical form strips the inner reference).
  QualType Canonical;
  if (!SpelledAsLValue || InnerRef || !T.isCanonical()) {
    QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
    Canonical = getLValueReferenceType(T: getCanonicalType(T: PointeeType));

    // Get the new insert position for the node we care about.
    // (The recursive call above may have inserted into LValueReferenceTypes,
    // invalidating the previously obtained InsertPos.)
    LValueReferenceType *NewIP =
      LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = new (*this, alignof(LValueReferenceType))
      LValueReferenceType(T, Canonical, SpelledAsLValue);
  Types.push_back(Elt: New);
  LValueReferenceTypes.InsertNode(N: New, InsertPos);

  return QualType(New, 0);
}
4027
/// getRValueReferenceType - Return the uniqued reference to the type for an
/// rvalue reference to the specified type.
QualType ASTContext::getRValueReferenceType(QualType T) const {
  assert((!T->isPlaceholderType() ||
          T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
         "Unresolved placeholder type");

  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ReferenceType::Profile(ID, Referencee: T, SpelledAsLValue: false);

  void *InsertPos = nullptr;
  if (RValueReferenceType *RT =
        RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(RT, 0);

  // A reference-to-reference is stripped in the canonical type below.
  const auto *InnerRef = T->getAs<ReferenceType>();

  // If the referencee type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (InnerRef || !T.isCanonical()) {
    QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
    Canonical = getRValueReferenceType(T: getCanonicalType(T: PointeeType));

    // Get the new insert position for the node we care about.
    // (The recursive call above may have inserted into RValueReferenceTypes,
    // invalidating the previously obtained InsertPos.)
    RValueReferenceType *NewIP =
      RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = new (*this, alignof(RValueReferenceType))
      RValueReferenceType(T, Canonical);
  Types.push_back(Elt: New);
  RValueReferenceTypes.InsertNode(N: New, InsertPos);
  return QualType(New, 0);
}
4066
/// Return the uniqued member pointer type for pointee \p T in the class
/// named by \p Qualifier and/or \p Cls. At least one of \p Qualifier and
/// \p Cls must be provided; whichever is missing is derived from the other.
QualType ASTContext::getMemberPointerType(QualType T,
                                          NestedNameSpecifier Qualifier,
                                          const CXXRecordDecl *Cls) const {
  if (!Qualifier) {
    assert(Cls && "At least one of Qualifier or Cls must be provided");
    Qualifier = NestedNameSpecifier(getCanonicalTagType(TD: Cls).getTypePtr());
  } else if (!Cls) {
    Cls = Qualifier.getAsRecordDecl();
  }
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  MemberPointerType::Profile(ID, Pointee: T, Qualifier, Cls);

  void *InsertPos = nullptr;
  if (MemberPointerType *PT =
      MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // Compute the canonical qualifier: the canonical tag type of the class
  // when one is known, otherwise the canonical form of the qualifier itself.
  NestedNameSpecifier CanonicalQualifier = [&] {
    if (!Cls)
      return Qualifier.getCanonical();
    NestedNameSpecifier R(getCanonicalTagType(TD: Cls).getTypePtr());
    assert(R.isCanonical());
    return R;
  }();
  // If the pointee or class type isn't canonical, this won't be a canonical
  // type either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical() || Qualifier != CanonicalQualifier) {
    Canonical =
        getMemberPointerType(T: getCanonicalType(T), Qualifier: CanonicalQualifier, Cls);
    assert(!cast<MemberPointerType>(Canonical)->isSugared());
    // Get the new insert position for the node we care about.
    // (The recursive call above may have inserted into MemberPointerTypes,
    // invalidating the previously obtained InsertPos.)
    [[maybe_unused]] MemberPointerType *NewIP =
        MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!");
  }
  auto *New = new (*this, alignof(MemberPointerType))
      MemberPointerType(T, Qualifier, Canonical);
  Types.push_back(Elt: New);
  MemberPointerTypes.InsertNode(N: New, InsertPos);
  return QualType(New, 0);
}
4111
/// getConstantArrayType - Return the unique reference to the type for an
/// array of the specified element type.
///
/// \param SizeExpr the written size expression; kept on the node only when
///        it is instantiation-dependent.
/// \param IndexTypeQuals qualifiers written inside the brackets ([N]).
QualType ASTContext::getConstantArrayType(QualType EltTy,
                                          const llvm::APInt &ArySizeIn,
                                          const Expr *SizeExpr,
                                          ArraySizeModifier ASM,
                                          unsigned IndexTypeQuals) const {
  assert((EltTy->isDependentType() ||
          EltTy->isIncompleteType() || EltTy->isConstantSizeType()) &&
         "Constant array of VLAs is illegal!");

  // We only need the size as part of the type if it's instantiation-dependent.
  if (SizeExpr && !SizeExpr->isInstantiationDependent())
    SizeExpr = nullptr;

  // Convert the array size into a canonical width matching the pointer size for
  // the target.
  llvm::APInt ArySize(ArySizeIn);
  ArySize = ArySize.zextOrTrunc(width: Target->getMaxPointerWidth());

  llvm::FoldingSetNodeID ID;
  ConstantArrayType::Profile(ID, Ctx: *this, ET: EltTy, ArraySize: ArySize.getZExtValue(), SizeExpr,
                             SizeMod: ASM, TypeQuals: IndexTypeQuals);

  void *InsertPos = nullptr;
  if (ConstantArrayType *ATP =
      ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(ATP, 0);

  // If the element type isn't canonical or has qualifiers, or the array bound
  // is instantiation-dependent, this won't be a canonical type either, so fill
  // in the canonical type field.
  QualType Canon;
  // FIXME: Check below should look for qualifiers behind sugar.
  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers() || SizeExpr) {
    // Element qualifiers are hoisted onto the array type in canonical form.
    SplitQualType canonSplit = getCanonicalType(T: EltTy).split();
    Canon = getConstantArrayType(EltTy: QualType(canonSplit.Ty, 0), ArySizeIn: ArySize, SizeExpr: nullptr,
                                 ASM, IndexTypeQuals);
    Canon = getQualifiedType(T: Canon, Qs: canonSplit.Quals);

    // Get the new insert position for the node we care about.
    // (The recursive call above may have inserted into ConstantArrayTypes,
    // invalidating the previously obtained InsertPos.)
    ConstantArrayType *NewIP =
        ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = ConstantArrayType::Create(Ctx: *this, ET: EltTy, Can: Canon, Sz: ArySize, SzExpr: SizeExpr,
                                        SzMod: ASM, Qual: IndexTypeQuals);
  ConstantArrayTypes.InsertNode(N: New, InsertPos);
  Types.push_back(Elt: New);
  return QualType(New, 0);
}
4164
/// getVariableArrayDecayedType - Turns the given type, which may be
/// variably-modified, into the corresponding type with all the known
/// sizes replaced with [*].
///
/// Sugar is stripped first via desugaring; the stripped top-level qualifiers
/// are re-applied to the rebuilt type at the end.
QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
  // Vastly most common case.
  if (!type->isVariablyModifiedType()) return type;

  QualType result;

  // Desugar so the switch below only needs to handle canonical type classes.
  SplitQualType split = type.getSplitDesugaredType();
  const Type *ty = split.Ty;
  switch (ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("didn't desugar past all non-canonical types?");

  // These types should never be variably-modified.
  case Type::Builtin:
  case Type::Complex:
  case Type::Vector:
  case Type::DependentVector:
  case Type::ExtVector:
  case Type::DependentSizedExtVector:
  case Type::ConstantMatrix:
  case Type::DependentSizedMatrix:
  case Type::DependentAddressSpace:
  case Type::ObjCObject:
  case Type::ObjCInterface:
  case Type::ObjCObjectPointer:
  case Type::Record:
  case Type::Enum:
  case Type::UnresolvedUsing:
  case Type::TypeOfExpr:
  case Type::TypeOf:
  case Type::Decltype:
  case Type::UnaryTransform:
  case Type::DependentName:
  case Type::InjectedClassName:
  case Type::TemplateSpecialization:
  case Type::TemplateTypeParm:
  case Type::SubstTemplateTypeParmPack:
  case Type::SubstBuiltinTemplatePack:
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
  case Type::PackExpansion:
  case Type::PackIndexing:
  case Type::BitInt:
  case Type::DependentBitInt:
  case Type::ArrayParameter:
  case Type::HLSLAttributedResource:
  case Type::HLSLInlineSpirv:
    llvm_unreachable("type should never be variably-modified");

  // These types can be variably-modified but should never need to
  // further decay.
  case Type::FunctionNoProto:
  case Type::FunctionProto:
  case Type::BlockPointer:
  case Type::MemberPointer:
  case Type::Pipe:
    return type;

  // These types can be variably-modified. All these modifications
  // preserve structure except as noted by comments.
  // TODO: if we ever care about optimizing VLAs, there are no-op
  // optimizations available here.
  case Type::Pointer:
    result = getPointerType(T: getVariableArrayDecayedType(
        type: cast<PointerType>(Val: ty)->getPointeeType()));
    break;

  case Type::LValueReference: {
    const auto *lv = cast<LValueReferenceType>(Val: ty);
    result = getLValueReferenceType(
        T: getVariableArrayDecayedType(type: lv->getPointeeType()),
        SpelledAsLValue: lv->isSpelledAsLValue());
    break;
  }

  case Type::RValueReference: {
    const auto *lv = cast<RValueReferenceType>(Val: ty);
    result = getRValueReferenceType(
        T: getVariableArrayDecayedType(type: lv->getPointeeType()));
    break;
  }

  case Type::Atomic: {
    const auto *at = cast<AtomicType>(Val: ty);
    result = getAtomicType(T: getVariableArrayDecayedType(type: at->getValueType()));
    break;
  }

  // Constant and dependent-sized arrays keep their own size; only the
  // element type is recursively decayed.
  case Type::ConstantArray: {
    const auto *cat = cast<ConstantArrayType>(Val: ty);
    result = getConstantArrayType(
        EltTy: getVariableArrayDecayedType(type: cat->getElementType()),
        ArySizeIn: cat->getSize(),
        SizeExpr: cat->getSizeExpr(),
        ASM: cat->getSizeModifier(),
        IndexTypeQuals: cat->getIndexTypeCVRQualifiers());
    break;
  }

  case Type::DependentSizedArray: {
    const auto *dat = cast<DependentSizedArrayType>(Val: ty);
    result = getDependentSizedArrayType(
        EltTy: getVariableArrayDecayedType(type: dat->getElementType()), NumElts: dat->getSizeExpr(),
        ASM: dat->getSizeModifier(), IndexTypeQuals: dat->getIndexTypeCVRQualifiers());
    break;
  }

  // Turn incomplete types into [*] types.
  case Type::IncompleteArray: {
    const auto *iat = cast<IncompleteArrayType>(Val: ty);
    result =
        getVariableArrayType(EltTy: getVariableArrayDecayedType(type: iat->getElementType()),
                             /*size*/ NumElts: nullptr, ASM: ArraySizeModifier::Normal,
                             IndexTypeQuals: iat->getIndexTypeCVRQualifiers());
    break;
  }

  // Turn VLA types into [*] types.
  case Type::VariableArray: {
    const auto *vat = cast<VariableArrayType>(Val: ty);
    result =
        getVariableArrayType(EltTy: getVariableArrayDecayedType(type: vat->getElementType()),
                             /*size*/ NumElts: nullptr, ASM: ArraySizeModifier::Star,
                             IndexTypeQuals: vat->getIndexTypeCVRQualifiers());
    break;
  }
  }

  // Apply the top-level qualifiers from the original.
  return getQualifiedType(T: result, Qs: split.Quals);
}
4302
4303/// getVariableArrayType - Returns a non-unique reference to the type for a
4304/// variable array of the specified element type.
4305QualType ASTContext::getVariableArrayType(QualType EltTy, Expr *NumElts,
4306 ArraySizeModifier ASM,
4307 unsigned IndexTypeQuals) const {
4308 // Since we don't unique expressions, it isn't possible to unique VLA's
4309 // that have an expression provided for their size.
4310 QualType Canon;
4311
4312 // Be sure to pull qualifiers off the element type.
4313 // FIXME: Check below should look for qualifiers behind sugar.
4314 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
4315 SplitQualType canonSplit = getCanonicalType(T: EltTy).split();
4316 Canon = getVariableArrayType(EltTy: QualType(canonSplit.Ty, 0), NumElts, ASM,
4317 IndexTypeQuals);
4318 Canon = getQualifiedType(T: Canon, Qs: canonSplit.Quals);
4319 }
4320
4321 auto *New = new (*this, alignof(VariableArrayType))
4322 VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals);
4323
4324 VariableArrayTypes.push_back(x: New);
4325 Types.push_back(Elt: New);
4326 return QualType(New, 0);
4327}
4328
/// getDependentSizedArrayType - Returns a non-unique reference to
/// the type for a dependently-sized array of the specified element
/// type.
///
/// \param elementType element type, possibly non-canonical sugar.
/// \param numElements dependent size expression, or null when the size will
///        be deduced from a dependent initializer.
/// \param ASM array size modifier.
/// \param elementTypeQuals qualifiers written inside the brackets.
QualType
ASTContext::getDependentSizedArrayType(QualType elementType, Expr *numElements,
                                       ArraySizeModifier ASM,
                                       unsigned elementTypeQuals) const {
  assert((!numElements || numElements->isTypeDependent() ||
          numElements->isValueDependent()) &&
         "Size must be type- or value-dependent!");

  SplitQualType canonElementType = getCanonicalType(T: elementType).split();

  void *insertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  // Profile with the canonical element type only when a size expression is
  // present; sizeless nodes key on the sugared element type directly.
  DependentSizedArrayType::Profile(
      ID, Context: *this, ET: numElements ? QualType(canonElementType.Ty, 0) : elementType,
      SizeMod: ASM, TypeQuals: elementTypeQuals, E: numElements);

  // Look for an existing type with these properties.
  DependentSizedArrayType *canonTy =
      DependentSizedArrayTypes.FindNodeOrInsertPos(ID, InsertPos&: insertPos);

  // Dependently-sized array types that do not have a specified number
  // of elements will have their sizes deduced from a dependent
  // initializer.
  if (!numElements) {
    if (canonTy)
      return QualType(canonTy, 0);

    auto *newType = new (*this, alignof(DependentSizedArrayType))
        DependentSizedArrayType(elementType, QualType(), numElements, ASM,
                                elementTypeQuals);
    DependentSizedArrayTypes.InsertNode(N: newType, InsertPos: insertPos);
    Types.push_back(Elt: newType);
    return QualType(newType, 0);
  }

  // If we don't have one, build one.
  if (!canonTy) {
    canonTy = new (*this, alignof(DependentSizedArrayType))
        DependentSizedArrayType(QualType(canonElementType.Ty, 0), QualType(),
                                numElements, ASM, elementTypeQuals);
    DependentSizedArrayTypes.InsertNode(N: canonTy, InsertPos: insertPos);
    Types.push_back(Elt: canonTy);
  }

  // Apply qualifiers from the element type to the array.
  QualType canon = getQualifiedType(T: QualType(canonTy,0),
                                    Qs: canonElementType.Quals);

  // If we didn't need extra canonicalization for the element type or the size
  // expression, then just use that as our result.
  if (QualType(canonElementType.Ty, 0) == elementType &&
      canonTy->getSizeExpr() == numElements)
    return canon;

  // Otherwise, we need to build a type which follows the spelling
  // of the element type.
  auto *sugaredType = new (*this, alignof(DependentSizedArrayType))
      DependentSizedArrayType(elementType, canon, numElements, ASM,
                              elementTypeQuals);
  Types.push_back(Elt: sugaredType);
  return QualType(sugaredType, 0);
}
4394
4395QualType ASTContext::getIncompleteArrayType(QualType elementType,
4396 ArraySizeModifier ASM,
4397 unsigned elementTypeQuals) const {
4398 llvm::FoldingSetNodeID ID;
4399 IncompleteArrayType::Profile(ID, ET: elementType, SizeMod: ASM, TypeQuals: elementTypeQuals);
4400
4401 void *insertPos = nullptr;
4402 if (IncompleteArrayType *iat =
4403 IncompleteArrayTypes.FindNodeOrInsertPos(ID, InsertPos&: insertPos))
4404 return QualType(iat, 0);
4405
4406 // If the element type isn't canonical, this won't be a canonical type
4407 // either, so fill in the canonical type field. We also have to pull
4408 // qualifiers off the element type.
4409 QualType canon;
4410
4411 // FIXME: Check below should look for qualifiers behind sugar.
4412 if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
4413 SplitQualType canonSplit = getCanonicalType(T: elementType).split();
4414 canon = getIncompleteArrayType(elementType: QualType(canonSplit.Ty, 0),
4415 ASM, elementTypeQuals);
4416 canon = getQualifiedType(T: canon, Qs: canonSplit.Quals);
4417
4418 // Get the new insert position for the node we care about.
4419 IncompleteArrayType *existing =
4420 IncompleteArrayTypes.FindNodeOrInsertPos(ID, InsertPos&: insertPos);
4421 assert(!existing && "Shouldn't be in the map!"); (void) existing;
4422 }
4423
4424 auto *newType = new (*this, alignof(IncompleteArrayType))
4425 IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);
4426
4427 IncompleteArrayTypes.InsertNode(N: newType, InsertPos: insertPos);
4428 Types.push_back(Elt: newType);
4429 return QualType(newType, 0);
4430}
4431
/// Decompose a builtin scalable-vector type (AArch64 SVE or RISC-V V) into
/// its element type, scalable element count, and number of constituent
/// vectors (NF, > 1 for tuple types).
ASTContext::BuiltinVectorTypeInfo
ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const {
// Shorthand initializers for BuiltinVectorTypeInfo.
// NOTE(review): neither helper appears to be expanded by the .def includes
// below in this function -- confirm whether they are still needed.
#define SVE_INT_ELTTY(BITS, ELTS, SIGNED, NUMVECTORS) \
  {getIntTypeForBitwidth(BITS, SIGNED), llvm::ElementCount::getScalable(ELTS), \
   NUMVECTORS};

#define SVE_ELTTY(ELTTY, ELTS, NUMVECTORS) \
  {ELTTY, llvm::ElementCount::getScalable(ELTS), NUMVECTORS};

  switch (Ty->getKind()) {
  default:
    llvm_unreachable("Unsupported builtin vector type");

// AArch64 SVE vector and predicate types.
#define SVE_VECTOR_TYPE_INT(Name, MangledName, Id, SingletonId, NumEls, \
                            ElBits, NF, IsSigned) \
  case BuiltinType::Id: \
    return {getIntTypeForBitwidth(ElBits, IsSigned), \
            llvm::ElementCount::getScalable(NumEls), NF};
#define SVE_VECTOR_TYPE_FLOAT(Name, MangledName, Id, SingletonId, NumEls, \
                              ElBits, NF) \
  case BuiltinType::Id: \
    return {ElBits == 16 ? HalfTy : (ElBits == 32 ? FloatTy : DoubleTy), \
            llvm::ElementCount::getScalable(NumEls), NF};
#define SVE_VECTOR_TYPE_BFLOAT(Name, MangledName, Id, SingletonId, NumEls, \
                               ElBits, NF) \
  case BuiltinType::Id: \
    return {BFloat16Ty, llvm::ElementCount::getScalable(NumEls), NF};
#define SVE_VECTOR_TYPE_MFLOAT(Name, MangledName, Id, SingletonId, NumEls, \
                               ElBits, NF) \
  case BuiltinType::Id: \
    return {MFloat8Ty, llvm::ElementCount::getScalable(NumEls), NF};
#define SVE_PREDICATE_TYPE_ALL(Name, MangledName, Id, SingletonId, NumEls, NF) \
  case BuiltinType::Id: \
    return {BoolTy, llvm::ElementCount::getScalable(NumEls), NF};
#include "clang/Basic/AArch64ACLETypes.def"

// RISC-V V vector and mask types.
#define RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, ElBits, NF, \
                            IsSigned) \
  case BuiltinType::Id: \
    return {getIntTypeForBitwidth(ElBits, IsSigned), \
            llvm::ElementCount::getScalable(NumEls), NF};
#define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \
  case BuiltinType::Id: \
    return {ElBits == 16 ? Float16Ty : (ElBits == 32 ? FloatTy : DoubleTy), \
            llvm::ElementCount::getScalable(NumEls), NF};
#define RVV_VECTOR_TYPE_BFLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \
  case BuiltinType::Id: \
    return {BFloat16Ty, llvm::ElementCount::getScalable(NumEls), NF};
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \
  case BuiltinType::Id: \
    return {BoolTy, llvm::ElementCount::getScalable(NumEls), 1};
#include "clang/Basic/RISCVVTypes.def"
  }
}
4486
/// getExternrefType - Return a WebAssembly externref type, which represents an
/// opaque reference to a host value.
///
/// Only valid on a Wasm target with the "reference-types" feature; any other
/// target hits the llvm_unreachable below.
QualType ASTContext::getWebAssemblyExternrefType() const {
  if (Target->getTriple().isWasm() && Target->hasFeature(Feature: "reference-types")) {
// Scan the .def entries and return the singleton for WasmExternRef.
#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS) \
  if (BuiltinType::Id == BuiltinType::WasmExternRef) \
    return SingletonId;
#include "clang/Basic/WebAssemblyReferenceTypes.def"
  }
  llvm_unreachable(
      "shouldn't try to generate type externref outside WebAssembly target");
}
4499
/// getScalableVectorType - Return the unique reference to a scalable vector
/// type of the specified element type and size. VectorType must be a built-in
/// type.
///
/// Returns a null QualType when no target scalable type matches. Results are
/// memoized in ScalableVecTyMap keyed on (EltTy, NumElts, NumFields).
QualType ASTContext::getScalableVectorType(QualType EltTy, unsigned NumElts,
                                           unsigned NumFields) const {
  // Fast path: answer from the memoization cache.
  auto K = llvm::ScalableVecTyKey{.EltTy: EltTy, .NumElts: NumElts, .NumFields: NumFields};
  if (auto It = ScalableVecTyMap.find(Val: K); It != ScalableVecTyMap.end())
    return It->second;

  if (Target->hasAArch64ACLETypes()) {
    uint64_t EltTySize = getTypeSize(T: EltTy);

// Match against each SVE builtin by element kind, element size, and total
// element count (NumEls * NF); only single-field requests match here.
#define SVE_VECTOR_TYPE_INT(Name, MangledName, Id, SingletonId, NumEls, \
                            ElBits, NF, IsSigned) \
  if (EltTy->hasIntegerRepresentation() && !EltTy->isBooleanType() && \
      EltTy->hasSignedIntegerRepresentation() == IsSigned && \
      EltTySize == ElBits && NumElts == (NumEls * NF) && NumFields == 1) { \
    return ScalableVecTyMap[K] = SingletonId; \
  }
#define SVE_VECTOR_TYPE_FLOAT(Name, MangledName, Id, SingletonId, NumEls, \
                              ElBits, NF) \
  if (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \
      EltTySize == ElBits && NumElts == (NumEls * NF) && NumFields == 1) { \
    return ScalableVecTyMap[K] = SingletonId; \
  }
#define SVE_VECTOR_TYPE_BFLOAT(Name, MangledName, Id, SingletonId, NumEls, \
                               ElBits, NF) \
  if (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \
      EltTySize == ElBits && NumElts == (NumEls * NF) && NumFields == 1) { \
    return ScalableVecTyMap[K] = SingletonId; \
  }
#define SVE_VECTOR_TYPE_MFLOAT(Name, MangledName, Id, SingletonId, NumEls, \
                               ElBits, NF) \
  if (EltTy->isMFloat8Type() && EltTySize == ElBits && \
      NumElts == (NumEls * NF) && NumFields == 1) { \
    return ScalableVecTyMap[K] = SingletonId; \
  }
#define SVE_PREDICATE_TYPE_ALL(Name, MangledName, Id, SingletonId, NumEls, NF) \
  if (EltTy->isBooleanType() && NumElts == (NumEls * NF) && NumFields == 1) \
    return ScalableVecTyMap[K] = SingletonId;
#include "clang/Basic/AArch64ACLETypes.def"
  } else if (Target->hasRISCVVTypes()) {
    uint64_t EltTySize = getTypeSize(T: EltTy);
// RISC-V V types match on NumEls and NF separately (tuple types have NF > 1).
#define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, \
                        IsFP, IsBF) \
  if (!EltTy->isBooleanType() && \
      ((EltTy->hasIntegerRepresentation() && \
        EltTy->hasSignedIntegerRepresentation() == IsSigned) || \
       (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \
        IsFP && !IsBF) || \
       (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \
        IsBF && !IsFP)) && \
      EltTySize == ElBits && NumElts == NumEls && NumFields == NF) \
    return ScalableVecTyMap[K] = SingletonId;
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \
  if (EltTy->isBooleanType() && NumElts == NumEls) \
    return ScalableVecTyMap[K] = SingletonId;
#include "clang/Basic/RISCVVTypes.def"
  }
  // No matching scalable builtin for this target/shape.
  return QualType();
}
4561
4562/// getVectorType - Return the unique reference to a vector type of
4563/// the specified element type and size. VectorType must be a built-in type.
4564QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
4565 VectorKind VecKind) const {
4566 assert(vecType->isBuiltinType() ||
4567 (vecType->isBitIntType() &&
4568 // Only support _BitInt elements with byte-sized power of 2 NumBits.
4569 llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits())));
4570
4571 // Check if we've already instantiated a vector of this type.
4572 llvm::FoldingSetNodeID ID;
4573 VectorType::Profile(ID, ElementType: vecType, NumElements: NumElts, TypeClass: Type::Vector, VecKind);
4574
4575 void *InsertPos = nullptr;
4576 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
4577 return QualType(VTP, 0);
4578
4579 // If the element type isn't canonical, this won't be a canonical type either,
4580 // so fill in the canonical type field.
4581 QualType Canonical;
4582 if (!vecType.isCanonical()) {
4583 Canonical = getVectorType(vecType: getCanonicalType(T: vecType), NumElts, VecKind);
4584
4585 // Get the new insert position for the node we care about.
4586 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4587 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4588 }
4589 auto *New = new (*this, alignof(VectorType))
4590 VectorType(vecType, NumElts, Canonical, VecKind);
4591 VectorTypes.InsertNode(N: New, InsertPos);
4592 Types.push_back(Elt: New);
4593 return QualType(New, 0);
4594}
4595
/// Return a vector type whose element count is the given dependent
/// expression. Canonical nodes are uniqued; sugared nodes are not.
QualType ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr,
                                            SourceLocation AttrLoc,
                                            VectorKind VecKind) const {
  llvm::FoldingSetNodeID ID;
  DependentVectorType::Profile(ID, Context: *this, ElementType: getCanonicalType(T: VecType), SizeExpr,
                               VecKind);
  void *InsertPos = nullptr;
  DependentVectorType *Canon =
      DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
  DependentVectorType *New;

  if (Canon) {
    // A canonical node already exists; build sugar pointing at it.
    New = new (*this, alignof(DependentVectorType)) DependentVectorType(
        VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind);
  } else {
    QualType CanonVecTy = getCanonicalType(T: VecType);
    if (CanonVecTy == VecType) {
      // The element type is already canonical, so the new node is itself the
      // canonical type; register it in the folding set.
      New = new (*this, alignof(DependentVectorType))
          DependentVectorType(VecType, QualType(), SizeExpr, AttrLoc, VecKind);

      DependentVectorType *CanonCheck =
          DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!CanonCheck &&
             "Dependent-sized vector_size canonical type broken");
      (void)CanonCheck;
      DependentVectorTypes.InsertNode(N: New, InsertPos);
    } else {
      // Build (and cache) the canonical variant first, then sugar over it.
      QualType CanonTy = getDependentVectorType(VecType: CanonVecTy, SizeExpr,
                                                AttrLoc: SourceLocation(), VecKind);
      New = new (*this, alignof(DependentVectorType))
          DependentVectorType(VecType, CanonTy, SizeExpr, AttrLoc, VecKind);
    }
  }

  Types.push_back(Elt: New);
  return QualType(New, 0);
}
4633
/// getExtVectorType - Return the unique reference to an extended vector type of
/// the specified element type and size. VectorType must be a built-in type.
///
/// ExtVector types share the VectorTypes folding set with generic vectors;
/// uniqueness is keyed on Type::ExtVector in the profile.
QualType ASTContext::getExtVectorType(QualType vecType,
                                      unsigned NumElts) const {
  assert(vecType->isBuiltinType() || vecType->isDependentType() ||
         (vecType->isBitIntType() &&
          // Only support _BitInt elements with byte-sized power of 2 NumBits.
          llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits())));

  // Check if we've already instantiated a vector of this type.
  llvm::FoldingSetNodeID ID;
  VectorType::Profile(ID, ElementType: vecType, NumElements: NumElts, TypeClass: Type::ExtVector,
                      VecKind: VectorKind::Generic);
  void *InsertPos = nullptr;
  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(VTP, 0);

  // If the element type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!vecType.isCanonical()) {
    Canonical = getExtVectorType(vecType: getCanonicalType(T: vecType), NumElts);

    // Get the new insert position for the node we care about.
    // (The recursive call may have invalidated the earlier InsertPos.)
    VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, alignof(ExtVectorType))
      ExtVectorType(vecType, NumElts, Canonical);
  VectorTypes.InsertNode(N: New, InsertPos);
  Types.push_back(Elt: New);
  return QualType(New, 0);
}
4667
/// Return an ext_vector type whose element count is the given dependent
/// expression. Canonical nodes are uniqued; sugared nodes are not.
QualType
ASTContext::getDependentSizedExtVectorType(QualType vecType,
                                           Expr *SizeExpr,
                                           SourceLocation AttrLoc) const {
  llvm::FoldingSetNodeID ID;
  DependentSizedExtVectorType::Profile(ID, Context: *this, ElementType: getCanonicalType(T: vecType),
                                       SizeExpr);

  void *InsertPos = nullptr;
  DependentSizedExtVectorType *Canon
    = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
  DependentSizedExtVectorType *New;
  if (Canon) {
    // We already have a canonical version of this array type; use it as
    // the canonical type for a newly-built type.
    New = new (*this, alignof(DependentSizedExtVectorType))
        DependentSizedExtVectorType(vecType, QualType(Canon, 0), SizeExpr,
                                    AttrLoc);
  } else {
    QualType CanonVecTy = getCanonicalType(T: vecType);
    if (CanonVecTy == vecType) {
      // The element type is already canonical; the new node is itself the
      // canonical type, so register it in the folding set.
      New = new (*this, alignof(DependentSizedExtVectorType))
          DependentSizedExtVectorType(vecType, QualType(), SizeExpr, AttrLoc);

      DependentSizedExtVectorType *CanonCheck
        = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken");
      (void)CanonCheck;
      DependentSizedExtVectorTypes.InsertNode(N: New, InsertPos);
    } else {
      // Build (and cache) the canonical variant first, then sugar over it.
      QualType CanonExtTy = getDependentSizedExtVectorType(vecType: CanonVecTy, SizeExpr,
                                                           AttrLoc: SourceLocation());
      New = new (*this, alignof(DependentSizedExtVectorType))
          DependentSizedExtVectorType(vecType, CanonExtTy, SizeExpr, AttrLoc);
    }
  }

  Types.push_back(Elt: New);
  return QualType(New, 0);
}
4708
4709QualType ASTContext::getConstantMatrixType(QualType ElementTy, unsigned NumRows,
4710 unsigned NumColumns) const {
4711 llvm::FoldingSetNodeID ID;
4712 ConstantMatrixType::Profile(ID, ElementType: ElementTy, NumRows, NumColumns,
4713 TypeClass: Type::ConstantMatrix);
4714
4715 assert(MatrixType::isValidElementType(ElementTy, getLangOpts()) &&
4716 "need a valid element type");
4717 assert(NumRows > 0 && NumRows <= LangOpts.MaxMatrixDimension &&
4718 NumColumns > 0 && NumColumns <= LangOpts.MaxMatrixDimension &&
4719 "need valid matrix dimensions");
4720 void *InsertPos = nullptr;
4721 if (ConstantMatrixType *MTP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos))
4722 return QualType(MTP, 0);
4723
4724 QualType Canonical;
4725 if (!ElementTy.isCanonical()) {
4726 Canonical =
4727 getConstantMatrixType(ElementTy: getCanonicalType(T: ElementTy), NumRows, NumColumns);
4728
4729 ConstantMatrixType *NewIP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4730 assert(!NewIP && "Matrix type shouldn't already exist in the map");
4731 (void)NewIP;
4732 }
4733
4734 auto *New = new (*this, alignof(ConstantMatrixType))
4735 ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical);
4736 MatrixTypes.InsertNode(N: New, InsertPos);
4737 Types.push_back(Elt: New);
4738 return QualType(New, 0);
4739}
4740
4741QualType ASTContext::getDependentSizedMatrixType(QualType ElementTy,
4742 Expr *RowExpr,
4743 Expr *ColumnExpr,
4744 SourceLocation AttrLoc) const {
4745 QualType CanonElementTy = getCanonicalType(T: ElementTy);
4746 llvm::FoldingSetNodeID ID;
4747 DependentSizedMatrixType::Profile(ID, Context: *this, ElementType: CanonElementTy, RowExpr,
4748 ColumnExpr);
4749
4750 void *InsertPos = nullptr;
4751 DependentSizedMatrixType *Canon =
4752 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4753
4754 if (!Canon) {
4755 Canon = new (*this, alignof(DependentSizedMatrixType))
4756 DependentSizedMatrixType(CanonElementTy, QualType(), RowExpr,
4757 ColumnExpr, AttrLoc);
4758#ifndef NDEBUG
4759 DependentSizedMatrixType *CanonCheck =
4760 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4761 assert(!CanonCheck && "Dependent-sized matrix canonical type broken");
4762#endif
4763 DependentSizedMatrixTypes.InsertNode(N: Canon, InsertPos);
4764 Types.push_back(Elt: Canon);
4765 }
4766
4767 // Already have a canonical version of the matrix type
4768 //
4769 // If it exactly matches the requested type, use it directly.
4770 if (Canon->getElementType() == ElementTy && Canon->getRowExpr() == RowExpr &&
4771 Canon->getRowExpr() == ColumnExpr)
4772 return QualType(Canon, 0);
4773
4774 // Use Canon as the canonical type for newly-built type.
4775 DependentSizedMatrixType *New = new (*this, alignof(DependentSizedMatrixType))
4776 DependentSizedMatrixType(ElementTy, QualType(Canon, 0), RowExpr,
4777 ColumnExpr, AttrLoc);
4778 Types.push_back(Elt: New);
4779 return QualType(New, 0);
4780}
4781
/// Return a type whose address space is the given instantiation-dependent
/// expression (e.g. __attribute__((address_space(N))) with N dependent).
QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType,
                                                  Expr *AddrSpaceExpr,
                                                  SourceLocation AttrLoc) const {
  assert(AddrSpaceExpr->isInstantiationDependent());

  QualType canonPointeeType = getCanonicalType(T: PointeeType);

  void *insertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  DependentAddressSpaceType::Profile(ID, Context: *this, PointeeType: canonPointeeType,
                                     AddrSpaceExpr);

  DependentAddressSpaceType *canonTy =
    DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, InsertPos&: insertPos);

  // Build and register the canonical node if it does not exist yet.
  if (!canonTy) {
    canonTy = new (*this, alignof(DependentAddressSpaceType))
        DependentAddressSpaceType(canonPointeeType, QualType(), AddrSpaceExpr,
                                  AttrLoc);
    DependentAddressSpaceTypes.InsertNode(N: canonTy, InsertPos: insertPos);
    Types.push_back(Elt: canonTy);
  }

  // If the request is already fully canonical, return the canonical node.
  if (canonPointeeType == PointeeType &&
      canonTy->getAddrSpaceExpr() == AddrSpaceExpr)
    return QualType(canonTy, 0);

  // Otherwise build a (non-uniqued) sugared node over the canonical one.
  auto *sugaredType = new (*this, alignof(DependentAddressSpaceType))
      DependentAddressSpaceType(PointeeType, QualType(canonTy, 0),
                                AddrSpaceExpr, AttrLoc);
  Types.push_back(Elt: sugaredType);
  return QualType(sugaredType, 0);
}
4815
4816/// Determine whether \p T is canonical as the result type of a function.
4817static bool isCanonicalResultType(QualType T) {
4818 return T.isCanonical() &&
4819 (T.getObjCLifetime() == Qualifiers::OCL_None ||
4820 T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone);
4821}
4822
/// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
///
/// \param ResultTy the function's return type.
/// \param Info extended function attributes (calling convention, noreturn...).
QualType
ASTContext::getFunctionNoProtoType(QualType ResultTy,
                                   const FunctionType::ExtInfo &Info) const {
  // FIXME: This assertion cannot be enabled (yet) because the ObjC rewriter
  // functionality creates a function without a prototype regardless of
  // language mode (so it makes them even in C++). Once the rewriter has been
  // fixed, this assertion can be enabled again.
  //assert(!LangOpts.requiresStrictPrototypes() &&
  //       "strict prototypes are disabled");

  // Unique functions, to guarantee there is only one function of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  FunctionNoProtoType::Profile(ID, ResultType: ResultTy, Info);

  void *InsertPos = nullptr;
  if (FunctionNoProtoType *FT =
        FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(FT, 0);

  // A non-canonical result type makes this node non-canonical too; build the
  // canonical variant (ARC lifetime stripped from the result) and link to it.
  QualType Canonical;
  if (!isCanonicalResultType(T: ResultTy)) {
    Canonical =
      getFunctionNoProtoType(ResultTy: getCanonicalFunctionResultType(ResultType: ResultTy), Info);

    // Get the new insert position for the node we care about.
    // (The recursive call may have invalidated the earlier InsertPos.)
    FunctionNoProtoType *NewIP =
      FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = new (*this, alignof(FunctionNoProtoType))
      FunctionNoProtoType(ResultTy, Canonical, Info);
  Types.push_back(Elt: New);
  FunctionNoProtoTypes.InsertNode(N: New, InsertPos);
  return QualType(New, 0);
}
4861
4862CanQualType
4863ASTContext::getCanonicalFunctionResultType(QualType ResultType) const {
4864 CanQualType CanResultType = getCanonicalType(T: ResultType);
4865
4866 // Canonical result types do not have ARC lifetime qualifiers.
4867 if (CanResultType.getQualifiers().hasObjCLifetime()) {
4868 Qualifiers Qs = CanResultType.getQualifiers();
4869 Qs.removeObjCLifetime();
4870 return CanQualType::CreateUnsafe(
4871 Other: getQualifiedType(T: CanResultType.getUnqualifiedType(), Qs));
4872 }
4873
4874 return CanResultType;
4875}
4876
4877static bool isCanonicalExceptionSpecification(
4878 const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) {
4879 if (ESI.Type == EST_None)
4880 return true;
4881 if (!NoexceptInType)
4882 return false;
4883
4884 // C++17 onwards: exception specification is part of the type, as a simple
4885 // boolean "can this function type throw".
4886 if (ESI.Type == EST_BasicNoexcept)
4887 return true;
4888
4889 // A noexcept(expr) specification is (possibly) canonical if expr is
4890 // value-dependent.
4891 if (ESI.Type == EST_DependentNoexcept)
4892 return true;
4893
4894 // A dynamic exception specification is canonical if it only contains pack
4895 // expansions (so we can't tell whether it's non-throwing) and all its
4896 // contained types are canonical.
4897 if (ESI.Type == EST_Dynamic) {
4898 bool AnyPackExpansions = false;
4899 for (QualType ET : ESI.Exceptions) {
4900 if (!ET.isCanonical())
4901 return false;
4902 if (ET->getAs<PackExpansionType>())
4903 AnyPackExpansions = true;
4904 }
4905 return AnyPackExpansions;
4906 }
4907
4908 return false;
4909}
4910
/// Uniquing implementation behind getFunctionType: return the unique
/// FunctionProtoType for the given result type, parameter types, and extended
/// prototype info, creating it — and, when needed, its canonical form — on
/// first use.
///
/// \param ResultTy the function's return type.
/// \param ArgArray the parameter types.
/// \param EPI extended prototype information (variadic, qualifiers, exception
///        specification, parameter infos, function effects, ...).
/// \param OnlyWantCanonical when true, the caller promises the inputs are
///        already canonical and only the canonical node is wanted.
QualType ASTContext::getFunctionTypeInternal(
    QualType ResultTy, ArrayRef<QualType> ArgArray,
    const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const {
  size_t NumArgs = ArgArray.size();

  // Unique functions, to guarantee there is only one function of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  FunctionProtoType::Profile(ID, Result: ResultTy, ArgTys: ArgArray.begin(), NumArgs, EPI,
                             Context: *this, Canonical: true);

  QualType Canonical;
  bool Unique = false;

  void *InsertPos = nullptr;
  if (FunctionProtoType *FPT =
          FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) {
    QualType Existing = QualType(FPT, 0);

    // If we find a pre-existing equivalent FunctionProtoType, we can just reuse
    // it so long as our exception specification doesn't contain a dependent
    // noexcept expression, or we're just looking for a canonical type.
    // Otherwise, we're going to need to create a type
    // sugar node to hold the concrete expression.
    if (OnlyWantCanonical || !isComputedNoexcept(ESpecType: EPI.ExceptionSpec.Type) ||
        EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr())
      return Existing;

    // We need a new type sugar node for this one, to hold the new noexcept
    // expression. We do no canonicalization here, but that's OK since we don't
    // expect to see the same noexcept expression much more than once.
    Canonical = getCanonicalType(T: Existing);
    Unique = true;
  }

  bool NoexceptInType = getLangOpts().CPlusPlus17;
  bool IsCanonicalExceptionSpec =
      isCanonicalExceptionSpecification(ESI: EPI.ExceptionSpec, NoexceptInType);

  // Determine whether the type being created is already canonical or not.
  bool isCanonical = !Unique && IsCanonicalExceptionSpec &&
                     isCanonicalResultType(T: ResultTy) && !EPI.HasTrailingReturn;
  for (unsigned i = 0; i != NumArgs && isCanonical; ++i)
    if (!ArgArray[i].isCanonicalAsParam())
      isCanonical = false;

  if (OnlyWantCanonical)
    assert(isCanonical &&
           "given non-canonical parameters constructing canonical type");

  // If this type isn't canonical, get the canonical version of it if we don't
  // already have it. The exception spec is only partially part of the
  // canonical type, and only in C++17 onwards.
  if (!isCanonical && Canonical.isNull()) {
    SmallVector<QualType, 16> CanonicalArgs;
    CanonicalArgs.reserve(N: NumArgs);
    for (unsigned i = 0; i != NumArgs; ++i)
      CanonicalArgs.push_back(Elt: getCanonicalParamType(T: ArgArray[i]));

    llvm::SmallVector<QualType, 8> ExceptionTypeStorage;
    FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI;
    CanonicalEPI.HasTrailingReturn = false;

    // Reduce the exception specification to its canonical C++17 form:
    // essentially a boolean "can this type throw".
    if (IsCanonicalExceptionSpec) {
      // Exception spec is already OK.
    } else if (NoexceptInType) {
      switch (EPI.ExceptionSpec.Type) {
      case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated:
        // We don't know yet. It shouldn't matter what we pick here; no-one
        // should ever look at this.
        [[fallthrough]];
      case EST_None: case EST_MSAny: case EST_NoexceptFalse:
        CanonicalEPI.ExceptionSpec.Type = EST_None;
        break;

        // A dynamic exception specification is almost always "not noexcept",
        // with the exception that a pack expansion might expand to no types.
      case EST_Dynamic: {
        bool AnyPacks = false;
        for (QualType ET : EPI.ExceptionSpec.Exceptions) {
          if (ET->getAs<PackExpansionType>())
            AnyPacks = true;
          ExceptionTypeStorage.push_back(Elt: getCanonicalType(T: ET));
        }
        if (!AnyPacks)
          CanonicalEPI.ExceptionSpec.Type = EST_None;
        else {
          CanonicalEPI.ExceptionSpec.Type = EST_Dynamic;
          CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage;
        }
        break;
      }

      case EST_DynamicNone:
      case EST_BasicNoexcept:
      case EST_NoexceptTrue:
      case EST_NoThrow:
        CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept;
        break;

      case EST_DependentNoexcept:
        llvm_unreachable("dependent noexcept is already canonical");
      }
    } else {
      // Pre-C++17: the exception spec is not part of the canonical type.
      CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo();
    }

    // Adjust the canonical function result type.
    CanQualType CanResultTy = getCanonicalFunctionResultType(ResultType: ResultTy);
    Canonical =
        getFunctionTypeInternal(ResultTy: CanResultTy, ArgArray: CanonicalArgs, EPI: CanonicalEPI, OnlyWantCanonical: true);

    // Get the new insert position for the node we care about.
    FunctionProtoType *NewIP =
        FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  // Compute the needed size to hold this FunctionProtoType and the
  // various trailing objects.
  auto ESH = FunctionProtoType::getExceptionSpecSize(
      EST: EPI.ExceptionSpec.Type, NumExceptions: EPI.ExceptionSpec.Exceptions.size());
  size_t Size = FunctionProtoType::totalSizeToAlloc<
      QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields,
      FunctionType::FunctionTypeExtraAttributeInfo,
      FunctionType::FunctionTypeArmAttributes, FunctionType::ExceptionType,
      Expr *, FunctionDecl *, FunctionProtoType::ExtParameterInfo, Qualifiers,
      FunctionEffect, EffectConditionExpr>(
      Counts: NumArgs, Counts: EPI.Variadic, Counts: EPI.requiresFunctionProtoTypeExtraBitfields(),
      Counts: EPI.requiresFunctionProtoTypeExtraAttributeInfo(),
      Counts: EPI.requiresFunctionProtoTypeArmAttributes(), Counts: ESH.NumExceptionType,
      Counts: ESH.NumExprPtr, Counts: ESH.NumFunctionDeclPtr,
      Counts: EPI.ExtParameterInfos ? NumArgs : 0,
      Counts: EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0, Counts: EPI.FunctionEffects.size(),
      Counts: EPI.FunctionEffects.conditions().size());

  // Construct the node in place; a local copy of EPI is passed to the
  // constructor.
  auto *FTP = (FunctionProtoType *)Allocate(Size, Align: alignof(FunctionProtoType));
  FunctionProtoType::ExtProtoInfo newEPI = EPI;
  new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI);
  Types.push_back(Elt: FTP);
  // A "Unique" node (new sugar for a distinct noexcept expression) must not be
  // registered in the folding set: an equivalent node is already there.
  if (!Unique)
    FunctionProtoTypes.InsertNode(N: FTP, InsertPos);
  if (!EPI.FunctionEffects.empty())
    AnyFunctionEffects = true;
  return QualType(FTP, 0);
}
5057
5058QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const {
5059 llvm::FoldingSetNodeID ID;
5060 PipeType::Profile(ID, T, isRead: ReadOnly);
5061
5062 void *InsertPos = nullptr;
5063 if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos))
5064 return QualType(PT, 0);
5065
5066 // If the pipe element type isn't canonical, this won't be a canonical type
5067 // either, so fill in the canonical type field.
5068 QualType Canonical;
5069 if (!T.isCanonical()) {
5070 Canonical = getPipeType(T: getCanonicalType(T), ReadOnly);
5071
5072 // Get the new insert position for the node we care about.
5073 PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos);
5074 assert(!NewIP && "Shouldn't be in the map!");
5075 (void)NewIP;
5076 }
5077 auto *New = new (*this, alignof(PipeType)) PipeType(T, Canonical, ReadOnly);
5078 Types.push_back(Elt: New);
5079 PipeTypes.InsertNode(N: New, InsertPos);
5080 return QualType(New, 0);
5081}
5082
5083QualType ASTContext::adjustStringLiteralBaseType(QualType Ty) const {
5084 // OpenCL v1.1 s6.5.3: a string literal is in the constant address space.
5085 return LangOpts.OpenCL ? getAddrSpaceQualType(T: Ty, AddressSpace: LangAS::opencl_constant)
5086 : Ty;
5087}
5088
5089QualType ASTContext::getReadPipeType(QualType T) const {
5090 return getPipeType(T, ReadOnly: true);
5091}
5092
5093QualType ASTContext::getWritePipeType(QualType T) const {
5094 return getPipeType(T, ReadOnly: false);
5095}
5096
5097QualType ASTContext::getBitIntType(bool IsUnsigned, unsigned NumBits) const {
5098 llvm::FoldingSetNodeID ID;
5099 BitIntType::Profile(ID, IsUnsigned, NumBits);
5100
5101 void *InsertPos = nullptr;
5102 if (BitIntType *EIT = BitIntTypes.FindNodeOrInsertPos(ID, InsertPos))
5103 return QualType(EIT, 0);
5104
5105 auto *New = new (*this, alignof(BitIntType)) BitIntType(IsUnsigned, NumBits);
5106 BitIntTypes.InsertNode(N: New, InsertPos);
5107 Types.push_back(Elt: New);
5108 return QualType(New, 0);
5109}
5110
5111QualType ASTContext::getDependentBitIntType(bool IsUnsigned,
5112 Expr *NumBitsExpr) const {
5113 assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent");
5114 llvm::FoldingSetNodeID ID;
5115 DependentBitIntType::Profile(ID, Context: *this, IsUnsigned, NumBitsExpr);
5116
5117 void *InsertPos = nullptr;
5118 if (DependentBitIntType *Existing =
5119 DependentBitIntTypes.FindNodeOrInsertPos(ID, InsertPos))
5120 return QualType(Existing, 0);
5121
5122 auto *New = new (*this, alignof(DependentBitIntType))
5123 DependentBitIntType(IsUnsigned, NumBitsExpr);
5124 DependentBitIntTypes.InsertNode(N: New, InsertPos);
5125
5126 Types.push_back(Elt: New);
5127 return QualType(New, 0);
5128}
5129
5130QualType
5131ASTContext::getPredefinedSugarType(PredefinedSugarType::Kind KD) const {
5132 using Kind = PredefinedSugarType::Kind;
5133
5134 if (auto *Target = PredefinedSugarTypes[llvm::to_underlying(E: KD)];
5135 Target != nullptr)
5136 return QualType(Target, 0);
5137
5138 auto getCanonicalType = [](const ASTContext &Ctx, Kind KDI) -> QualType {
5139 switch (KDI) {
5140 // size_t (C99TC3 6.5.3.4), signed size_t (C++23 5.13.2) and
5141 // ptrdiff_t (C99TC3 6.5.6) Although these types are not built-in, they
5142 // are part of the core language and are widely used. Using
5143 // PredefinedSugarType makes these types as named sugar types rather than
5144 // standard integer types, enabling better hints and diagnostics.
5145 case Kind::SizeT:
5146 return Ctx.getFromTargetType(Type: Ctx.Target->getSizeType());
5147 case Kind::SignedSizeT:
5148 return Ctx.getFromTargetType(Type: Ctx.Target->getSignedSizeType());
5149 case Kind::PtrdiffT:
5150 return Ctx.getFromTargetType(Type: Ctx.Target->getPtrDiffType(AddrSpace: LangAS::Default));
5151 }
5152 llvm_unreachable("unexpected kind");
5153 };
5154 auto *New = new (*this, alignof(PredefinedSugarType))
5155 PredefinedSugarType(KD, &Idents.get(Name: PredefinedSugarType::getName(KD)),
5156 getCanonicalType(*this, static_cast<Kind>(KD)));
5157 Types.push_back(Elt: New);
5158 PredefinedSugarTypes[llvm::to_underlying(E: KD)] = New;
5159 return QualType(New, 0);
5160}
5161
5162QualType ASTContext::getTypeDeclType(ElaboratedTypeKeyword Keyword,
5163 NestedNameSpecifier Qualifier,
5164 const TypeDecl *Decl) const {
5165 if (auto *Tag = dyn_cast<TagDecl>(Val: Decl))
5166 return getTagType(Keyword, Qualifier, TD: Tag,
5167 /*OwnsTag=*/false);
5168 if (auto *Typedef = dyn_cast<TypedefNameDecl>(Val: Decl))
5169 return getTypedefType(Keyword, Qualifier, Decl: Typedef);
5170 if (auto *UD = dyn_cast<UnresolvedUsingTypenameDecl>(Val: Decl))
5171 return getUnresolvedUsingType(Keyword, Qualifier, D: UD);
5172
5173 assert(Keyword == ElaboratedTypeKeyword::None);
5174 assert(!Qualifier);
5175 return QualType(Decl->TypeForDecl, 0);
5176}
5177
5178CanQualType ASTContext::getCanonicalTypeDeclType(const TypeDecl *TD) const {
5179 if (auto *Tag = dyn_cast<TagDecl>(Val: TD))
5180 return getCanonicalTagType(TD: Tag);
5181 if (auto *TN = dyn_cast<TypedefNameDecl>(Val: TD))
5182 return getCanonicalType(T: TN->getUnderlyingType());
5183 if (const auto *UD = dyn_cast<UnresolvedUsingTypenameDecl>(Val: TD))
5184 return getCanonicalUnresolvedUsingType(D: UD);
5185 assert(TD->TypeForDecl);
5186 return TD->TypeForDecl->getCanonicalTypeUnqualified();
5187}
5188
5189QualType ASTContext::getTypeDeclType(const TypeDecl *Decl) const {
5190 if (const auto *TD = dyn_cast<TagDecl>(Val: Decl))
5191 return getCanonicalTagType(TD);
5192 if (const auto *TD = dyn_cast<TypedefNameDecl>(Val: Decl);
5193 isa_and_nonnull<TypedefDecl, TypeAliasDecl>(Val: TD))
5194 return getTypedefType(Keyword: ElaboratedTypeKeyword::None,
5195 /*Qualifier=*/std::nullopt, Decl: TD);
5196 if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Val: Decl))
5197 return getCanonicalUnresolvedUsingType(D: Using);
5198
5199 assert(Decl->TypeForDecl);
5200 return QualType(Decl->TypeForDecl, 0);
5201}
5202
/// getTypedefType - Return the unique reference to the type for the
/// specified typedef name decl. The common, unqualified form is cached
/// directly on the declaration; other forms are uniqued through the
/// TypedefTypes folding set.
///
/// \param UnderlyingType the type to use as the typedef's underlying type;
///        when null, the decl's own underlying type is used.
/// \param TypeMatchesDeclOrNone whether \p UnderlyingType matches the decl's
///        underlying type; computed here when not provided.
QualType
ASTContext::getTypedefType(ElaboratedTypeKeyword Keyword,
                           NestedNameSpecifier Qualifier,
                           const TypedefNameDecl *Decl, QualType UnderlyingType,
                           std::optional<bool> TypeMatchesDeclOrNone) const {
  if (!TypeMatchesDeclOrNone) {
    // Derive the match flag from the decl; default the underlying type to the
    // decl's own when the caller didn't supply one.
    QualType DeclUnderlyingType = Decl->getUnderlyingType();
    assert(!DeclUnderlyingType.isNull());
    if (UnderlyingType.isNull())
      UnderlyingType = DeclUnderlyingType;
    else
      assert(hasSameType(UnderlyingType, DeclUnderlyingType));
    TypeMatchesDeclOrNone = UnderlyingType == DeclUnderlyingType;
  } else {
    // FIXME: This is a workaround for a serialization cycle: assume the decl
    // underlying type is not available; don't touch it.
    assert(!UnderlyingType.isNull());
  }

  // Fast path: plain keyword, no qualifier, and the underlying type matches
  // the decl — cache the node on the declaration itself.
  if (Keyword == ElaboratedTypeKeyword::None && !Qualifier &&
      *TypeMatchesDeclOrNone) {
    if (Decl->TypeForDecl)
      return QualType(Decl->TypeForDecl, 0);

    auto *NewType = new (*this, alignof(TypedefType))
        TypedefType(Type::Typedef, Keyword, Qualifier, Decl, UnderlyingType,
                    !*TypeMatchesDeclOrNone);

    Types.push_back(Elt: NewType);
    Decl->TypeForDecl = NewType;
    return QualType(NewType, 0);
  }

  // Otherwise unique through the folding set. The underlying type only
  // participates in the profile when it differs from the decl's.
  llvm::FoldingSetNodeID ID;
  TypedefType::Profile(ID, Keyword, Qualifier, Decl,
                       Underlying: *TypeMatchesDeclOrNone ? QualType() : UnderlyingType);

  void *InsertPos = nullptr;
  if (FoldingSetPlaceholder<TypedefType> *Placeholder =
          TypedefTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(Placeholder->getType(), 0);

  // Trailing storage: one folding-set placeholder, an optional qualifier, and
  // an optional differing underlying type.
  void *Mem =
      Allocate(Size: TypedefType::totalSizeToAlloc<FoldingSetPlaceholder<TypedefType>,
                                               NestedNameSpecifier, QualType>(
                   Counts: 1, Counts: !!Qualifier, Counts: !*TypeMatchesDeclOrNone),
               Align: alignof(TypedefType));
  auto *NewType =
      new (Mem) TypedefType(Type::Typedef, Keyword, Qualifier, Decl,
                            UnderlyingType, !*TypeMatchesDeclOrNone);
  auto *Placeholder = new (NewType->getFoldingSetPlaceholder())
      FoldingSetPlaceholder<TypedefType>();
  TypedefTypes.InsertNode(N: Placeholder, InsertPos);
  Types.push_back(Elt: NewType);
  return QualType(NewType, 0);
}
5261
/// Return the unique UsingType naming the type introduced via the
/// using-shadow declaration \p D. When \p UnderlyingType is null it is
/// recomputed from the shadowed target declaration.
QualType ASTContext::getUsingType(ElaboratedTypeKeyword Keyword,
                                  NestedNameSpecifier Qualifier,
                                  const UsingShadowDecl *D,
                                  QualType UnderlyingType) const {
  // FIXME: This is expensive to compute every time!
  if (UnderlyingType.isNull()) {
    const auto *UD = cast<UsingDecl>(Val: D->getIntroducer());
    UnderlyingType =
        getTypeDeclType(Keyword: UD->hasTypename() ? ElaboratedTypeKeyword::Typename
                                 : ElaboratedTypeKeyword::None,
                        Qualifier: UD->getQualifier(), Decl: cast<TypeDecl>(Val: D->getTargetDecl()));
  }

  llvm::FoldingSetNodeID ID;
  UsingType::Profile(ID, Keyword, Qualifier, D, UnderlyingType);

  void *InsertPos = nullptr;
  if (const UsingType *T = UsingTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(T, 0);

  assert(!UnderlyingType.hasLocalQualifiers());

  // The underlying type must agree with the canonical type of what the shadow
  // declaration actually targets.
  assert(
      hasSameType(getCanonicalTypeDeclType(cast<TypeDecl>(D->getTargetDecl())),
                  UnderlyingType));

  // A qualifier, if present, lives in trailing storage after the node.
  void *Mem =
      Allocate(Size: UsingType::totalSizeToAlloc<NestedNameSpecifier>(Counts: !!Qualifier),
               Align: alignof(UsingType));
  UsingType *T = new (Mem) UsingType(Keyword, Qualifier, D, UnderlyingType);
  Types.push_back(Elt: T);
  UsingTypes.InsertNode(N: T, InsertPos);
  return QualType(T, 0);
}
5296
/// Allocate and initialize a TagType node (EnumType, RecordType or
/// InjectedClassNameType) for \p TD, optionally co-allocating a folding-set
/// placeholder in front of it so the caller can unique the node.
///
/// \param CanonicalType the canonical type node, or null when the node being
///        created is itself canonical.
/// \param WithFoldingSetNode whether to place a TagTypeFoldingSetPlaceholder
///        immediately before the type node in the allocation.
TagType *ASTContext::getTagTypeInternal(ElaboratedTypeKeyword Keyword,
                                        NestedNameSpecifier Qualifier,
                                        const TagDecl *TD, bool OwnsTag,
                                        bool IsInjected,
                                        const Type *CanonicalType,
                                        bool WithFoldingSetNode) const {
  // Pick the concrete type class and its node size from the decl kind.
  auto [TC, Size] = [&] {
    switch (TD->getDeclKind()) {
    case Decl::Enum:
      static_assert(alignof(EnumType) == alignof(TagType));
      return std::make_tuple(args: Type::Enum, args: sizeof(EnumType));
    case Decl::ClassTemplatePartialSpecialization:
    case Decl::ClassTemplateSpecialization:
    case Decl::CXXRecord:
      static_assert(alignof(RecordType) == alignof(TagType));
      static_assert(alignof(InjectedClassNameType) == alignof(TagType));
      if (cast<CXXRecordDecl>(Val: TD)->hasInjectedClassType())
        return std::make_tuple(args: Type::InjectedClassName,
                               args: sizeof(InjectedClassNameType));
      [[fallthrough]];
    case Decl::Record:
      return std::make_tuple(args: Type::Record, args: sizeof(RecordType));
    default:
      llvm_unreachable("unexpected decl kind");
    }
  }();

  // A qualifier, if present, is stored after the type node; grow the
  // allocation accordingly.
  if (Qualifier) {
    static_assert(alignof(NestedNameSpecifier) <= alignof(TagType));
    Size = llvm::alignTo(Value: Size, Align: alignof(NestedNameSpecifier)) +
           sizeof(NestedNameSpecifier);
  }
  void *Mem;
  if (WithFoldingSetNode) {
    // FIXME: It would be more profitable to tail allocate the folding set node
    // from the type, instead of the other way around, due to the greater
    // alignment requirements of the type. But this makes it harder to deal with
    // the different type node sizes. This would require either uniquing from
    // different folding sets, or having the folding set accept a
    // contextual parameter which is not fixed at construction.
    Mem = Allocate(
        Size: sizeof(TagTypeFoldingSetPlaceholder) +
            TagTypeFoldingSetPlaceholder::getOffset() + Size,
        Align: std::max(a: alignof(TagTypeFoldingSetPlaceholder), b: alignof(TagType)));
    auto *T = new (Mem) TagTypeFoldingSetPlaceholder();
    Mem = T->getTagType();
  } else {
    Mem = Allocate(Size, Align: alignof(TagType));
  }

  // Construct the concrete node. Each branch asserts the TagType subobject
  // sits at offset zero — the placeholder layout above depends on it.
  auto *T = [&, TC = TC]() -> TagType * {
    switch (TC) {
    case Type::Enum: {
      assert(isa<EnumDecl>(TD));
      auto *T = new (Mem) EnumType(TC, Keyword, Qualifier, TD, OwnsTag,
                                   IsInjected, CanonicalType);
      assert(reinterpret_cast<void *>(T) ==
                 reinterpret_cast<void *>(static_cast<TagType *>(T)) &&
             "TagType must be the first base of EnumType");
      return T;
    }
    case Type::Record: {
      assert(isa<RecordDecl>(TD));
      auto *T = new (Mem) RecordType(TC, Keyword, Qualifier, TD, OwnsTag,
                                     IsInjected, CanonicalType);
      assert(reinterpret_cast<void *>(T) ==
                 reinterpret_cast<void *>(static_cast<TagType *>(T)) &&
             "TagType must be the first base of RecordType");
      return T;
    }
    case Type::InjectedClassName: {
      auto *T = new (Mem) InjectedClassNameType(Keyword, Qualifier, TD,
                                                IsInjected, CanonicalType);
      assert(reinterpret_cast<void *>(T) ==
                 reinterpret_cast<void *>(static_cast<TagType *>(T)) &&
             "TagType must be the first base of InjectedClassNameType");
      return T;
    }
    default:
      llvm_unreachable("unexpected type class");
    }
  }();
  // Sanity-check that the constructed node round-trips every input.
  assert(T->getKeyword() == Keyword);
  assert(T->getQualifier() == Qualifier);
  assert(T->getDecl() == TD);
  assert(T->isInjected() == IsInjected);
  assert(T->isTagOwned() == OwnsTag);
  assert((T->isCanonicalUnqualified()
              ? QualType()
              : T->getCanonicalTypeInternal()) == QualType(CanonicalType, 0));
  Types.push_back(Elt: T);
  return T;
}
5390
5391static const TagDecl *getNonInjectedClassName(const TagDecl *TD) {
5392 if (const auto *RD = dyn_cast<CXXRecordDecl>(Val: TD);
5393 RD && RD->isInjectedClassName())
5394 return cast<TagDecl>(Val: RD->getDeclContext());
5395 return TD;
5396}
5397
5398CanQualType ASTContext::getCanonicalTagType(const TagDecl *TD) const {
5399 TD = ::getNonInjectedClassName(TD)->getCanonicalDecl();
5400 if (TD->TypeForDecl)
5401 return TD->TypeForDecl->getCanonicalTypeUnqualified();
5402
5403 const Type *CanonicalType = getTagTypeInternal(
5404 Keyword: ElaboratedTypeKeyword::None,
5405 /*Qualifier=*/std::nullopt, TD,
5406 /*OwnsTag=*/false, /*IsInjected=*/false, /*CanonicalType=*/nullptr,
5407 /*WithFoldingSetNode=*/false);
5408 TD->TypeForDecl = CanonicalType;
5409 return CanQualType::CreateUnsafe(Other: QualType(CanonicalType, 0));
5410}
5411
/// Return the (possibly elaborated and qualified) type for tag decl \p TD.
///
/// \param OwnsTag whether the type node owns the tag declaration (e.g. a
///        definition written inline in a declarator).
QualType ASTContext::getTagType(ElaboratedTypeKeyword Keyword,
                                NestedNameSpecifier Qualifier,
                                const TagDecl *TD, bool OwnsTag) const {

  const TagDecl *NonInjectedTD = ::getNonInjectedClassName(TD);
  bool IsInjected = TD != NonInjectedTD;

  // The "preferred" form — the one cached on the decl — carries no keyword in
  // C++ and the tag's own keyword otherwise.
  ElaboratedTypeKeyword PreferredKeyword =
      getLangOpts().CPlusPlus ? ElaboratedTypeKeyword::None
                              : KeywordHelpers::getKeywordForTagTypeKind(
                                    Tag: NonInjectedTD->getTagKind());

  if (Keyword == PreferredKeyword && !Qualifier && !OwnsTag) {
    // Reuse the decl's cached node only when it is not the canonical node
    // (the canonical node is created and cached by getCanonicalTagType).
    if (const Type *T = TD->TypeForDecl; T && !T->isCanonicalUnqualified())
      return QualType(T, 0);

    const Type *CanonicalType = getCanonicalTagType(TD: NonInjectedTD).getTypePtr();
    const Type *T =
        getTagTypeInternal(Keyword,
                           /*Qualifier=*/std::nullopt, TD: NonInjectedTD,
                           /*OwnsTag=*/false, IsInjected, CanonicalType,
                           /*WithFoldingSetNode=*/false);
    TD->TypeForDecl = T;
    return QualType(T, 0);
  }

  // Non-preferred forms are uniqued through the TagTypes folding set.
  llvm::FoldingSetNodeID ID;
  TagTypeFoldingSetPlaceholder::Profile(ID, Keyword, Qualifier, Tag: NonInjectedTD,
                                        OwnsTag, IsInjected);

  void *InsertPos = nullptr;
  if (TagTypeFoldingSetPlaceholder *T =
          TagTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(T->getTagType(), 0);

  const Type *CanonicalType = getCanonicalTagType(TD: NonInjectedTD).getTypePtr();
  TagType *T =
      getTagTypeInternal(Keyword, Qualifier, TD: NonInjectedTD, OwnsTag, IsInjected,
                         CanonicalType, /*WithFoldingSetNode=*/true);
  TagTypes.InsertNode(N: TagTypeFoldingSetPlaceholder::fromTagType(T), InsertPos);
  return QualType(T, 0);
}
5454
/// Given the number of bits needed to represent an enum's negative and
/// non-negative enumerator values, choose the best underlying integer type
/// (\p BestType) and its promoted type (\p BestPromotionType).
///
/// \param IsPacked whether the enum may use types narrower than int.
/// \returns true when even long long cannot represent all values (the enum
///          is "too large").
bool ASTContext::computeBestEnumTypes(bool IsPacked, unsigned NumNegativeBits,
                                      unsigned NumPositiveBits,
                                      QualType &BestType,
                                      QualType &BestPromotionType) {
  unsigned IntWidth = Target->getIntWidth();
  unsigned CharWidth = Target->getCharWidth();
  unsigned ShortWidth = Target->getShortWidth();
  bool EnumTooLarge = false;
  unsigned BestWidth;
  if (NumNegativeBits) {
    // If there is a negative value, figure out the smallest integer type (of
    // int/long/longlong) that fits.
    // If it's packed, check also if it fits a char or a short.
    // Note the strict '<' on the positive side: one bit is the sign bit.
    if (IsPacked && NumNegativeBits <= CharWidth &&
        NumPositiveBits < CharWidth) {
      BestType = SignedCharTy;
      BestWidth = CharWidth;
    } else if (IsPacked && NumNegativeBits <= ShortWidth &&
               NumPositiveBits < ShortWidth) {
      BestType = ShortTy;
      BestWidth = ShortWidth;
    } else if (NumNegativeBits <= IntWidth && NumPositiveBits < IntWidth) {
      BestType = IntTy;
      BestWidth = IntWidth;
    } else {
      BestWidth = Target->getLongWidth();

      if (NumNegativeBits <= BestWidth && NumPositiveBits < BestWidth) {
        BestType = LongTy;
      } else {
        BestWidth = Target->getLongLongWidth();

        if (NumNegativeBits > BestWidth || NumPositiveBits >= BestWidth)
          EnumTooLarge = true;
        BestType = LongLongTy;
      }
    }
    // Signed types at or below int width promote to int.
    BestPromotionType = (BestWidth <= IntWidth ? IntTy : BestType);
  } else {
    // If there is no negative value, figure out the smallest type that fits
    // all of the enumerator values.
    // If it's packed, check also if it fits a char or a short.
    if (IsPacked && NumPositiveBits <= CharWidth) {
      BestType = UnsignedCharTy;
      BestPromotionType = IntTy;
      BestWidth = CharWidth;
    } else if (IsPacked && NumPositiveBits <= ShortWidth) {
      BestType = UnsignedShortTy;
      BestPromotionType = IntTy;
      BestWidth = ShortWidth;
    } else if (NumPositiveBits <= IntWidth) {
      BestType = UnsignedIntTy;
      BestWidth = IntWidth;
      // In C++, an unsigned type promotes to the signed type when every
      // value fits; otherwise the promoted type stays unsigned.
      BestPromotionType = (NumPositiveBits == BestWidth || !LangOpts.CPlusPlus)
                              ? UnsignedIntTy
                              : IntTy;
    } else if (NumPositiveBits <= (BestWidth = Target->getLongWidth())) {
      BestType = UnsignedLongTy;
      BestPromotionType = (NumPositiveBits == BestWidth || !LangOpts.CPlusPlus)
                              ? UnsignedLongTy
                              : LongTy;
    } else {
      BestWidth = Target->getLongLongWidth();
      if (NumPositiveBits > BestWidth) {
        // This can happen with bit-precise integer types, but those are not
        // allowed as the type for an enumerator per C23 6.7.2.2p4 and p12.
        // FIXME: GCC uses __int128_t and __uint128_t for cases that fit within
        // a 128-bit integer, we should consider doing the same.
        EnumTooLarge = true;
      }
      BestType = UnsignedLongLongTy;
      BestPromotionType = (NumPositiveBits == BestWidth || !LangOpts.CPlusPlus)
                              ? UnsignedLongLongTy
                              : LongLongTy;
    }
  }
  return EnumTooLarge;
}
5533
5534bool ASTContext::isRepresentableIntegerValue(llvm::APSInt &Value, QualType T) {
5535 assert((T->isIntegralType(*this) || T->isEnumeralType()) &&
5536 "Integral type required!");
5537 unsigned BitWidth = getIntWidth(T);
5538
5539 if (Value.isUnsigned() || Value.isNonNegative()) {
5540 if (T->isSignedIntegerOrEnumerationType())
5541 --BitWidth;
5542 return Value.getActiveBits() <= BitWidth;
5543 }
5544 return Value.getSignificantBits() <= BitWidth;
5545}
5546
5547UnresolvedUsingType *ASTContext::getUnresolvedUsingTypeInternal(
5548 ElaboratedTypeKeyword Keyword, NestedNameSpecifier Qualifier,
5549 const UnresolvedUsingTypenameDecl *D, void *InsertPos,
5550 const Type *CanonicalType) const {
5551 void *Mem = Allocate(
5552 Size: UnresolvedUsingType::totalSizeToAlloc<
5553 FoldingSetPlaceholder<UnresolvedUsingType>, NestedNameSpecifier>(
5554 Counts: !!InsertPos, Counts: !!Qualifier),
5555 Align: alignof(UnresolvedUsingType));
5556 auto *T = new (Mem) UnresolvedUsingType(Keyword, Qualifier, D, CanonicalType);
5557 if (InsertPos) {
5558 auto *Placeholder = new (T->getFoldingSetPlaceholder())
5559 FoldingSetPlaceholder<TypedefType>();
5560 TypedefTypes.InsertNode(N: Placeholder, InsertPos);
5561 }
5562 Types.push_back(Elt: T);
5563 return T;
5564}
5565
5566CanQualType ASTContext::getCanonicalUnresolvedUsingType(
5567 const UnresolvedUsingTypenameDecl *D) const {
5568 D = D->getCanonicalDecl();
5569 if (D->TypeForDecl)
5570 return D->TypeForDecl->getCanonicalTypeUnqualified();
5571
5572 const Type *CanonicalType = getUnresolvedUsingTypeInternal(
5573 Keyword: ElaboratedTypeKeyword::None,
5574 /*Qualifier=*/std::nullopt, D,
5575 /*InsertPos=*/nullptr, /*CanonicalType=*/nullptr);
5576 D->TypeForDecl = CanonicalType;
5577 return CanQualType::CreateUnsafe(Other: QualType(CanonicalType, 0));
5578}
5579
/// Return the (possibly elaborated and qualified) type naming the
/// unresolved using typename declaration \p D.
QualType
ASTContext::getUnresolvedUsingType(ElaboratedTypeKeyword Keyword,
                                   NestedNameSpecifier Qualifier,
                                   const UnresolvedUsingTypenameDecl *D) const {
  if (Keyword == ElaboratedTypeKeyword::None && !Qualifier) {
    // Fast path: the plain form is cached on the declaration. A cached
    // canonical node is skipped here; it is produced separately by
    // getCanonicalUnresolvedUsingType.
    if (const Type *T = D->TypeForDecl; T && !T->isCanonicalUnqualified())
      return QualType(T, 0);

    const Type *CanonicalType = getCanonicalUnresolvedUsingType(D).getTypePtr();
    const Type *T =
        getUnresolvedUsingTypeInternal(Keyword: ElaboratedTypeKeyword::None,
                                       /*Qualifier=*/std::nullopt, D,
                                       /*InsertPos=*/nullptr, CanonicalType);
    D->TypeForDecl = T;
    return QualType(T, 0);
  }

  // Elaborated or qualified forms are uniqued through a folding set.
  llvm::FoldingSetNodeID ID;
  UnresolvedUsingType::Profile(ID, Keyword, Qualifier, D);

  void *InsertPos = nullptr;
  if (FoldingSetPlaceholder<UnresolvedUsingType> *Placeholder =
          UnresolvedUsingTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(Placeholder->getType(), 0);
  assert(InsertPos);

  const Type *CanonicalType = getCanonicalUnresolvedUsingType(D).getTypePtr();
  const Type *T = getUnresolvedUsingTypeInternal(Keyword, Qualifier, D,
                                                 InsertPos, CanonicalType);
  return QualType(T, 0);
}
5611
5612QualType ASTContext::getAttributedType(attr::Kind attrKind,
5613 QualType modifiedType,
5614 QualType equivalentType,
5615 const Attr *attr) const {
5616 llvm::FoldingSetNodeID id;
5617 AttributedType::Profile(ID&: id, attrKind, modified: modifiedType, equivalent: equivalentType, attr);
5618
5619 void *insertPos = nullptr;
5620 AttributedType *type = AttributedTypes.FindNodeOrInsertPos(ID: id, InsertPos&: insertPos);
5621 if (type) return QualType(type, 0);
5622
5623 assert(!attr || attr->getKind() == attrKind);
5624
5625 QualType canon = getCanonicalType(T: equivalentType);
5626 type = new (*this, alignof(AttributedType))
5627 AttributedType(canon, attrKind, attr, modifiedType, equivalentType);
5628
5629 Types.push_back(Elt: type);
5630 AttributedTypes.InsertNode(N: type, InsertPos: insertPos);
5631
5632 return QualType(type, 0);
5633}
5634
5635QualType ASTContext::getAttributedType(const Attr *attr, QualType modifiedType,
5636 QualType equivalentType) const {
5637 return getAttributedType(attrKind: attr->getKind(), modifiedType, equivalentType, attr);
5638}
5639
5640QualType ASTContext::getAttributedType(NullabilityKind nullability,
5641 QualType modifiedType,
5642 QualType equivalentType) {
5643 switch (nullability) {
5644 case NullabilityKind::NonNull:
5645 return getAttributedType(attrKind: attr::TypeNonNull, modifiedType, equivalentType);
5646
5647 case NullabilityKind::Nullable:
5648 return getAttributedType(attrKind: attr::TypeNullable, modifiedType, equivalentType);
5649
5650 case NullabilityKind::NullableResult:
5651 return getAttributedType(attrKind: attr::TypeNullableResult, modifiedType,
5652 equivalentType);
5653
5654 case NullabilityKind::Unspecified:
5655 return getAttributedType(attrKind: attr::TypeNullUnspecified, modifiedType,
5656 equivalentType);
5657 }
5658
5659 llvm_unreachable("Unknown nullability kind");
5660}
5661
5662QualType ASTContext::getBTFTagAttributedType(const BTFTypeTagAttr *BTFAttr,
5663 QualType Wrapped) const {
5664 llvm::FoldingSetNodeID ID;
5665 BTFTagAttributedType::Profile(ID, Wrapped, BTFAttr);
5666
5667 void *InsertPos = nullptr;
5668 BTFTagAttributedType *Ty =
5669 BTFTagAttributedTypes.FindNodeOrInsertPos(ID, InsertPos);
5670 if (Ty)
5671 return QualType(Ty, 0);
5672
5673 QualType Canon = getCanonicalType(T: Wrapped);
5674 Ty = new (*this, alignof(BTFTagAttributedType))
5675 BTFTagAttributedType(Canon, Wrapped, BTFAttr);
5676
5677 Types.push_back(Elt: Ty);
5678 BTFTagAttributedTypes.InsertNode(N: Ty, InsertPos);
5679
5680 return QualType(Ty, 0);
5681}
5682
5683QualType ASTContext::getHLSLAttributedResourceType(
5684 QualType Wrapped, QualType Contained,
5685 const HLSLAttributedResourceType::Attributes &Attrs) {
5686
5687 llvm::FoldingSetNodeID ID;
5688 HLSLAttributedResourceType::Profile(ID, Wrapped, Contained, Attrs);
5689
5690 void *InsertPos = nullptr;
5691 HLSLAttributedResourceType *Ty =
5692 HLSLAttributedResourceTypes.FindNodeOrInsertPos(ID, InsertPos);
5693 if (Ty)
5694 return QualType(Ty, 0);
5695
5696 Ty = new (*this, alignof(HLSLAttributedResourceType))
5697 HLSLAttributedResourceType(Wrapped, Contained, Attrs);
5698
5699 Types.push_back(Elt: Ty);
5700 HLSLAttributedResourceTypes.InsertNode(N: Ty, InsertPos);
5701
5702 return QualType(Ty, 0);
5703}
5704
5705QualType ASTContext::getHLSLInlineSpirvType(uint32_t Opcode, uint32_t Size,
5706 uint32_t Alignment,
5707 ArrayRef<SpirvOperand> Operands) {
5708 llvm::FoldingSetNodeID ID;
5709 HLSLInlineSpirvType::Profile(ID, Opcode, Size, Alignment, Operands);
5710
5711 void *InsertPos = nullptr;
5712 HLSLInlineSpirvType *Ty =
5713 HLSLInlineSpirvTypes.FindNodeOrInsertPos(ID, InsertPos);
5714 if (Ty)
5715 return QualType(Ty, 0);
5716
5717 void *Mem = Allocate(
5718 Size: HLSLInlineSpirvType::totalSizeToAlloc<SpirvOperand>(Counts: Operands.size()),
5719 Align: alignof(HLSLInlineSpirvType));
5720
5721 Ty = new (Mem) HLSLInlineSpirvType(Opcode, Size, Alignment, Operands);
5722
5723 Types.push_back(Elt: Ty);
5724 HLSLInlineSpirvTypes.InsertNode(N: Ty, InsertPos);
5725
5726 return QualType(Ty, 0);
5727}
5728
5729/// Retrieve a substitution-result type.
5730QualType ASTContext::getSubstTemplateTypeParmType(QualType Replacement,
5731 Decl *AssociatedDecl,
5732 unsigned Index,
5733 UnsignedOrNone PackIndex,
5734 bool Final) const {
5735 llvm::FoldingSetNodeID ID;
5736 SubstTemplateTypeParmType::Profile(ID, Replacement, AssociatedDecl, Index,
5737 PackIndex, Final);
5738 void *InsertPos = nullptr;
5739 SubstTemplateTypeParmType *SubstParm =
5740 SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
5741
5742 if (!SubstParm) {
5743 void *Mem = Allocate(Size: SubstTemplateTypeParmType::totalSizeToAlloc<QualType>(
5744 Counts: !Replacement.isCanonical()),
5745 Align: alignof(SubstTemplateTypeParmType));
5746 SubstParm = new (Mem) SubstTemplateTypeParmType(Replacement, AssociatedDecl,
5747 Index, PackIndex, Final);
5748 Types.push_back(Elt: SubstParm);
5749 SubstTemplateTypeParmTypes.InsertNode(N: SubstParm, InsertPos);
5750 }
5751
5752 return QualType(SubstParm, 0);
5753}
5754
5755QualType
5756ASTContext::getSubstTemplateTypeParmPackType(Decl *AssociatedDecl,
5757 unsigned Index, bool Final,
5758 const TemplateArgument &ArgPack) {
5759#ifndef NDEBUG
5760 for (const auto &P : ArgPack.pack_elements())
5761 assert(P.getKind() == TemplateArgument::Type && "Pack contains a non-type");
5762#endif
5763
5764 llvm::FoldingSetNodeID ID;
5765 SubstTemplateTypeParmPackType::Profile(ID, AssociatedDecl, Index, Final,
5766 ArgPack);
5767 void *InsertPos = nullptr;
5768 if (SubstTemplateTypeParmPackType *SubstParm =
5769 SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
5770 return QualType(SubstParm, 0);
5771
5772 QualType Canon;
5773 {
5774 TemplateArgument CanonArgPack = getCanonicalTemplateArgument(Arg: ArgPack);
5775 if (!AssociatedDecl->isCanonicalDecl() ||
5776 !CanonArgPack.structurallyEquals(Other: ArgPack)) {
5777 Canon = getSubstTemplateTypeParmPackType(
5778 AssociatedDecl: AssociatedDecl->getCanonicalDecl(), Index, Final, ArgPack: CanonArgPack);
5779 [[maybe_unused]] const auto *Nothing =
5780 SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
5781 assert(!Nothing);
5782 }
5783 }
5784
5785 auto *SubstParm = new (*this, alignof(SubstTemplateTypeParmPackType))
5786 SubstTemplateTypeParmPackType(Canon, AssociatedDecl, Index, Final,
5787 ArgPack);
5788 Types.push_back(Elt: SubstParm);
5789 SubstTemplateTypeParmPackTypes.InsertNode(N: SubstParm, InsertPos);
5790 return QualType(SubstParm, 0);
5791}
5792
5793QualType
5794ASTContext::getSubstBuiltinTemplatePack(const TemplateArgument &ArgPack) {
5795 assert(llvm::all_of(ArgPack.pack_elements(),
5796 [](const auto &P) {
5797 return P.getKind() == TemplateArgument::Type;
5798 }) &&
5799 "Pack contains a non-type");
5800
5801 llvm::FoldingSetNodeID ID;
5802 SubstBuiltinTemplatePackType::Profile(ID, ArgPack);
5803
5804 void *InsertPos = nullptr;
5805 if (auto *T =
5806 SubstBuiltinTemplatePackTypes.FindNodeOrInsertPos(ID, InsertPos))
5807 return QualType(T, 0);
5808
5809 QualType Canon;
5810 TemplateArgument CanonArgPack = getCanonicalTemplateArgument(Arg: ArgPack);
5811 if (!CanonArgPack.structurallyEquals(Other: ArgPack)) {
5812 Canon = getSubstBuiltinTemplatePack(ArgPack: CanonArgPack);
5813 // Refresh InsertPos, in case the recursive call above caused rehashing,
5814 // which would invalidate the bucket pointer.
5815 [[maybe_unused]] const auto *Nothing =
5816 SubstBuiltinTemplatePackTypes.FindNodeOrInsertPos(ID, InsertPos);
5817 assert(!Nothing);
5818 }
5819
5820 auto *PackType = new (*this, alignof(SubstBuiltinTemplatePackType))
5821 SubstBuiltinTemplatePackType(Canon, ArgPack);
5822 Types.push_back(Elt: PackType);
5823 SubstBuiltinTemplatePackTypes.InsertNode(N: PackType, InsertPos);
5824 return QualType(PackType, 0);
5825}
5826
5827/// Retrieve the template type parameter type for a template
5828/// parameter or parameter pack with the given depth, index, and (optionally)
5829/// name.
5830QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
5831 bool ParameterPack,
5832 TemplateTypeParmDecl *TTPDecl) const {
5833 llvm::FoldingSetNodeID ID;
5834 TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
5835 void *InsertPos = nullptr;
5836 TemplateTypeParmType *TypeParm
5837 = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
5838
5839 if (TypeParm)
5840 return QualType(TypeParm, 0);
5841
5842 if (TTPDecl) {
5843 QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
5844 TypeParm = new (*this, alignof(TemplateTypeParmType))
5845 TemplateTypeParmType(Depth, Index, ParameterPack, TTPDecl, Canon);
5846
5847 TemplateTypeParmType *TypeCheck
5848 = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
5849 assert(!TypeCheck && "Template type parameter canonical type broken");
5850 (void)TypeCheck;
5851 } else
5852 TypeParm = new (*this, alignof(TemplateTypeParmType)) TemplateTypeParmType(
5853 Depth, Index, ParameterPack, /*TTPDecl=*/nullptr, /*Canon=*/QualType());
5854
5855 Types.push_back(Elt: TypeParm);
5856 TemplateTypeParmTypes.InsertNode(N: TypeParm, InsertPos);
5857
5858 return QualType(TypeParm, 0);
5859}
5860
5861static ElaboratedTypeKeyword
5862getCanonicalElaboratedTypeKeyword(ElaboratedTypeKeyword Keyword) {
5863 switch (Keyword) {
5864 // These are just themselves.
5865 case ElaboratedTypeKeyword::None:
5866 case ElaboratedTypeKeyword::Struct:
5867 case ElaboratedTypeKeyword::Union:
5868 case ElaboratedTypeKeyword::Enum:
5869 case ElaboratedTypeKeyword::Interface:
5870 return Keyword;
5871
5872 // These are equivalent.
5873 case ElaboratedTypeKeyword::Typename:
5874 return ElaboratedTypeKeyword::None;
5875
5876 // These are functionally equivalent, so relying on their equivalence is
5877 // IFNDR. By making them equivalent, we disallow overloading, which at least
5878 // can produce a diagnostic.
5879 case ElaboratedTypeKeyword::Class:
5880 return ElaboratedTypeKeyword::Struct;
5881 }
5882 llvm_unreachable("unexpected keyword kind");
5883}
5884
5885TypeSourceInfo *ASTContext::getTemplateSpecializationTypeInfo(
5886 ElaboratedTypeKeyword Keyword, SourceLocation ElaboratedKeywordLoc,
5887 NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKeywordLoc,
5888 TemplateName Name, SourceLocation NameLoc,
5889 const TemplateArgumentListInfo &SpecifiedArgs,
5890 ArrayRef<TemplateArgument> CanonicalArgs, QualType Underlying) const {
5891 QualType TST = getTemplateSpecializationType(
5892 Keyword, T: Name, SpecifiedArgs: SpecifiedArgs.arguments(), CanonicalArgs, Canon: Underlying);
5893
5894 TypeSourceInfo *TSI = CreateTypeSourceInfo(T: TST);
5895 TSI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>().set(
5896 ElaboratedKeywordLoc, QualifierLoc, TemplateKeywordLoc, NameLoc,
5897 TAL: SpecifiedArgs);
5898 return TSI;
5899}
5900
5901QualType ASTContext::getTemplateSpecializationType(
5902 ElaboratedTypeKeyword Keyword, TemplateName Template,
5903 ArrayRef<TemplateArgumentLoc> SpecifiedArgs,
5904 ArrayRef<TemplateArgument> CanonicalArgs, QualType Underlying) const {
5905 SmallVector<TemplateArgument, 4> SpecifiedArgVec;
5906 SpecifiedArgVec.reserve(N: SpecifiedArgs.size());
5907 for (const TemplateArgumentLoc &Arg : SpecifiedArgs)
5908 SpecifiedArgVec.push_back(Elt: Arg.getArgument());
5909
5910 return getTemplateSpecializationType(Keyword, T: Template, SpecifiedArgs: SpecifiedArgVec,
5911 CanonicalArgs, Underlying);
5912}
5913
5914[[maybe_unused]] static bool
5915hasAnyPackExpansions(ArrayRef<TemplateArgument> Args) {
5916 for (const TemplateArgument &Arg : Args)
5917 if (Arg.isPackExpansion())
5918 return true;
5919 return false;
5920}
5921
5922QualType ASTContext::getCanonicalTemplateSpecializationType(
5923 ElaboratedTypeKeyword Keyword, TemplateName Template,
5924 ArrayRef<TemplateArgument> Args) const {
5925 assert(Template ==
5926 getCanonicalTemplateName(Template, /*IgnoreDeduced=*/true));
5927 assert((Keyword == ElaboratedTypeKeyword::None ||
5928 Template.getAsDependentTemplateName()));
5929#ifndef NDEBUG
5930 for (const auto &Arg : Args)
5931 assert(Arg.structurallyEquals(getCanonicalTemplateArgument(Arg)));
5932#endif
5933
5934 llvm::FoldingSetNodeID ID;
5935 TemplateSpecializationType::Profile(ID, Keyword, T: Template, Args, Underlying: QualType(),
5936 Context: *this);
5937 void *InsertPos = nullptr;
5938 if (auto *T = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos))
5939 return QualType(T, 0);
5940
5941 void *Mem = Allocate(Size: sizeof(TemplateSpecializationType) +
5942 sizeof(TemplateArgument) * Args.size(),
5943 Align: alignof(TemplateSpecializationType));
5944 auto *Spec =
5945 new (Mem) TemplateSpecializationType(Keyword, Template,
5946 /*IsAlias=*/false, Args, QualType());
5947 assert(Spec->isDependentType() &&
5948 "canonical template specialization must be dependent");
5949 Types.push_back(Elt: Spec);
5950 TemplateSpecializationTypes.InsertNode(N: Spec, InsertPos);
5951 return QualType(Spec, 0);
5952}
5953
5954QualType ASTContext::getTemplateSpecializationType(
5955 ElaboratedTypeKeyword Keyword, TemplateName Template,
5956 ArrayRef<TemplateArgument> SpecifiedArgs,
5957 ArrayRef<TemplateArgument> CanonicalArgs, QualType Underlying) const {
5958 const auto *TD = Template.getAsTemplateDecl(/*IgnoreDeduced=*/true);
5959 bool IsTypeAlias = TD && TD->isTypeAlias();
5960 if (Underlying.isNull()) {
5961 TemplateName CanonTemplate =
5962 getCanonicalTemplateName(Name: Template, /*IgnoreDeduced=*/true);
5963 ElaboratedTypeKeyword CanonKeyword =
5964 CanonTemplate.getAsDependentTemplateName()
5965 ? getCanonicalElaboratedTypeKeyword(Keyword)
5966 : ElaboratedTypeKeyword::None;
5967 bool NonCanonical = Template != CanonTemplate || Keyword != CanonKeyword;
5968 SmallVector<TemplateArgument, 4> CanonArgsVec;
5969 if (CanonicalArgs.empty()) {
5970 CanonArgsVec = SmallVector<TemplateArgument, 4>(SpecifiedArgs);
5971 NonCanonical |= canonicalizeTemplateArguments(Args: CanonArgsVec);
5972 CanonicalArgs = CanonArgsVec;
5973 } else {
5974 NonCanonical |= !llvm::equal(
5975 LRange&: SpecifiedArgs, RRange&: CanonicalArgs,
5976 P: [](const TemplateArgument &A, const TemplateArgument &B) {
5977 return A.structurallyEquals(Other: B);
5978 });
5979 }
5980
5981 // We can get here with an alias template when the specialization
5982 // contains a pack expansion that does not match up with a parameter
5983 // pack, or a builtin template which cannot be resolved due to dependency.
5984 assert((!isa_and_nonnull<TypeAliasTemplateDecl>(TD) ||
5985 hasAnyPackExpansions(CanonicalArgs)) &&
5986 "Caller must compute aliased type");
5987 IsTypeAlias = false;
5988
5989 Underlying = getCanonicalTemplateSpecializationType(
5990 Keyword: CanonKeyword, Template: CanonTemplate, Args: CanonicalArgs);
5991 if (!NonCanonical)
5992 return Underlying;
5993 }
5994 void *Mem = Allocate(Size: sizeof(TemplateSpecializationType) +
5995 sizeof(TemplateArgument) * SpecifiedArgs.size() +
5996 (IsTypeAlias ? sizeof(QualType) : 0),
5997 Align: alignof(TemplateSpecializationType));
5998 auto *Spec = new (Mem) TemplateSpecializationType(
5999 Keyword, Template, IsTypeAlias, SpecifiedArgs, Underlying);
6000 Types.push_back(Elt: Spec);
6001 return QualType(Spec, 0);
6002}
6003
6004QualType
6005ASTContext::getParenType(QualType InnerType) const {
6006 llvm::FoldingSetNodeID ID;
6007 ParenType::Profile(ID, Inner: InnerType);
6008
6009 void *InsertPos = nullptr;
6010 ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
6011 if (T)
6012 return QualType(T, 0);
6013
6014 QualType Canon = InnerType;
6015 if (!Canon.isCanonical()) {
6016 Canon = getCanonicalType(T: InnerType);
6017 ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
6018 assert(!CheckT && "Paren canonical type broken");
6019 (void)CheckT;
6020 }
6021
6022 T = new (*this, alignof(ParenType)) ParenType(InnerType, Canon);
6023 Types.push_back(Elt: T);
6024 ParenTypes.InsertNode(N: T, InsertPos);
6025 return QualType(T, 0);
6026}
6027
6028QualType
6029ASTContext::getMacroQualifiedType(QualType UnderlyingTy,
6030 const IdentifierInfo *MacroII) const {
6031 QualType Canon = UnderlyingTy;
6032 if (!Canon.isCanonical())
6033 Canon = getCanonicalType(T: UnderlyingTy);
6034
6035 auto *newType = new (*this, alignof(MacroQualifiedType))
6036 MacroQualifiedType(UnderlyingTy, Canon, MacroII);
6037 Types.push_back(Elt: newType);
6038 return QualType(newType, 0);
6039}
6040
6041QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
6042 NestedNameSpecifier NNS,
6043 const IdentifierInfo *Name) const {
6044 llvm::FoldingSetNodeID ID;
6045 DependentNameType::Profile(ID, Keyword, NNS, Name);
6046
6047 void *InsertPos = nullptr;
6048 if (DependentNameType *T =
6049 DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos))
6050 return QualType(T, 0);
6051
6052 ElaboratedTypeKeyword CanonKeyword =
6053 getCanonicalElaboratedTypeKeyword(Keyword);
6054 NestedNameSpecifier CanonNNS = NNS.getCanonical();
6055
6056 QualType Canon;
6057 if (CanonKeyword != Keyword || CanonNNS != NNS) {
6058 Canon = getDependentNameType(Keyword: CanonKeyword, NNS: CanonNNS, Name);
6059 [[maybe_unused]] DependentNameType *T =
6060 DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos);
6061 assert(!T && "broken canonicalization");
6062 assert(Canon.isCanonical());
6063 }
6064
6065 DependentNameType *T = new (*this, alignof(DependentNameType))
6066 DependentNameType(Keyword, NNS, Name, Canon);
6067 Types.push_back(Elt: T);
6068 DependentNameTypes.InsertNode(N: T, InsertPos);
6069 return QualType(T, 0);
6070}
6071
6072TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) const {
6073 TemplateArgument Arg;
6074 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Val: Param)) {
6075 QualType ArgType = getTypeDeclType(Decl: TTP);
6076 if (TTP->isParameterPack())
6077 ArgType = getPackExpansionType(Pattern: ArgType, NumExpansions: std::nullopt);
6078
6079 Arg = TemplateArgument(ArgType);
6080 } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Val: Param)) {
6081 QualType T =
6082 NTTP->getType().getNonPackExpansionType().getNonLValueExprType(Context: *this);
6083 // For class NTTPs, ensure we include the 'const' so the type matches that
6084 // of a real template argument.
6085 // FIXME: It would be more faithful to model this as something like an
6086 // lvalue-to-rvalue conversion applied to a const-qualified lvalue.
6087 ExprValueKind VK;
6088 if (T->isRecordType()) {
6089 // C++ [temp.param]p8: An id-expression naming a non-type
6090 // template-parameter of class type T denotes a static storage duration
6091 // object of type const T.
6092 T.addConst();
6093 VK = VK_LValue;
6094 } else {
6095 VK = Expr::getValueKindForType(T: NTTP->getType());
6096 }
6097 Expr *E = new (*this)
6098 DeclRefExpr(*this, NTTP, /*RefersToEnclosingVariableOrCapture=*/false,
6099 T, VK, NTTP->getLocation());
6100
6101 if (NTTP->isParameterPack())
6102 E = new (*this) PackExpansionExpr(E, NTTP->getLocation(), std::nullopt);
6103 Arg = TemplateArgument(E, /*IsCanonical=*/false);
6104 } else {
6105 auto *TTP = cast<TemplateTemplateParmDecl>(Val: Param);
6106 TemplateName Name = getQualifiedTemplateName(
6107 /*Qualifier=*/std::nullopt, /*TemplateKeyword=*/false,
6108 Template: TemplateName(TTP));
6109 if (TTP->isParameterPack())
6110 Arg = TemplateArgument(Name, /*NumExpansions=*/std::nullopt);
6111 else
6112 Arg = TemplateArgument(Name);
6113 }
6114
6115 if (Param->isTemplateParameterPack())
6116 Arg =
6117 TemplateArgument::CreatePackCopy(Context&: const_cast<ASTContext &>(*this), Args: Arg);
6118
6119 return Arg;
6120}
6121
6122QualType ASTContext::getPackExpansionType(QualType Pattern,
6123 UnsignedOrNone NumExpansions,
6124 bool ExpectPackInType) const {
6125 assert((!ExpectPackInType || Pattern->containsUnexpandedParameterPack()) &&
6126 "Pack expansions must expand one or more parameter packs");
6127
6128 llvm::FoldingSetNodeID ID;
6129 PackExpansionType::Profile(ID, Pattern, NumExpansions);
6130
6131 void *InsertPos = nullptr;
6132 PackExpansionType *T = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
6133 if (T)
6134 return QualType(T, 0);
6135
6136 QualType Canon;
6137 if (!Pattern.isCanonical()) {
6138 Canon = getPackExpansionType(Pattern: getCanonicalType(T: Pattern), NumExpansions,
6139 /*ExpectPackInType=*/false);
6140
6141 // Find the insert position again, in case we inserted an element into
6142 // PackExpansionTypes and invalidated our insert position.
6143 PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
6144 }
6145
6146 T = new (*this, alignof(PackExpansionType))
6147 PackExpansionType(Pattern, Canon, NumExpansions);
6148 Types.push_back(Elt: T);
6149 PackExpansionTypes.InsertNode(N: T, InsertPos);
6150 return QualType(T, 0);
6151}
6152
6153/// CmpProtocolNames - Comparison predicate for sorting protocols
6154/// alphabetically.
6155static int CmpProtocolNames(ObjCProtocolDecl *const *LHS,
6156 ObjCProtocolDecl *const *RHS) {
6157 return DeclarationName::compare(LHS: (*LHS)->getDeclName(), RHS: (*RHS)->getDeclName());
6158}
6159
6160static bool areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) {
6161 if (Protocols.empty()) return true;
6162
6163 if (Protocols[0]->getCanonicalDecl() != Protocols[0])
6164 return false;
6165
6166 for (unsigned i = 1; i != Protocols.size(); ++i)
6167 if (CmpProtocolNames(LHS: &Protocols[i - 1], RHS: &Protocols[i]) >= 0 ||
6168 Protocols[i]->getCanonicalDecl() != Protocols[i])
6169 return false;
6170 return true;
6171}
6172
6173static void
6174SortAndUniqueProtocols(SmallVectorImpl<ObjCProtocolDecl *> &Protocols) {
6175 // Sort protocols, keyed by name.
6176 llvm::array_pod_sort(Start: Protocols.begin(), End: Protocols.end(), Compare: CmpProtocolNames);
6177
6178 // Canonicalize.
6179 for (ObjCProtocolDecl *&P : Protocols)
6180 P = P->getCanonicalDecl();
6181
6182 // Remove duplicates.
6183 auto ProtocolsEnd = llvm::unique(R&: Protocols);
6184 Protocols.erase(CS: ProtocolsEnd, CE: Protocols.end());
6185}
6186
6187QualType ASTContext::getObjCObjectType(QualType BaseType,
6188 ObjCProtocolDecl * const *Protocols,
6189 unsigned NumProtocols) const {
6190 return getObjCObjectType(Base: BaseType, typeArgs: {}, protocols: ArrayRef(Protocols, NumProtocols),
6191 /*isKindOf=*/false);
6192}
6193
6194QualType ASTContext::getObjCObjectType(
6195 QualType baseType,
6196 ArrayRef<QualType> typeArgs,
6197 ArrayRef<ObjCProtocolDecl *> protocols,
6198 bool isKindOf) const {
6199 // If the base type is an interface and there aren't any protocols or
6200 // type arguments to add, then the interface type will do just fine.
6201 if (typeArgs.empty() && protocols.empty() && !isKindOf &&
6202 isa<ObjCInterfaceType>(Val: baseType))
6203 return baseType;
6204
6205 // Look in the folding set for an existing type.
6206 llvm::FoldingSetNodeID ID;
6207 ObjCObjectTypeImpl::Profile(ID, Base: baseType, typeArgs, protocols, isKindOf);
6208 void *InsertPos = nullptr;
6209 if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos))
6210 return QualType(QT, 0);
6211
6212 // Determine the type arguments to be used for canonicalization,
6213 // which may be explicitly specified here or written on the base
6214 // type.
6215 ArrayRef<QualType> effectiveTypeArgs = typeArgs;
6216 if (effectiveTypeArgs.empty()) {
6217 if (const auto *baseObject = baseType->getAs<ObjCObjectType>())
6218 effectiveTypeArgs = baseObject->getTypeArgs();
6219 }
6220
6221 // Build the canonical type, which has the canonical base type and a
6222 // sorted-and-uniqued list of protocols and the type arguments
6223 // canonicalized.
6224 QualType canonical;
6225 bool typeArgsAreCanonical = llvm::all_of(
6226 Range&: effectiveTypeArgs, P: [&](QualType type) { return type.isCanonical(); });
6227 bool protocolsSorted = areSortedAndUniqued(Protocols: protocols);
6228 if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) {
6229 // Determine the canonical type arguments.
6230 ArrayRef<QualType> canonTypeArgs;
6231 SmallVector<QualType, 4> canonTypeArgsVec;
6232 if (!typeArgsAreCanonical) {
6233 canonTypeArgsVec.reserve(N: effectiveTypeArgs.size());
6234 for (auto typeArg : effectiveTypeArgs)
6235 canonTypeArgsVec.push_back(Elt: getCanonicalType(T: typeArg));
6236 canonTypeArgs = canonTypeArgsVec;
6237 } else {
6238 canonTypeArgs = effectiveTypeArgs;
6239 }
6240
6241 ArrayRef<ObjCProtocolDecl *> canonProtocols;
6242 SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec;
6243 if (!protocolsSorted) {
6244 canonProtocolsVec.append(in_start: protocols.begin(), in_end: protocols.end());
6245 SortAndUniqueProtocols(Protocols&: canonProtocolsVec);
6246 canonProtocols = canonProtocolsVec;
6247 } else {
6248 canonProtocols = protocols;
6249 }
6250
6251 canonical = getObjCObjectType(baseType: getCanonicalType(T: baseType), typeArgs: canonTypeArgs,
6252 protocols: canonProtocols, isKindOf);
6253
6254 // Regenerate InsertPos.
6255 ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos);
6256 }
6257
6258 unsigned size = sizeof(ObjCObjectTypeImpl);
6259 size += typeArgs.size() * sizeof(QualType);
6260 size += protocols.size() * sizeof(ObjCProtocolDecl *);
6261 void *mem = Allocate(Size: size, Align: alignof(ObjCObjectTypeImpl));
6262 auto *T =
6263 new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols,
6264 isKindOf);
6265
6266 Types.push_back(Elt: T);
6267 ObjCObjectTypes.InsertNode(N: T, InsertPos);
6268 return QualType(T, 0);
6269}
6270
6271/// Apply Objective-C protocol qualifiers to the given type.
6272/// If this is for the canonical type of a type parameter, we can apply
6273/// protocol qualifiers on the ObjCObjectPointerType.
6274QualType
6275ASTContext::applyObjCProtocolQualifiers(QualType type,
6276 ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError,
6277 bool allowOnPointerType) const {
6278 hasError = false;
6279
6280 if (const auto *objT = dyn_cast<ObjCTypeParamType>(Val: type.getTypePtr())) {
6281 return getObjCTypeParamType(Decl: objT->getDecl(), protocols);
6282 }
6283
6284 // Apply protocol qualifiers to ObjCObjectPointerType.
6285 if (allowOnPointerType) {
6286 if (const auto *objPtr =
6287 dyn_cast<ObjCObjectPointerType>(Val: type.getTypePtr())) {
6288 const ObjCObjectType *objT = objPtr->getObjectType();
6289 // Merge protocol lists and construct ObjCObjectType.
6290 SmallVector<ObjCProtocolDecl*, 8> protocolsVec;
6291 protocolsVec.append(in_start: objT->qual_begin(),
6292 in_end: objT->qual_end());
6293 protocolsVec.append(in_start: protocols.begin(), in_end: protocols.end());
6294 ArrayRef<ObjCProtocolDecl *> protocols = protocolsVec;
6295 type = getObjCObjectType(
6296 baseType: objT->getBaseType(),
6297 typeArgs: objT->getTypeArgsAsWritten(),
6298 protocols,
6299 isKindOf: objT->isKindOfTypeAsWritten());
6300 return getObjCObjectPointerType(OIT: type);
6301 }
6302 }
6303
6304 // Apply protocol qualifiers to ObjCObjectType.
6305 if (const auto *objT = dyn_cast<ObjCObjectType>(Val: type.getTypePtr())){
6306 // FIXME: Check for protocols to which the class type is already
6307 // known to conform.
6308
6309 return getObjCObjectType(baseType: objT->getBaseType(),
6310 typeArgs: objT->getTypeArgsAsWritten(),
6311 protocols,
6312 isKindOf: objT->isKindOfTypeAsWritten());
6313 }
6314
6315 // If the canonical type is ObjCObjectType, ...
6316 if (type->isObjCObjectType()) {
6317 // Silently overwrite any existing protocol qualifiers.
6318 // TODO: determine whether that's the right thing to do.
6319
6320 // FIXME: Check for protocols to which the class type is already
6321 // known to conform.
6322 return getObjCObjectType(baseType: type, typeArgs: {}, protocols, isKindOf: false);
6323 }
6324
6325 // id<protocol-list>
6326 if (type->isObjCIdType()) {
6327 const auto *objPtr = type->castAs<ObjCObjectPointerType>();
6328 type = getObjCObjectType(baseType: ObjCBuiltinIdTy, typeArgs: {}, protocols,
6329 isKindOf: objPtr->isKindOfType());
6330 return getObjCObjectPointerType(OIT: type);
6331 }
6332
6333 // Class<protocol-list>
6334 if (type->isObjCClassType()) {
6335 const auto *objPtr = type->castAs<ObjCObjectPointerType>();
6336 type = getObjCObjectType(baseType: ObjCBuiltinClassTy, typeArgs: {}, protocols,
6337 isKindOf: objPtr->isKindOfType());
6338 return getObjCObjectPointerType(OIT: type);
6339 }
6340
6341 hasError = true;
6342 return type;
6343}
6344
6345QualType
6346ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl,
6347 ArrayRef<ObjCProtocolDecl *> protocols) const {
6348 // Look in the folding set for an existing type.
6349 llvm::FoldingSetNodeID ID;
6350 ObjCTypeParamType::Profile(ID, OTPDecl: Decl, CanonicalType: Decl->getUnderlyingType(), protocols);
6351 void *InsertPos = nullptr;
6352 if (ObjCTypeParamType *TypeParam =
6353 ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos))
6354 return QualType(TypeParam, 0);
6355
6356 // We canonicalize to the underlying type.
6357 QualType Canonical = getCanonicalType(T: Decl->getUnderlyingType());
6358 if (!protocols.empty()) {
6359 // Apply the protocol qualifers.
6360 bool hasError;
6361 Canonical = getCanonicalType(T: applyObjCProtocolQualifiers(
6362 type: Canonical, protocols, hasError, allowOnPointerType: true /*allowOnPointerType*/));
6363 assert(!hasError && "Error when apply protocol qualifier to bound type");
6364 }
6365
6366 unsigned size = sizeof(ObjCTypeParamType);
6367 size += protocols.size() * sizeof(ObjCProtocolDecl *);
6368 void *mem = Allocate(Size: size, Align: alignof(ObjCTypeParamType));
6369 auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols);
6370
6371 Types.push_back(Elt: newType);
6372 ObjCTypeParamTypes.InsertNode(N: newType, InsertPos);
6373 return QualType(newType, 0);
6374}
6375
6376void ASTContext::adjustObjCTypeParamBoundType(const ObjCTypeParamDecl *Orig,
6377 ObjCTypeParamDecl *New) const {
6378 New->setTypeSourceInfo(getTrivialTypeSourceInfo(T: Orig->getUnderlyingType()));
6379 // Update TypeForDecl after updating TypeSourceInfo.
6380 auto *NewTypeParamTy = cast<ObjCTypeParamType>(Val: New->TypeForDecl);
6381 SmallVector<ObjCProtocolDecl *, 8> protocols;
6382 protocols.append(in_start: NewTypeParamTy->qual_begin(), in_end: NewTypeParamTy->qual_end());
6383 QualType UpdatedTy = getObjCTypeParamType(Decl: New, protocols);
6384 New->TypeForDecl = UpdatedTy.getTypePtr();
6385}
6386
6387/// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's
6388/// protocol list adopt all protocols in QT's qualified-id protocol
6389/// list.
6390bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT,
6391 ObjCInterfaceDecl *IC) {
6392 if (!QT->isObjCQualifiedIdType())
6393 return false;
6394
6395 if (const auto *OPT = QT->getAs<ObjCObjectPointerType>()) {
6396 // If both the right and left sides have qualifiers.
6397 for (auto *Proto : OPT->quals()) {
6398 if (!IC->ClassImplementsProtocol(lProto: Proto, lookupCategory: false))
6399 return false;
6400 }
6401 return true;
6402 }
6403 return false;
6404}
6405
6406/// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in
6407/// QT's qualified-id protocol list adopt all protocols in IDecl's list
6408/// of protocols.
6409bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT,
6410 ObjCInterfaceDecl *IDecl) {
6411 if (!QT->isObjCQualifiedIdType())
6412 return false;
6413 const auto *OPT = QT->getAs<ObjCObjectPointerType>();
6414 if (!OPT)
6415 return false;
6416 if (!IDecl->hasDefinition())
6417 return false;
6418 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols;
6419 CollectInheritedProtocols(CDecl: IDecl, Protocols&: InheritedProtocols);
6420 if (InheritedProtocols.empty())
6421 return false;
6422 // Check that if every protocol in list of id<plist> conforms to a protocol
6423 // of IDecl's, then bridge casting is ok.
6424 bool Conforms = false;
6425 for (auto *Proto : OPT->quals()) {
6426 Conforms = false;
6427 for (auto *PI : InheritedProtocols) {
6428 if (ProtocolCompatibleWithProtocol(lProto: Proto, rProto: PI)) {
6429 Conforms = true;
6430 break;
6431 }
6432 }
6433 if (!Conforms)
6434 break;
6435 }
6436 if (Conforms)
6437 return true;
6438
6439 for (auto *PI : InheritedProtocols) {
6440 // If both the right and left sides have qualifiers.
6441 bool Adopts = false;
6442 for (auto *Proto : OPT->quals()) {
6443 // return 'true' if 'PI' is in the inheritance hierarchy of Proto
6444 if ((Adopts = ProtocolCompatibleWithProtocol(lProto: PI, rProto: Proto)))
6445 break;
6446 }
6447 if (!Adopts)
6448 return false;
6449 }
6450 return true;
6451}
6452
6453/// getObjCObjectPointerType - Return a ObjCObjectPointerType type for
6454/// the given object type.
6455QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const {
6456 llvm::FoldingSetNodeID ID;
6457 ObjCObjectPointerType::Profile(ID, T: ObjectT);
6458
6459 void *InsertPos = nullptr;
6460 if (ObjCObjectPointerType *QT =
6461 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
6462 return QualType(QT, 0);
6463
6464 // Find the canonical object type.
6465 QualType Canonical;
6466 if (!ObjectT.isCanonical()) {
6467 Canonical = getObjCObjectPointerType(ObjectT: getCanonicalType(T: ObjectT));
6468
6469 // Regenerate InsertPos.
6470 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
6471 }
6472
6473 // No match.
6474 void *Mem =
6475 Allocate(Size: sizeof(ObjCObjectPointerType), Align: alignof(ObjCObjectPointerType));
6476 auto *QType =
6477 new (Mem) ObjCObjectPointerType(Canonical, ObjectT);
6478
6479 Types.push_back(Elt: QType);
6480 ObjCObjectPointerTypes.InsertNode(N: QType, InsertPos);
6481 return QualType(QType, 0);
6482}
6483
6484/// getObjCInterfaceType - Return the unique reference to the type for the
6485/// specified ObjC interface decl. The list of protocols is optional.
6486QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
6487 ObjCInterfaceDecl *PrevDecl) const {
6488 if (Decl->TypeForDecl)
6489 return QualType(Decl->TypeForDecl, 0);
6490
6491 if (PrevDecl) {
6492 assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl");
6493 Decl->TypeForDecl = PrevDecl->TypeForDecl;
6494 return QualType(PrevDecl->TypeForDecl, 0);
6495 }
6496
6497 // Prefer the definition, if there is one.
6498 if (const ObjCInterfaceDecl *Def = Decl->getDefinition())
6499 Decl = Def;
6500
6501 void *Mem = Allocate(Size: sizeof(ObjCInterfaceType), Align: alignof(ObjCInterfaceType));
6502 auto *T = new (Mem) ObjCInterfaceType(Decl);
6503 Decl->TypeForDecl = T;
6504 Types.push_back(Elt: T);
6505 return QualType(T, 0);
6506}
6507
6508/// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique
6509/// TypeOfExprType AST's (since expression's are never shared). For example,
6510/// multiple declarations that refer to "typeof(x)" all contain different
6511/// DeclRefExpr's. This doesn't effect the type checker, since it operates
6512/// on canonical type's (which are always unique).
6513QualType ASTContext::getTypeOfExprType(Expr *tofExpr, TypeOfKind Kind) const {
6514 TypeOfExprType *toe;
6515 if (tofExpr->isTypeDependent()) {
6516 llvm::FoldingSetNodeID ID;
6517 DependentTypeOfExprType::Profile(ID, Context: *this, E: tofExpr,
6518 IsUnqual: Kind == TypeOfKind::Unqualified);
6519
6520 void *InsertPos = nullptr;
6521 DependentTypeOfExprType *Canon =
6522 DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos);
6523 if (Canon) {
6524 // We already have a "canonical" version of an identical, dependent
6525 // typeof(expr) type. Use that as our canonical type.
6526 toe = new (*this, alignof(TypeOfExprType)) TypeOfExprType(
6527 *this, tofExpr, Kind, QualType((TypeOfExprType *)Canon, 0));
6528 } else {
6529 // Build a new, canonical typeof(expr) type.
6530 Canon = new (*this, alignof(DependentTypeOfExprType))
6531 DependentTypeOfExprType(*this, tofExpr, Kind);
6532 DependentTypeOfExprTypes.InsertNode(N: Canon, InsertPos);
6533 toe = Canon;
6534 }
6535 } else {
6536 QualType Canonical = getCanonicalType(T: tofExpr->getType());
6537 toe = new (*this, alignof(TypeOfExprType))
6538 TypeOfExprType(*this, tofExpr, Kind, Canonical);
6539 }
6540 Types.push_back(Elt: toe);
6541 return QualType(toe, 0);
6542}
6543
6544/// getTypeOfType - Unlike many "get<Type>" functions, we don't unique
6545/// TypeOfType nodes. The only motivation to unique these nodes would be
6546/// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be
6547/// an issue. This doesn't affect the type checker, since it operates
6548/// on canonical types (which are always unique).
6549QualType ASTContext::getTypeOfType(QualType tofType, TypeOfKind Kind) const {
6550 QualType Canonical = getCanonicalType(T: tofType);
6551 auto *tot = new (*this, alignof(TypeOfType))
6552 TypeOfType(*this, tofType, Canonical, Kind);
6553 Types.push_back(Elt: tot);
6554 return QualType(tot, 0);
6555}
6556
6557/// getReferenceQualifiedType - Given an expr, will return the type for
6558/// that expression, as in [dcl.type.simple]p4 but without taking id-expressions
6559/// and class member access into account.
6560QualType ASTContext::getReferenceQualifiedType(const Expr *E) const {
6561 // C++11 [dcl.type.simple]p4:
6562 // [...]
6563 QualType T = E->getType();
6564 switch (E->getValueKind()) {
6565 // - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the
6566 // type of e;
6567 case VK_XValue:
6568 return getRValueReferenceType(T);
6569 // - otherwise, if e is an lvalue, decltype(e) is T&, where T is the
6570 // type of e;
6571 case VK_LValue:
6572 return getLValueReferenceType(T);
6573 // - otherwise, decltype(e) is the type of e.
6574 case VK_PRValue:
6575 return T;
6576 }
6577 llvm_unreachable("Unknown value kind");
6578}
6579
/// Unlike many "get<Type>" functions, we don't unique DecltypeType
/// nodes. This would never be helpful, since each such type has its own
/// expression, and would not give a significant memory saving, since there
/// is an Expr tree under each such type.
QualType ASTContext::getDecltypeType(Expr *E, QualType UnderlyingType) const {
  // C++11 [temp.type]p2:
  //   If an expression e involves a template parameter, decltype(e) denotes a
  //   unique dependent type. Two such decltype-specifiers refer to the same
  //   type only if their expressions are equivalent (14.5.6.1).
  QualType CanonType;
  if (!E->isInstantiationDependent()) {
    // Non-dependent: the canonical type is the canonical form of the
    // already-computed underlying type.
    CanonType = getCanonicalType(T: UnderlyingType);
  } else if (!UnderlyingType.isNull()) {
    // Dependent, but an underlying type is known: canonicalize through the
    // underlying-type-free (purely dependent) form built below.
    CanonType = getDecltypeType(E, UnderlyingType: QualType());
  } else {
    // Dependent with no underlying type: unique on the expression so that
    // equivalent dependent decltype(e) specifiers share one canonical type.
    llvm::FoldingSetNodeID ID;
    DependentDecltypeType::Profile(ID, Context: *this, E);

    void *InsertPos = nullptr;
    if (DependentDecltypeType *Canon =
            DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos))
      return QualType(Canon, 0);

    // Build a new, canonical decltype(expr) type.
    auto *DT =
        new (*this, alignof(DependentDecltypeType)) DependentDecltypeType(E);
    DependentDecltypeTypes.InsertNode(N: DT, InsertPos);
    Types.push_back(Elt: DT);
    return QualType(DT, 0);
  }
  // Build the (non-uniqued) sugared node carrying the expression, underlying
  // type, and the canonical type computed above.
  auto *DT = new (*this, alignof(DecltypeType))
      DecltypeType(E, UnderlyingType, CanonType);
  Types.push_back(Elt: DT);
  return QualType(DT, 0);
}
6615
/// Build a pack-indexing type 'Pattern...[IndexExpr]'.
///
/// \param Pattern the pattern type being indexed.
/// \param IndexExpr the index expression.
/// \param FullySubstituted whether \p Expansions is the complete expansion.
/// \param Expansions the (possibly partial) pack expansions, tail-allocated
///        into the node.
/// \param Index the constant index into \p Expansions, if already known.
QualType ASTContext::getPackIndexingType(QualType Pattern, Expr *IndexExpr,
                                         bool FullySubstituted,
                                         ArrayRef<QualType> Expansions,
                                         UnsignedOrNone Index) const {
  QualType Canonical;
  if (FullySubstituted && Index) {
    // The selected expansion is known, so the canonical type is simply the
    // canonical type of that expansion.
    Canonical = getCanonicalType(T: Expansions[*Index]);
  } else {
    // Otherwise unique a dependent canonical node on the canonical pattern,
    // the index expression, and the expansions seen so far.
    llvm::FoldingSetNodeID ID;
    PackIndexingType::Profile(ID, Context: *this, Pattern: Pattern.getCanonicalType(), E: IndexExpr,
                              FullySubstituted, Expansions);
    void *InsertPos = nullptr;
    PackIndexingType *Canon =
        DependentPackIndexingTypes.FindNodeOrInsertPos(ID, InsertPos);
    if (!Canon) {
      void *Mem = Allocate(
          Size: PackIndexingType::totalSizeToAlloc<QualType>(Counts: Expansions.size()),
          Align: TypeAlignment);
      Canon =
          new (Mem) PackIndexingType(QualType(), Pattern.getCanonicalType(),
                                     IndexExpr, FullySubstituted, Expansions);
      DependentPackIndexingTypes.InsertNode(N: Canon, InsertPos);
    }
    Canonical = QualType(Canon, 0);
  }

  // Build the sugared node pointing at the canonical type computed above.
  void *Mem =
      Allocate(Size: PackIndexingType::totalSizeToAlloc<QualType>(Counts: Expansions.size()),
               Align: TypeAlignment);
  auto *T = new (Mem) PackIndexingType(Canonical, Pattern, IndexExpr,
                                       FullySubstituted, Expansions);
  Types.push_back(Elt: T);
  return QualType(T, 0);
}
6650
/// getUnaryTransformType - Return the uniqued reference to a unary type
/// transformation (UnaryTransformType::UTTKind) applied to \p BaseType.
/// (Note: despite an older comment to the contrary, these nodes ARE uniqued
/// in the UnaryTransformTypes folding set.)
QualType
ASTContext::getUnaryTransformType(QualType BaseType, QualType UnderlyingType,
                                  UnaryTransformType::UTTKind Kind) const {

  llvm::FoldingSetNodeID ID;
  UnaryTransformType::Profile(ID, BaseType, UnderlyingType, UKind: Kind);

  void *InsertPos = nullptr;
  if (UnaryTransformType *UT =
          UnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(UT, 0);

  QualType CanonType;
  if (!BaseType->isDependentType()) {
    // Non-dependent: the transformation result is known; canonicalize it.
    CanonType = UnderlyingType.getCanonicalType();
  } else {
    // Dependent: there is no computed result yet; canonicalize on the
    // canonical base type (with a null underlying type).
    assert(UnderlyingType.isNull() || BaseType == UnderlyingType);
    UnderlyingType = QualType();
    if (QualType CanonBase = BaseType.getCanonicalType();
        BaseType != CanonBase) {
      CanonType = getUnaryTransformType(BaseType: CanonBase, UnderlyingType: QualType(), Kind);
      assert(CanonType.isCanonical());

      // Find the insertion position again: the recursive call inserted into
      // the folding set and may have invalidated InsertPos.
      [[maybe_unused]] UnaryTransformType *UT =
          UnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!UT && "broken canonicalization");
    }
  }

  auto *UT = new (*this, alignof(UnaryTransformType))
      UnaryTransformType(BaseType, UnderlyingType, Kind, CanonType);
  UnaryTransformTypes.InsertNode(N: UT, InsertPos);
  Types.push_back(Elt: UT);
  return QualType(UT, 0);
}
6689
/// Build (and unique) an 'auto'/deduced type with the given deduced type,
/// keyword, dependence, and optional type-constraint. \p IsCanon is true only
/// on the internal recursive call that builds the canonical constrained form,
/// to stop the recursion from re-canonicalizing.
QualType ASTContext::getAutoTypeInternal(
    QualType DeducedType, AutoTypeKeyword Keyword, bool IsDependent,
    bool IsPack, TemplateDecl *TypeConstraintConcept,
    ArrayRef<TemplateArgument> TypeConstraintArgs, bool IsCanon) const {
  // Plain undeduced 'auto' is a singleton; reuse it.
  if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto &&
      !TypeConstraintConcept && !IsDependent)
    return getAutoDeductType();

  // Look in the folding set for an existing type.
  llvm::FoldingSetNodeID ID;
  bool IsDeducedDependent =
      isa_and_nonnull<TemplateTemplateParmDecl>(Val: TypeConstraintConcept) ||
      (!DeducedType.isNull() && DeducedType->isDependentType());
  AutoType::Profile(ID, Context: *this, Deduced: DeducedType, Keyword,
                    IsDependent: IsDependent || IsDeducedDependent, CD: TypeConstraintConcept,
                    Arguments: TypeConstraintArgs);
  if (auto const AT_iter = AutoTypes.find(Val: ID); AT_iter != AutoTypes.end())
    return QualType(AT_iter->getSecond(), 0);

  // Compute the canonical type, unless we are already building it.
  QualType Canon;
  if (!IsCanon) {
    if (!DeducedType.isNull()) {
      Canon = DeducedType.getCanonicalType();
    } else if (TypeConstraintConcept) {
      // Canonicalize the constraint: canonical concept declaration plus
      // canonical template arguments. Only recurse if something changed.
      bool AnyNonCanonArgs = false;
      auto *CanonicalConcept =
          cast<TemplateDecl>(Val: TypeConstraintConcept->getCanonicalDecl());
      auto CanonicalConceptArgs = ::getCanonicalTemplateArguments(
          C: *this, Args: TypeConstraintArgs, AnyNonCanonArgs);
      if (CanonicalConcept != TypeConstraintConcept || AnyNonCanonArgs) {
        Canon = getAutoTypeInternal(DeducedType: QualType(), Keyword, IsDependent, IsPack,
                                    TypeConstraintConcept: CanonicalConcept, TypeConstraintArgs: CanonicalConceptArgs,
                                    /*IsCanon=*/true);
      }
    }
  }

  // The constraint arguments are tail-allocated after the AutoType node.
  void *Mem = Allocate(Size: sizeof(AutoType) +
                           sizeof(TemplateArgument) * TypeConstraintArgs.size(),
                       Align: alignof(AutoType));
  auto *AT = new (Mem) AutoType(
      DeducedType, Keyword,
      (IsDependent ? TypeDependence::DependentInstantiation
                   : TypeDependence::None) |
          (IsPack ? TypeDependence::UnexpandedPack : TypeDependence::None),
      Canon, TypeConstraintConcept, TypeConstraintArgs);
#ifndef NDEBUG
  // Sanity check: the built node must profile identically to the lookup key.
  llvm::FoldingSetNodeID InsertedID;
  AT->Profile(InsertedID, *this);
  assert(InsertedID == ID && "ID does not match");
#endif
  Types.push_back(Elt: AT);
  AutoTypes.try_emplace(Key: ID, Args&: AT);
  return QualType(AT, 0);
}
6745
/// getAutoType - Return the uniqued reference to the 'auto' type which has been
/// deduced to the given type, or to the canonical undeduced 'auto' type, or the
/// canonical deduced-but-dependent 'auto' type.
QualType
ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword,
                        bool IsDependent, bool IsPack,
                        TemplateDecl *TypeConstraintConcept,
                        ArrayRef<TemplateArgument> TypeConstraintArgs) const {
  // Validate the caller's invariants, then defer to the uniquing helper.
  assert((!IsPack || IsDependent) && "only use IsPack for a dependent pack");
  assert((!IsDependent || DeducedType.isNull()) &&
         "A dependent auto should be undeduced");
  return getAutoTypeInternal(DeducedType, Keyword, IsDependent, IsPack,
                             TypeConstraintConcept, TypeConstraintArgs);
}
6760
/// Remove any type-constraint from a top-level 'auto' or 'decltype(auto)' in
/// \p T, preserving the original qualifiers; returns \p T unchanged otherwise.
QualType ASTContext::getUnconstrainedType(QualType T) const {
  QualType CanonT = T.getNonPackExpansionType().getCanonicalType();

  // Remove a type-constraint from a top-level auto or decltype(auto).
  if (auto *AT = CanonT->getAs<AutoType>()) {
    if (!AT->isConstrained())
      return T;
    // Rebuild the auto type without its constraint, keeping the keyword,
    // dependence and pack-ness, then re-apply T's qualifiers.
    return getQualifiedType(T: getAutoType(DeducedType: QualType(), Keyword: AT->getKeyword(),
                                        IsDependent: AT->isDependentType(),
                                        IsPack: AT->containsUnexpandedParameterPack()),
                            Qs: T.getQualifiers());
  }

  // FIXME: We only support constrained auto at the top level in the type of a
  // non-type template parameter at the moment. Once we lift that restriction,
  // we'll need to recursively build types containing auto here.
  assert(!CanonT->getContainedAutoType() ||
         !CanonT->getContainedAutoType()->isConstrained());
  return T;
}
6781
/// Build (and unique) a DeducedTemplateSpecializationType for \p Template with
/// the given deduced type; \p Canon supplies the precomputed canonical type.
QualType ASTContext::getDeducedTemplateSpecializationTypeInternal(
    ElaboratedTypeKeyword Keyword, TemplateName Template, QualType DeducedType,
    bool IsDependent, QualType Canon) const {
  // Look in the folding set for an existing type.
  void *InsertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  DeducedTemplateSpecializationType::Profile(ID, Keyword, Template, Deduced: DeducedType,
                                             IsDependent);
  if (DeducedTemplateSpecializationType *DTST =
          DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(DTST, 0);

  auto *DTST = new (*this, alignof(DeducedTemplateSpecializationType))
      DeducedTemplateSpecializationType(Keyword, Template, DeducedType,
                                        IsDependent, Canon);

#ifndef NDEBUG
  // Sanity check: the built node must profile identically to the lookup key.
  llvm::FoldingSetNodeID TempID;
  DTST->Profile(TempID);
  assert(ID == TempID && "ID does not match");
#endif
  Types.push_back(Elt: DTST);
  DeducedTemplateSpecializationTypes.InsertNode(N: DTST, InsertPos);
  return QualType(DTST, 0);
}
6807
6808/// Return the uniqued reference to the deduced template specialization type
6809/// which has been deduced to the given type, or to the canonical undeduced
6810/// such type, or the canonical deduced-but-dependent such type.
6811QualType ASTContext::getDeducedTemplateSpecializationType(
6812 ElaboratedTypeKeyword Keyword, TemplateName Template, QualType DeducedType,
6813 bool IsDependent) const {
6814 // FIXME: This could save an extra hash table lookup if it handled all the
6815 // parameters already being canonical.
6816 // FIXME: Can this be formed from a DependentTemplateName, such that the
6817 // keyword should be part of the canonical type?
6818 QualType Canon =
6819 DeducedType.isNull()
6820 ? getDeducedTemplateSpecializationTypeInternal(
6821 Keyword: ElaboratedTypeKeyword::None, Template: getCanonicalTemplateName(Name: Template),
6822 DeducedType: QualType(), IsDependent, Canon: QualType())
6823 : DeducedType.getCanonicalType();
6824 return getDeducedTemplateSpecializationTypeInternal(
6825 Keyword, Template, DeducedType, IsDependent, Canon);
6826}
6827
/// getAtomicType - Return the uniqued reference to the atomic type for
/// the given value type.
QualType ASTContext::getAtomicType(QualType T) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  AtomicType::Profile(ID, T);

  void *InsertPos = nullptr;
  if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(AT, 0);

  // If the atomic value type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getAtomicType(T: getCanonicalType(T));

    // Get the new insert position for the node we care about: the recursive
    // call inserted into the folding set and may have invalidated InsertPos.
    AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, alignof(AtomicType)) AtomicType(T, Canonical);
  Types.push_back(Elt: New);
  AtomicTypes.InsertNode(N: New, InsertPos);
  return QualType(New, 0);
}
6855
6856/// getAutoDeductType - Get type pattern for deducing against 'auto'.
6857QualType ASTContext::getAutoDeductType() const {
6858 if (AutoDeductTy.isNull())
6859 AutoDeductTy = QualType(new (*this, alignof(AutoType))
6860 AutoType(QualType(), AutoTypeKeyword::Auto,
6861 TypeDependence::None, QualType(),
6862 /*concept*/ nullptr, /*args*/ {}),
6863 0);
6864 return AutoDeductTy;
6865}
6866
/// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'.
QualType ASTContext::getAutoRRefDeductType() const {
  // Built lazily on first use from the cached 'auto' deduction pattern.
  if (AutoRRefDeductTy.isNull())
    AutoRRefDeductTy = getRValueReferenceType(T: getAutoDeductType());
  assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern");
  return AutoRRefDeductTy;
}
6874
/// getSizeType - Return the unique type for "size_t" (C99 7.17), the result
/// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and
/// needs to agree with the definition in <stddef.h>.
QualType ASTContext::getSizeType() const {
  // Modeled as a predefined sugar type rather than a plain builtin; see
  // getCanonicalSizeType() for the underlying target type.
  return getPredefinedSugarType(KD: PredefinedSugarType::Kind::SizeT);
}
6881
/// Return the canonical (unsugared, built-in) type corresponding to the
/// target's "size_t".
CanQualType ASTContext::getCanonicalSizeType() const {
  return getFromTargetType(Type: Target->getSizeType());
}
6885
/// Return the unique signed counterpart of the integer type
/// corresponding to size_t.
QualType ASTContext::getSignedSizeType() const {
  // Modeled as a predefined sugar type, like getSizeType().
  return getPredefinedSugarType(KD: PredefinedSugarType::Kind::SignedSizeT);
}
6891
/// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17)
/// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9).
QualType ASTContext::getPointerDiffType() const {
  // Modeled as a predefined sugar type, like getSizeType().
  return getPredefinedSugarType(KD: PredefinedSugarType::Kind::PtrdiffT);
}
6897
/// Return the unique unsigned counterpart of "ptrdiff_t"
/// integer type. The standard (C11 7.21.6.1p7) refers to this type
/// in the definition of %tu format specifier.
QualType ASTContext::getUnsignedPointerDiffType() const {
  // Unlike getPointerDiffType(), this is queried directly from the target,
  // for the default address space.
  return getFromTargetType(Type: Target->getUnsignedPtrDiffType(AddrSpace: LangAS::Default));
}
6904
/// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5).
/// Target dependent; mapped from the target's intmax_t to a builtin type.
CanQualType ASTContext::getIntMaxType() const {
  return getFromTargetType(Type: Target->getIntMaxType());
}
6909
/// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5).
/// Target dependent; mapped from the target's uintmax_t to a builtin type.
CanQualType ASTContext::getUIntMaxType() const {
  return getFromTargetType(Type: Target->getUIntMaxType());
}
6914
/// getSignedWCharType - Return the type of "signed wchar_t".
/// Used when in C++, as a GCC extension.
QualType ASTContext::getSignedWCharType() const {
  // FIXME: derive from "Target" ?
  // Currently just returns plain wchar_t rather than a distinct signed type.
  return WCharTy;
}
6921
/// getUnsignedWCharType - Return the type of "unsigned wchar_t".
/// Used when in C++, as a GCC extension.
QualType ASTContext::getUnsignedWCharType() const {
  // FIXME: derive from "Target" ?
  // Currently returns 'unsigned int' rather than a distinct unsigned wchar_t.
  return UnsignedIntTy;
}
6928
/// Return the unique type for "intptr_t", as defined by the target.
QualType ASTContext::getIntPtrType() const {
  return getFromTargetType(Type: Target->getIntPtrType());
}
6932
/// Return the unique type for "uintptr_t": the unsigned counterpart of
/// intptr_t.
QualType ASTContext::getUIntPtrType() const {
  return getCorrespondingUnsignedType(T: getIntPtrType());
}
6936
/// Return the unique type for "pid_t" defined in
/// <sys/types.h>. We need this to compute the correct type for vfork().
QualType ASTContext::getProcessIDType() const {
  // Queried from TargetInfo, since pid_t's underlying type is per-target.
  return getFromTargetType(Type: Target->getProcessIDType());
}
6942
6943//===----------------------------------------------------------------------===//
6944// Type Operators
6945//===----------------------------------------------------------------------===//
6946
6947CanQualType ASTContext::getCanonicalParamType(QualType T) const {
6948 // Push qualifiers into arrays, and then discard any remaining
6949 // qualifiers.
6950 T = getCanonicalType(T);
6951 T = getVariableArrayDecayedType(type: T);
6952 const Type *Ty = T.getTypePtr();
6953 QualType Result;
6954 if (getLangOpts().HLSL && isa<ConstantArrayType>(Val: Ty)) {
6955 Result = getArrayParameterType(Ty: QualType(Ty, 0));
6956 } else if (isa<ArrayType>(Val: Ty)) {
6957 Result = getArrayDecayedType(T: QualType(Ty,0));
6958 } else if (isa<FunctionType>(Val: Ty)) {
6959 Result = getPointerType(T: QualType(Ty, 0));
6960 } else {
6961 Result = QualType(Ty, 0);
6962 }
6963
6964 return CanQualType::CreateUnsafe(Other: Result);
6965}
6966
/// Strip qualifiers from \p type, looking through arrays: for array types,
/// the element's qualifiers are recursively collected into \p quals and an
/// array of the unqualified element type is rebuilt and returned.
///
/// \param[out] quals receives the qualifiers removed from the type.
QualType ASTContext::getUnqualifiedArrayType(QualType type,
                                             Qualifiers &quals) const {
  SplitQualType splitType = type.getSplitUnqualifiedType();

  // FIXME: getSplitUnqualifiedType() actually walks all the way to
  // the unqualified desugared type and then drops it on the floor.
  // We then have to strip that sugar back off with
  // getUnqualifiedDesugaredType(), which is silly.
  const auto *AT =
      dyn_cast<ArrayType>(Val: splitType.Ty->getUnqualifiedDesugaredType());

  // If we don't have an array, just use the results in splitType.
  if (!AT) {
    quals = splitType.Quals;
    return QualType(splitType.Ty, 0);
  }

  // Otherwise, recurse on the array's element type.
  QualType elementType = AT->getElementType();
  QualType unqualElementType = getUnqualifiedArrayType(type: elementType, quals);

  // If that didn't change the element type, AT has no qualifiers, so we
  // can just use the results in splitType.
  if (elementType == unqualElementType) {
    assert(quals.empty()); // from the recursive call
    quals = splitType.Quals;
    return QualType(splitType.Ty, 0);
  }

  // Otherwise, add in the qualifiers from the outermost type, then
  // build the type back up.
  quals.addConsistentQualifiers(qs: splitType.Quals);

  // Rebuild the same kind of array node around the unqualified element type.
  if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: AT)) {
    return getConstantArrayType(EltTy: unqualElementType, ArySizeIn: CAT->getSize(),
                                SizeExpr: CAT->getSizeExpr(), ASM: CAT->getSizeModifier(), IndexTypeQuals: 0);
  }

  if (const auto *IAT = dyn_cast<IncompleteArrayType>(Val: AT)) {
    return getIncompleteArrayType(elementType: unqualElementType, ASM: IAT->getSizeModifier(), elementTypeQuals: 0);
  }

  if (const auto *VAT = dyn_cast<VariableArrayType>(Val: AT)) {
    return getVariableArrayType(EltTy: unqualElementType, NumElts: VAT->getSizeExpr(),
                                ASM: VAT->getSizeModifier(),
                                IndexTypeQuals: VAT->getIndexTypeCVRQualifiers());
  }

  const auto *DSAT = cast<DependentSizedArrayType>(Val: AT);
  return getDependentSizedArrayType(elementType: unqualElementType, numElements: DSAT->getSizeExpr(),
                                    ASM: DSAT->getSizeModifier(), elementTypeQuals: 0);
}
7019
/// Attempt to unwrap two types that may both be array types with the same bound
/// (or both be array types of unknown bound) for the purpose of comparing the
/// cv-decomposition of two types per C++ [conv.qual].
///
/// \p T1 and \p T2 are updated in place; unwrapping stops at the first level
/// where the array shapes no longer correspond.
///
/// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in
/// C++20 [conv.qual], if permitted by the current language mode.
void ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2,
                                         bool AllowPiMismatch) const {
  while (true) {
    auto *AT1 = getAsArrayType(T: T1);
    if (!AT1)
      return;

    auto *AT2 = getAsArrayType(T: T2);
    if (!AT2)
      return;

    // If we don't have two array types with the same constant bound nor two
    // incomplete array types, we've unwrapped everything we can.
    // C++20 also permits one type to be a constant array type and the other
    // to be an incomplete array type.
    // FIXME: Consider also unwrapping array of unknown bound and VLA.
    if (auto *CAT1 = dyn_cast<ConstantArrayType>(Val: AT1)) {
      auto *CAT2 = dyn_cast<ConstantArrayType>(Val: AT2);
      if (!((CAT2 && CAT1->getSize() == CAT2->getSize()) ||
            (AllowPiMismatch && getLangOpts().CPlusPlus20 &&
             isa<IncompleteArrayType>(Val: AT2))))
        return;
    } else if (isa<IncompleteArrayType>(Val: AT1)) {
      if (!(isa<IncompleteArrayType>(Val: AT2) ||
            (AllowPiMismatch && getLangOpts().CPlusPlus20 &&
             isa<ConstantArrayType>(Val: AT2))))
        return;
    } else {
      return;
    }

    // The arrays correspond at this level; continue with the element types.
    T1 = AT1->getElementType();
    T2 = AT2->getElementType();
  }
}
7061
/// Attempt to unwrap two types that may be similar (C++ [conv.qual]).
///
/// If T1 and T2 are both pointer types of the same kind, or both array types
/// with the same bound, unwraps layers from T1 and T2 until a pointer type is
/// unwrapped. Top-level qualifiers on T1 and T2 are ignored.
///
/// This function will typically be called in a loop that successively
/// "unwraps" pointer and pointer-to-member types to compare them at each
/// level.
///
/// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in
/// C++20 [conv.qual], if permitted by the current language mode.
///
/// \return \c true if a pointer type was unwrapped, \c false if we reached a
/// pair of types that can't be unwrapped further.
bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2,
                                    bool AllowPiMismatch) const {
  // First strip any matching array layers.
  UnwrapSimilarArrayTypes(T1, T2, AllowPiMismatch);

  const auto *T1PtrType = T1->getAs<PointerType>();
  const auto *T2PtrType = T2->getAs<PointerType>();
  if (T1PtrType && T2PtrType) {
    T1 = T1PtrType->getPointeeType();
    T2 = T2PtrType->getPointeeType();
    return true;
  }

  // Member pointers only unwrap if their class and qualifier match (up to
  // canonical declarations / canonical qualifiers).
  if (const auto *T1MPType = T1->getAs<MemberPointerType>(),
                 *T2MPType = T2->getAs<MemberPointerType>();
      T1MPType && T2MPType) {
    if (auto *RD1 = T1MPType->getMostRecentCXXRecordDecl(),
              *RD2 = T2MPType->getMostRecentCXXRecordDecl();
        RD1 != RD2 && RD1->getCanonicalDecl() != RD2->getCanonicalDecl())
      return false;
    if (T1MPType->getQualifier().getCanonical() !=
        T2MPType->getQualifier().getCanonical())
      return false;
    T1 = T1MPType->getPointeeType();
    T2 = T2MPType->getPointeeType();
    return true;
  }

  if (getLangOpts().ObjC) {
    const auto *T1OPType = T1->getAs<ObjCObjectPointerType>();
    const auto *T2OPType = T2->getAs<ObjCObjectPointerType>();
    if (T1OPType && T2OPType) {
      T1 = T1OPType->getPointeeType();
      T2 = T2OPType->getPointeeType();
      return true;
    }
  }

  // FIXME: Block pointers, too?

  return false;
}
7118
7119bool ASTContext::hasSimilarType(QualType T1, QualType T2) const {
7120 while (true) {
7121 Qualifiers Quals;
7122 T1 = getUnqualifiedArrayType(type: T1, quals&: Quals);
7123 T2 = getUnqualifiedArrayType(type: T2, quals&: Quals);
7124 if (hasSameType(T1, T2))
7125 return true;
7126 if (!UnwrapSimilarTypes(T1, T2))
7127 return false;
7128 }
7129}
7130
7131bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) {
7132 while (true) {
7133 Qualifiers Quals1, Quals2;
7134 T1 = getUnqualifiedArrayType(type: T1, quals&: Quals1);
7135 T2 = getUnqualifiedArrayType(type: T2, quals&: Quals2);
7136
7137 Quals1.removeCVRQualifiers();
7138 Quals2.removeCVRQualifiers();
7139 if (Quals1 != Quals2)
7140 return false;
7141
7142 if (hasSameType(T1, T2))
7143 return true;
7144
7145 if (!UnwrapSimilarTypes(T1, T2, /*AllowPiMismatch*/ false))
7146 return false;
7147 }
7148}
7149
/// Compute the DeclarationNameInfo naming the given template, for a use at
/// \p NameLoc. Detailed source-location info (DNLoc) is mostly not filled in
/// yet; see the CHECKME/FIXME notes below.
DeclarationNameInfo
ASTContext::getNameForTemplate(TemplateName Name,
                               SourceLocation NameLoc) const {
  switch (Name.getKind()) {
  case TemplateName::QualifiedTemplate:
  case TemplateName::Template:
    // DNInfo work in progress: CHECKME: what about DNLoc?
    return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(),
                               NameLoc);

  case TemplateName::OverloadedTemplate: {
    // Use the name of the first template in the overload set.
    OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate();
    // DNInfo work in progress: CHECKME: what about DNLoc?
    return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc);
  }

  case TemplateName::AssumedTemplate: {
    AssumedTemplateStorage *Storage = Name.getAsAssumedTemplateName();
    return DeclarationNameInfo(Storage->getDeclName(), NameLoc);
  }

  case TemplateName::DependentTemplate: {
    // A dependent name is either an identifier or an overloaded operator.
    DependentTemplateName *DTN = Name.getAsDependentTemplateName();
    IdentifierOrOverloadedOperator TN = DTN->getName();
    DeclarationName DName;
    if (const IdentifierInfo *II = TN.getIdentifier()) {
      DName = DeclarationNames.getIdentifier(ID: II);
      return DeclarationNameInfo(DName, NameLoc);
    } else {
      DName = DeclarationNames.getCXXOperatorName(Op: TN.getOperator());
      // DNInfo work in progress: FIXME: source locations?
      DeclarationNameLoc DNLoc =
          DeclarationNameLoc::makeCXXOperatorNameLoc(Range: SourceRange());
      return DeclarationNameInfo(DName, NameLoc, DNLoc);
    }
  }

  case TemplateName::SubstTemplateTemplateParm: {
    // Name the substituted template template parameter.
    SubstTemplateTemplateParmStorage *subst
      = Name.getAsSubstTemplateTemplateParm();
    return DeclarationNameInfo(subst->getParameter()->getDeclName(),
                               NameLoc);
  }

  case TemplateName::SubstTemplateTemplateParmPack: {
    // Name the substituted template template parameter pack.
    SubstTemplateTemplateParmPackStorage *subst
      = Name.getAsSubstTemplateTemplateParmPack();
    return DeclarationNameInfo(subst->getParameterPack()->getDeclName(),
                               NameLoc);
  }
  case TemplateName::UsingTemplate:
    return DeclarationNameInfo(Name.getAsUsingShadowDecl()->getDeclName(),
                               NameLoc);
  case TemplateName::DeducedTemplate: {
    // Name the underlying template of the deduced template name.
    DeducedTemplateStorage *DTS = Name.getAsDeducedTemplateName();
    return getNameForTemplate(Name: DTS->getUnderlying(), NameLoc);
  }
  }

  llvm_unreachable("bad template name kind!");
}
7211
7212static const TemplateArgument *
7213getDefaultTemplateArgumentOrNone(const NamedDecl *P) {
7214 auto handleParam = [](auto *TP) -> const TemplateArgument * {
7215 if (!TP->hasDefaultArgument())
7216 return nullptr;
7217 return &TP->getDefaultArgument().getArgument();
7218 };
7219 switch (P->getKind()) {
7220 case NamedDecl::TemplateTypeParm:
7221 return handleParam(cast<TemplateTypeParmDecl>(Val: P));
7222 case NamedDecl::NonTypeTemplateParm:
7223 return handleParam(cast<NonTypeTemplateParmDecl>(Val: P));
7224 case NamedDecl::TemplateTemplateParm:
7225 return handleParam(cast<TemplateTemplateParmDecl>(Val: P));
7226 default:
7227 llvm_unreachable("Unexpected template parameter kind");
7228 }
7229}
7230
/// Produce the canonical form of \p Name: strip sugar-only nodes, map
/// declarations to their canonical declarations, and canonicalize any nested
/// template arguments.
///
/// \param IgnoreDeduced if true, DeducedTemplate nodes are stripped as sugar
/// as well.
TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name,
                                                  bool IgnoreDeduced) const {
  // Peel off sugar (qualified / using-shadow / substituted names, and
  // optionally deduced names) until no further desugaring is possible.
  while (std::optional<TemplateName> UnderlyingOrNone =
             Name.desugar(IgnoreDeduced))
    Name = *UnderlyingOrNone;

  switch (Name.getKind()) {
  case TemplateName::Template: {
    TemplateDecl *Template = Name.getAsTemplateDecl();
    if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Val: Template))
      Template = getCanonicalTemplateTemplateParmDecl(TTP);

    // The canonical template name is the canonical template declaration.
    return TemplateName(cast<TemplateDecl>(Val: Template->getCanonicalDecl()));
  }

  case TemplateName::OverloadedTemplate:
  case TemplateName::AssumedTemplate:
    llvm_unreachable("cannot canonicalize unresolved template");

  case TemplateName::DependentTemplate: {
    DependentTemplateName *DTN = Name.getAsDependentTemplateName();
    assert(DTN && "Non-dependent template names must refer to template decls.");
    NestedNameSpecifier Qualifier = DTN->getQualifier();
    NestedNameSpecifier CanonQualifier = Qualifier.getCanonical();
    // The canonical form always carries the 'template' keyword and a
    // canonical qualifier.
    if (Qualifier != CanonQualifier || !DTN->hasTemplateKeyword())
      return getDependentTemplateName(Name: {CanonQualifier, DTN->getName(),
                                            /*HasTemplateKeyword=*/true});
    return Name;
  }

  case TemplateName::SubstTemplateTemplateParmPack: {
    // Rebuild with the canonical argument pack and canonical associated decl.
    SubstTemplateTemplateParmPackStorage *subst =
        Name.getAsSubstTemplateTemplateParmPack();
    TemplateArgument canonArgPack =
        getCanonicalTemplateArgument(Arg: subst->getArgumentPack());
    return getSubstTemplateTemplateParmPack(
        ArgPack: canonArgPack, AssociatedDecl: subst->getAssociatedDecl()->getCanonicalDecl(),
        Index: subst->getIndex(), Final: subst->getFinal());
  }
  case TemplateName::DeducedTemplate: {
    // Only reachable when IgnoreDeduced is false (otherwise desugared above).
    assert(IgnoreDeduced == false);
    DeducedTemplateStorage *DTS = Name.getAsDeducedTemplateName();
    DefaultArguments DefArgs = DTS->getDefaultArguments();
    TemplateName Underlying = DTS->getUnderlying();

    TemplateName CanonUnderlying =
        getCanonicalTemplateName(Name: Underlying, /*IgnoreDeduced=*/true);
    bool NonCanonical = CanonUnderlying != Underlying;
    auto CanonArgs =
        getCanonicalTemplateArguments(C: *this, Args: DefArgs.Args, AnyNonCanonArgs&: NonCanonical);

    ArrayRef<NamedDecl *> Params =
        CanonUnderlying.getAsTemplateDecl()->getTemplateParameters()->asArray();
    assert(CanonArgs.size() <= Params.size());
    // A deduced template name which deduces the same default arguments already
    // declared in the underlying template is the same template as the
    // underlying template. We need to note any arguments which differ from
    // the corresponding declaration. If any argument differs, we must build a
    // deduced template name.
    for (int I = CanonArgs.size() - 1; I >= 0; --I) {
      const TemplateArgument *A = getDefaultTemplateArgumentOrNone(P: Params[I]);
      if (!A)
        break;
      auto CanonParamDefArg = getCanonicalTemplateArgument(Arg: *A);
      TemplateArgument &CanonDefArg = CanonArgs[I];
      if (CanonDefArg.structurallyEquals(Other: CanonParamDefArg))
        continue;
      // Keep popping from the back any default arguments which are the same.
      if (I == int(CanonArgs.size() - 1))
        CanonArgs.pop_back();
      NonCanonical = true;
    }
    return NonCanonical ? getDeducedTemplateName(
                              Underlying: CanonUnderlying,
                              /*DefaultArgs=*/{.StartPos: DefArgs.StartPos, .Args: CanonArgs})
                        : Name;
  }
  case TemplateName::UsingTemplate:
  case TemplateName::QualifiedTemplate:
  case TemplateName::SubstTemplateTemplateParm:
    // These are pure sugar and were stripped by the desugar loop above.
    llvm_unreachable("always sugar node");
  }

  llvm_unreachable("bad template name!");
}
7317
7318bool ASTContext::hasSameTemplateName(const TemplateName &X,
7319 const TemplateName &Y,
7320 bool IgnoreDeduced) const {
7321 return getCanonicalTemplateName(Name: X, IgnoreDeduced) ==
7322 getCanonicalTemplateName(Name: Y, IgnoreDeduced);
7323}
7324
7325bool ASTContext::isSameAssociatedConstraint(
7326 const AssociatedConstraint &ACX, const AssociatedConstraint &ACY) const {
7327 if (ACX.ArgPackSubstIndex != ACY.ArgPackSubstIndex)
7328 return false;
7329 if (!isSameConstraintExpr(XCE: ACX.ConstraintExpr, YCE: ACY.ConstraintExpr))
7330 return false;
7331 return true;
7332}
7333
7334bool ASTContext::isSameConstraintExpr(const Expr *XCE, const Expr *YCE) const {
7335 if (!XCE != !YCE)
7336 return false;
7337
7338 if (!XCE)
7339 return true;
7340
7341 llvm::FoldingSetNodeID XCEID, YCEID;
7342 XCE->Profile(ID&: XCEID, Context: *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true);
7343 YCE->Profile(ID&: YCEID, Context: *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true);
7344 return XCEID == YCEID;
7345}
7346
7347bool ASTContext::isSameTypeConstraint(const TypeConstraint *XTC,
7348 const TypeConstraint *YTC) const {
7349 if (!XTC != !YTC)
7350 return false;
7351
7352 if (!XTC)
7353 return true;
7354
7355 auto *NCX = XTC->getNamedConcept();
7356 auto *NCY = YTC->getNamedConcept();
7357 if (!NCX || !NCY || !isSameEntity(X: NCX, Y: NCY))
7358 return false;
7359 if (XTC->getConceptReference()->hasExplicitTemplateArgs() !=
7360 YTC->getConceptReference()->hasExplicitTemplateArgs())
7361 return false;
7362 if (XTC->getConceptReference()->hasExplicitTemplateArgs())
7363 if (XTC->getConceptReference()
7364 ->getTemplateArgsAsWritten()
7365 ->NumTemplateArgs !=
7366 YTC->getConceptReference()->getTemplateArgsAsWritten()->NumTemplateArgs)
7367 return false;
7368
7369 // Compare slowly by profiling.
7370 //
7371 // We couldn't compare the profiling result for the template
7372 // args here. Consider the following example in different modules:
7373 //
7374 // template <__integer_like _Tp, C<_Tp> Sentinel>
7375 // constexpr _Tp operator()(_Tp &&__t, Sentinel &&last) const {
7376 // return __t;
7377 // }
7378 //
7379 // When we compare the profiling result for `C<_Tp>` in different
7380 // modules, it will compare the type of `_Tp` in different modules.
7381 // However, the type of `_Tp` in different modules refer to different
7382 // types here naturally. So we couldn't compare the profiling result
7383 // for the template args directly.
7384 return isSameConstraintExpr(XCE: XTC->getImmediatelyDeclaredConstraint(),
7385 YCE: YTC->getImmediatelyDeclaredConstraint());
7386}
7387
7388bool ASTContext::isSameTemplateParameter(const NamedDecl *X,
7389 const NamedDecl *Y) const {
7390 if (X->getKind() != Y->getKind())
7391 return false;
7392
7393 if (auto *TX = dyn_cast<TemplateTypeParmDecl>(Val: X)) {
7394 auto *TY = cast<TemplateTypeParmDecl>(Val: Y);
7395 if (TX->isParameterPack() != TY->isParameterPack())
7396 return false;
7397 if (TX->hasTypeConstraint() != TY->hasTypeConstraint())
7398 return false;
7399 return isSameTypeConstraint(XTC: TX->getTypeConstraint(),
7400 YTC: TY->getTypeConstraint());
7401 }
7402
7403 if (auto *TX = dyn_cast<NonTypeTemplateParmDecl>(Val: X)) {
7404 auto *TY = cast<NonTypeTemplateParmDecl>(Val: Y);
7405 return TX->isParameterPack() == TY->isParameterPack() &&
7406 TX->getASTContext().hasSameType(T1: TX->getType(), T2: TY->getType()) &&
7407 isSameConstraintExpr(XCE: TX->getPlaceholderTypeConstraint(),
7408 YCE: TY->getPlaceholderTypeConstraint());
7409 }
7410
7411 auto *TX = cast<TemplateTemplateParmDecl>(Val: X);
7412 auto *TY = cast<TemplateTemplateParmDecl>(Val: Y);
7413 return TX->isParameterPack() == TY->isParameterPack() &&
7414 isSameTemplateParameterList(X: TX->getTemplateParameters(),
7415 Y: TY->getTemplateParameters());
7416}
7417
7418bool ASTContext::isSameTemplateParameterList(
7419 const TemplateParameterList *X, const TemplateParameterList *Y) const {
7420 if (X->size() != Y->size())
7421 return false;
7422
7423 for (unsigned I = 0, N = X->size(); I != N; ++I)
7424 if (!isSameTemplateParameter(X: X->getParam(Idx: I), Y: Y->getParam(Idx: I)))
7425 return false;
7426
7427 return isSameConstraintExpr(XCE: X->getRequiresClause(), YCE: Y->getRequiresClause());
7428}
7429
7430bool ASTContext::isSameDefaultTemplateArgument(const NamedDecl *X,
7431 const NamedDecl *Y) const {
7432 // If the type parameter isn't the same already, we don't need to check the
7433 // default argument further.
7434 if (!isSameTemplateParameter(X, Y))
7435 return false;
7436
7437 if (auto *TTPX = dyn_cast<TemplateTypeParmDecl>(Val: X)) {
7438 auto *TTPY = cast<TemplateTypeParmDecl>(Val: Y);
7439 if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument())
7440 return false;
7441
7442 return hasSameType(T1: TTPX->getDefaultArgument().getArgument().getAsType(),
7443 T2: TTPY->getDefaultArgument().getArgument().getAsType());
7444 }
7445
7446 if (auto *NTTPX = dyn_cast<NonTypeTemplateParmDecl>(Val: X)) {
7447 auto *NTTPY = cast<NonTypeTemplateParmDecl>(Val: Y);
7448 if (!NTTPX->hasDefaultArgument() || !NTTPY->hasDefaultArgument())
7449 return false;
7450
7451 Expr *DefaultArgumentX =
7452 NTTPX->getDefaultArgument().getArgument().getAsExpr()->IgnoreImpCasts();
7453 Expr *DefaultArgumentY =
7454 NTTPY->getDefaultArgument().getArgument().getAsExpr()->IgnoreImpCasts();
7455 llvm::FoldingSetNodeID XID, YID;
7456 DefaultArgumentX->Profile(ID&: XID, Context: *this, /*Canonical=*/true);
7457 DefaultArgumentY->Profile(ID&: YID, Context: *this, /*Canonical=*/true);
7458 return XID == YID;
7459 }
7460
7461 auto *TTPX = cast<TemplateTemplateParmDecl>(Val: X);
7462 auto *TTPY = cast<TemplateTemplateParmDecl>(Val: Y);
7463
7464 if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument())
7465 return false;
7466
7467 const TemplateArgument &TAX = TTPX->getDefaultArgument().getArgument();
7468 const TemplateArgument &TAY = TTPY->getDefaultArgument().getArgument();
7469 return hasSameTemplateName(X: TAX.getAsTemplate(), Y: TAY.getAsTemplate());
7470}
7471
7472static bool isSameQualifier(const NestedNameSpecifier X,
7473 const NestedNameSpecifier Y) {
7474 if (X == Y)
7475 return true;
7476 if (!X || !Y)
7477 return false;
7478
7479 auto Kind = X.getKind();
7480 if (Kind != Y.getKind())
7481 return false;
7482
7483 // FIXME: For namespaces and types, we're permitted to check that the entity
7484 // is named via the same tokens. We should probably do so.
7485 switch (Kind) {
7486 case NestedNameSpecifier::Kind::Namespace: {
7487 auto [NamespaceX, PrefixX] = X.getAsNamespaceAndPrefix();
7488 auto [NamespaceY, PrefixY] = Y.getAsNamespaceAndPrefix();
7489 if (!declaresSameEntity(D1: NamespaceX->getNamespace(),
7490 D2: NamespaceY->getNamespace()))
7491 return false;
7492 return isSameQualifier(X: PrefixX, Y: PrefixY);
7493 }
7494 case NestedNameSpecifier::Kind::Type: {
7495 const auto *TX = X.getAsType(), *TY = Y.getAsType();
7496 if (TX->getCanonicalTypeInternal() != TY->getCanonicalTypeInternal())
7497 return false;
7498 return isSameQualifier(X: TX->getPrefix(), Y: TY->getPrefix());
7499 }
7500 case NestedNameSpecifier::Kind::Null:
7501 case NestedNameSpecifier::Kind::Global:
7502 case NestedNameSpecifier::Kind::MicrosoftSuper:
7503 return true;
7504 }
7505 llvm_unreachable("unhandled qualifier kind");
7506}
7507
7508static bool hasSameCudaAttrs(const FunctionDecl *A, const FunctionDecl *B) {
7509 if (!A->getASTContext().getLangOpts().CUDA)
7510 return true; // Target attributes are overloadable in CUDA compilation only.
7511 if (A->hasAttr<CUDADeviceAttr>() != B->hasAttr<CUDADeviceAttr>())
7512 return false;
7513 if (A->hasAttr<CUDADeviceAttr>() && B->hasAttr<CUDADeviceAttr>())
7514 return A->hasAttr<CUDAHostAttr>() == B->hasAttr<CUDAHostAttr>();
7515 return true; // unattributed and __host__ functions are the same.
7516}
7517
/// Determine whether the attributes we can overload on are identical for A and
/// B. Will ignore any overloadable attrs represented in the type of A and B.
static bool hasSameOverloadableAttrs(const FunctionDecl *A,
                                     const FunctionDecl *B) {
  // Note that pass_object_size attributes are represented in the function's
  // ExtParameterInfo, so we don't need to check them here.

  llvm::FoldingSetNodeID Cand1ID, Cand2ID;
  auto AEnableIfAttrs = A->specific_attrs<EnableIfAttr>();
  auto BEnableIfAttrs = B->specific_attrs<EnableIfAttr>();

  // Walk both attribute sequences in lockstep; zip_longest yields
  // std::nullopt for the exhausted side, so a length mismatch shows up as a
  // missing element below.
  for (auto Pair : zip_longest(AEnableIfAttrs, BEnableIfAttrs)) {
    std::optional<EnableIfAttr *> Cand1A = std::get<0>(Pair);
    std::optional<EnableIfAttr *> Cand2A = std::get<1>(Pair);

    // Return false if the number of enable_if attributes is different.
    if (!Cand1A || !Cand2A)
      return false;

    // Reuse the node IDs across iterations rather than reconstructing them.
    Cand1ID.clear();
    Cand2ID.clear();

    (*Cand1A)->getCond()->Profile(Cand1ID, A->getASTContext(), true);
    (*Cand2A)->getCond()->Profile(Cand2ID, B->getASTContext(), true);

    // Return false if any of the enable_if expressions of A and B are
    // different.
    if (Cand1ID != Cand2ID)
      return false;
  }
  // enable_if attrs all matched; in CUDA mode the target attributes must
  // match as well.
  return hasSameCudaAttrs(A, B);
}
7550
/// Determine whether the declarations X and Y represent the same entity, for
/// the purpose of merging declarations across modules. After common checks
/// (name, redeclaration context, owning module), dispatches on the
/// declaration kind; unlisted kinds are conservatively treated as distinct.
bool ASTContext::isSameEntity(const NamedDecl *X, const NamedDecl *Y) const {
  // Caution: this function is called by the AST reader during deserialization,
  // so it cannot rely on AST invariants being met. Non-trivial accessors
  // should be avoided, along with any traversal of redeclaration chains.

  if (X == Y)
    return true;

  // Everything below assumes the declarations share a name.
  if (X->getDeclName() != Y->getDeclName())
    return false;

  // Must be in the same context.
  //
  // Note that we can't use DeclContext::Equals here, because the DeclContexts
  // could be two different declarations of the same function. (We will fix the
  // semantic DC to refer to the primary definition after merging.)
  if (!declaresSameEntity(cast<Decl>(X->getDeclContext()->getRedeclContext()),
                          cast<Decl>(Y->getDeclContext()->getRedeclContext())))
    return false;

  // If either X or Y are local to the owning module, they are only possible to
  // be the same entity if they are in the same module.
  if (X->isModuleLocal() || Y->isModuleLocal())
    if (!isInSameModule(X->getOwningModule(), Y->getOwningModule()))
      return false;

  // Two typedefs refer to the same entity if they have the same underlying
  // type.
  if (const auto *TypedefX = dyn_cast<TypedefNameDecl>(X))
    if (const auto *TypedefY = dyn_cast<TypedefNameDecl>(Y))
      return hasSameType(TypedefX->getUnderlyingType(),
                         TypedefY->getUnderlyingType());

  // Must have the same kind.
  if (X->getKind() != Y->getKind())
    return false;

  // Objective-C classes and protocols with the same name always match.
  if (isa<ObjCInterfaceDecl>(X) || isa<ObjCProtocolDecl>(X))
    return true;

  if (isa<ClassTemplateSpecializationDecl>(X)) {
    // No need to handle these here: we merge them when adding them to the
    // template.
    return false;
  }

  // Compatible tags match. struct/class/interface are interchangeable tag
  // kinds; enum and union must match exactly.
  if (const auto *TagX = dyn_cast<TagDecl>(X)) {
    const auto *TagY = cast<TagDecl>(Y);
    return (TagX->getTagKind() == TagY->getTagKind()) ||
           ((TagX->getTagKind() == TagTypeKind::Struct ||
             TagX->getTagKind() == TagTypeKind::Class ||
             TagX->getTagKind() == TagTypeKind::Interface) &&
            (TagY->getTagKind() == TagTypeKind::Struct ||
             TagY->getTagKind() == TagTypeKind::Class ||
             TagY->getTagKind() == TagTypeKind::Interface));
  }

  // Functions with the same type and linkage match.
  // FIXME: This needs to cope with merging of prototyped/non-prototyped
  // functions, etc.
  if (const auto *FuncX = dyn_cast<FunctionDecl>(X)) {
    const auto *FuncY = cast<FunctionDecl>(Y);
    if (const auto *CtorX = dyn_cast<CXXConstructorDecl>(X)) {
      const auto *CtorY = cast<CXXConstructorDecl>(Y);
      // Inheriting constructors must inherit the same base constructor.
      if (CtorX->getInheritedConstructor() &&
          !isSameEntity(CtorX->getInheritedConstructor().getConstructor(),
                        CtorY->getInheritedConstructor().getConstructor()))
        return false;
    }

    if (FuncX->isMultiVersion() != FuncY->isMultiVersion())
      return false;

    // Multiversioned functions with different feature strings are represented
    // as separate declarations.
    if (FuncX->isMultiVersion()) {
      const auto *TAX = FuncX->getAttr<TargetAttr>();
      const auto *TAY = FuncY->getAttr<TargetAttr>();
      assert(TAX && TAY && "Multiversion Function without target attribute");

      if (TAX->getFeaturesStr() != TAY->getFeaturesStr())
        return false;
    }

    // Per C++20 [temp.over.link]/4, friends in different classes are sometimes
    // not the same entity if they are constrained.
    if ((FuncX->isMemberLikeConstrainedFriend() ||
         FuncY->isMemberLikeConstrainedFriend()) &&
        !FuncX->getLexicalDeclContext()->Equals(
            FuncY->getLexicalDeclContext())) {
      return false;
    }

    if (!isSameAssociatedConstraint(FuncX->getTrailingRequiresClause(),
                                    FuncY->getTrailingRequiresClause()))
      return false;

    auto GetTypeAsWritten = [](const FunctionDecl *FD) {
      // Map to the first declaration that we've already merged into this one.
      // The TSI of redeclarations might not match (due to calling conventions
      // being inherited onto the type but not the TSI), but the TSI type of
      // the first declaration of the function should match across modules.
      FD = FD->getCanonicalDecl();
      return FD->getTypeSourceInfo() ? FD->getTypeSourceInfo()->getType()
                                     : FD->getType();
    };
    QualType XT = GetTypeAsWritten(FuncX), YT = GetTypeAsWritten(FuncY);
    if (!hasSameType(XT, YT)) {
      // We can get functions with different types on the redecl chain in C++17
      // if they have differing exception specifications and at least one of
      // the exception specs is unresolved.
      auto *XFPT = XT->getAs<FunctionProtoType>();
      auto *YFPT = YT->getAs<FunctionProtoType>();
      if (getLangOpts().CPlusPlus17 && XFPT && YFPT &&
          (isUnresolvedExceptionSpec(XFPT->getExceptionSpecType()) ||
           isUnresolvedExceptionSpec(YFPT->getExceptionSpecType())) &&
          hasSameFunctionTypeIgnoringExceptionSpec(XT, YT))
        return true;
      return false;
    }

    return FuncX->getLinkageInternal() == FuncY->getLinkageInternal() &&
           hasSameOverloadableAttrs(FuncX, FuncY);
  }

  // Variables with the same type and linkage match.
  if (const auto *VarX = dyn_cast<VarDecl>(X)) {
    const auto *VarY = cast<VarDecl>(Y);
    if (VarX->getLinkageInternal() == VarY->getLinkageInternal()) {
      // During deserialization, we might compare variables before we load
      // their types. Assume the types will end up being the same.
      if (VarX->getType().isNull() || VarY->getType().isNull())
        return true;

      if (hasSameType(VarX->getType(), VarY->getType()))
        return true;

      // We can get decls with different types on the redecl chain. Eg.
      // template <typename T> struct S { static T Var[]; }; // #1
      // template <typename T> T S<T>::Var[sizeof(T)]; // #2
      // Only? happens when completing an incomplete array type. In this case
      // when comparing #1 and #2 we should go through their element type.
      const ArrayType *VarXTy = getAsArrayType(VarX->getType());
      const ArrayType *VarYTy = getAsArrayType(VarY->getType());
      if (!VarXTy || !VarYTy)
        return false;
      if (VarXTy->isIncompleteArrayType() || VarYTy->isIncompleteArrayType())
        return hasSameType(VarXTy->getElementType(), VarYTy->getElementType());
    }
    return false;
  }

  // Namespaces with the same name and inlinedness match.
  if (const auto *NamespaceX = dyn_cast<NamespaceDecl>(X)) {
    const auto *NamespaceY = cast<NamespaceDecl>(Y);
    return NamespaceX->isInline() == NamespaceY->isInline();
  }

  // Identical template names and kinds match if their template parameter lists
  // and patterns match.
  if (const auto *TemplateX = dyn_cast<TemplateDecl>(X)) {
    const auto *TemplateY = cast<TemplateDecl>(Y);

    // ConceptDecl wouldn't be the same if their constraint expression differs.
    if (const auto *ConceptX = dyn_cast<ConceptDecl>(X)) {
      const auto *ConceptY = cast<ConceptDecl>(Y);
      if (!isSameConstraintExpr(ConceptX->getConstraintExpr(),
                                ConceptY->getConstraintExpr()))
        return false;
    }

    return isSameEntity(TemplateX->getTemplatedDecl(),
                        TemplateY->getTemplatedDecl()) &&
           isSameTemplateParameterList(TemplateX->getTemplateParameters(),
                                       TemplateY->getTemplateParameters());
  }

  // Fields with the same name and the same type match.
  if (const auto *FDX = dyn_cast<FieldDecl>(X)) {
    const auto *FDY = cast<FieldDecl>(Y);
    // FIXME: Also check the bitwidth is odr-equivalent, if any.
    return hasSameType(FDX->getType(), FDY->getType());
  }

  // Indirect fields with the same target field match.
  if (const auto *IFDX = dyn_cast<IndirectFieldDecl>(X)) {
    const auto *IFDY = cast<IndirectFieldDecl>(Y);
    return IFDX->getAnonField()->getCanonicalDecl() ==
           IFDY->getAnonField()->getCanonicalDecl();
  }

  // Enumerators with the same name match.
  if (isa<EnumConstantDecl>(X))
    // FIXME: Also check the value is odr-equivalent.
    return true;

  // Using shadow declarations with the same target match.
  if (const auto *USX = dyn_cast<UsingShadowDecl>(X)) {
    const auto *USY = cast<UsingShadowDecl>(Y);
    return declaresSameEntity(USX->getTargetDecl(), USY->getTargetDecl());
  }

  // Using declarations with the same qualifier match. (We already know that
  // the name matches.)
  if (const auto *UX = dyn_cast<UsingDecl>(X)) {
    const auto *UY = cast<UsingDecl>(Y);
    return isSameQualifier(UX->getQualifier(), UY->getQualifier()) &&
           UX->hasTypename() == UY->hasTypename() &&
           UX->isAccessDeclaration() == UY->isAccessDeclaration();
  }
  if (const auto *UX = dyn_cast<UnresolvedUsingValueDecl>(X)) {
    const auto *UY = cast<UnresolvedUsingValueDecl>(Y);
    return isSameQualifier(UX->getQualifier(), UY->getQualifier()) &&
           UX->isAccessDeclaration() == UY->isAccessDeclaration();
  }
  if (const auto *UX = dyn_cast<UnresolvedUsingTypenameDecl>(X)) {
    return isSameQualifier(
        UX->getQualifier(),
        cast<UnresolvedUsingTypenameDecl>(Y)->getQualifier());
  }

  // Using-pack declarations are only created by instantiation, and match if
  // they're instantiated from matching UnresolvedUsing...Decls.
  if (const auto *UX = dyn_cast<UsingPackDecl>(X)) {
    return declaresSameEntity(
        UX->getInstantiatedFromUsingDecl(),
        cast<UsingPackDecl>(Y)->getInstantiatedFromUsingDecl());
  }

  // Namespace alias definitions with the same target match.
  if (const auto *NAX = dyn_cast<NamespaceAliasDecl>(X)) {
    const auto *NAY = cast<NamespaceAliasDecl>(Y);
    return NAX->getNamespace()->Equals(NAY->getNamespace());
  }

  // Any other declaration kind is not considered mergeable here.
  return false;
}
7790
/// Produce the canonical form of the given template argument: canonical
/// types, canonical declarations, canonical template names, and recursively
/// canonicalized packs. The IsDefaulted flag is preserved in every case.
TemplateArgument
ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
  switch (Arg.getKind()) {
  case TemplateArgument::Null:
    // Nothing to canonicalize.
    return Arg;

  case TemplateArgument::Expression:
    // Rebuild the argument with the IsCanonical flag set.
    return TemplateArgument(Arg.getAsExpr(), /*IsCanonical=*/true,
                            Arg.getIsDefaulted());

  case TemplateArgument::Declaration: {
    // Use the canonical declaration and canonicalize the parameter type.
    auto *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl());
    return TemplateArgument(D, getCanonicalType(Arg.getParamTypeForDecl()),
                            Arg.getIsDefaulted());
  }

  case TemplateArgument::NullPtr:
    return TemplateArgument(getCanonicalType(Arg.getNullPtrType()),
                            /*isNullPtr*/ true, Arg.getIsDefaulted());

  case TemplateArgument::Template:
    return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()),
                            Arg.getIsDefaulted());

  case TemplateArgument::TemplateExpansion:
    // Canonicalize the pattern; keep the expansion count.
    return TemplateArgument(
        getCanonicalTemplateName(Arg.getAsTemplateOrTemplatePattern()),
        Arg.getNumTemplateExpansions(), Arg.getIsDefaulted());

  case TemplateArgument::Integral:
    // Keep the value, canonicalize its type.
    return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType()));

  case TemplateArgument::StructuralValue:
    return TemplateArgument(*this,
                            getCanonicalType(Arg.getStructuralValueType()),
                            Arg.getAsStructuralValue(), Arg.getIsDefaulted());

  case TemplateArgument::Type:
    return TemplateArgument(getCanonicalType(Arg.getAsType()),
                            /*isNullPtr*/ false, Arg.getIsDefaulted());

  case TemplateArgument::Pack: {
    // Canonicalize each element; only rebuild the pack if something changed.
    bool AnyNonCanonArgs = false;
    auto CanonArgs = ::getCanonicalTemplateArguments(
        *this, Arg.pack_elements(), AnyNonCanonArgs);
    if (!AnyNonCanonArgs)
      return Arg;
    auto NewArg = TemplateArgument::CreatePackCopy(
        const_cast<ASTContext &>(*this), CanonArgs);
    NewArg.setIsDefaulted(Arg.getIsDefaulted());
    return NewArg;
  }
  }

  // Silence GCC warning
  llvm_unreachable("Unhandled template argument kind");
}
7848
/// Determine whether two template arguments are equivalent. Arguments of
/// different kinds never match; otherwise comparison dispatches on the kind.
bool ASTContext::isSameTemplateArgument(const TemplateArgument &Arg1,
                                        const TemplateArgument &Arg2) const {
  if (Arg1.getKind() != Arg2.getKind())
    return false;

  switch (Arg1.getKind()) {
  case TemplateArgument::Null:
    llvm_unreachable("Comparing NULL template argument");

  case TemplateArgument::Type:
    return hasSameType(Arg1.getAsType(), Arg2.getAsType());

  case TemplateArgument::Declaration:
    // Compare the canonical declarations of the underlying entities.
    return Arg1.getAsDecl()->getUnderlyingDecl()->getCanonicalDecl() ==
           Arg2.getAsDecl()->getUnderlyingDecl()->getCanonicalDecl();

  case TemplateArgument::NullPtr:
    return hasSameType(Arg1.getNullPtrType(), Arg2.getNullPtrType());

  case TemplateArgument::Template:
  case TemplateArgument::TemplateExpansion:
    // Compare by canonical template name (the pattern, for expansions).
    return getCanonicalTemplateName(Arg1.getAsTemplateOrTemplatePattern()) ==
           getCanonicalTemplateName(Arg2.getAsTemplateOrTemplatePattern());

  case TemplateArgument::Integral:
    // isSameValue compares numeric values even across width/signedness.
    return llvm::APSInt::isSameValue(Arg1.getAsIntegral(),
                                     Arg2.getAsIntegral());

  case TemplateArgument::StructuralValue:
    return Arg1.structurallyEquals(Arg2);

  case TemplateArgument::Expression: {
    // Compare expressions by canonical profile.
    llvm::FoldingSetNodeID ID1, ID2;
    Arg1.getAsExpr()->Profile(ID1, *this, /*Canonical=*/true);
    Arg2.getAsExpr()->Profile(ID2, *this, /*Canonical=*/true);
    return ID1 == ID2;
  }

  case TemplateArgument::Pack:
    // Packs match when they are element-wise equivalent (llvm::equal also
    // checks that the lengths agree).
    return llvm::equal(
        Arg1.getPackAsArray(), Arg2.getPackAsArray(),
        [&](const TemplateArgument &Arg1, const TemplateArgument &Arg2) {
          return isSameTemplateArgument(Arg1, Arg2);
        });
  }

  llvm_unreachable("Unhandled template argument kind");
}
7897
/// Return T as an ArrayType with any outer qualifiers pushed down onto the
/// element type, or null if T is not (canonically) an array. Preserves
/// typedef sugar in the element type where possible.
const ArrayType *ASTContext::getAsArrayType(QualType T) const {
  // Handle the non-qualified case efficiently.
  if (!T.hasLocalQualifiers()) {
    // Handle the common positive case fast.
    if (const auto *AT = dyn_cast<ArrayType>(T))
      return AT;
  }

  // Handle the common negative case fast.
  if (!isa<ArrayType>(T.getCanonicalType()))
    return nullptr;

  // Apply any qualifiers from the array type to the element type. This
  // implements C99 6.7.3p8: "If the specification of an array type includes
  // any type qualifiers, the element type is so qualified, not the array type."

  // If we get here, we either have type qualifiers on the type, or we have
  // sugar such as a typedef in the way. If we have type qualifiers on the type
  // we must propagate them down into the element type.

  SplitQualType split = T.getSplitDesugaredType();
  Qualifiers qs = split.Quals;

  // If we have a simple case, just return now.
  const auto *ATy = dyn_cast<ArrayType>(split.Ty);
  if (!ATy || qs.empty())
    return ATy;

  // Otherwise, we have an array and we have qualifiers on it. Push the
  // qualifiers into the array element type and return a new array type.
  QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs);

  // Rebuild the array with the qualified element type, preserving the
  // original array flavor (constant/incomplete/dependent-sized/variable).
  if (const auto *CAT = dyn_cast<ConstantArrayType>(ATy))
    return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(),
                                                CAT->getSizeExpr(),
                                                CAT->getSizeModifier(),
                                                CAT->getIndexTypeCVRQualifiers()));
  if (const auto *IAT = dyn_cast<IncompleteArrayType>(ATy))
    return cast<ArrayType>(getIncompleteArrayType(NewEltTy,
                                                  IAT->getSizeModifier(),
                                                  IAT->getIndexTypeCVRQualifiers()));

  if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(ATy))
    return cast<ArrayType>(getDependentSizedArrayType(
        NewEltTy, DSAT->getSizeExpr(), DSAT->getSizeModifier(),
        DSAT->getIndexTypeCVRQualifiers()));

  const auto *VAT = cast<VariableArrayType>(ATy);
  return cast<ArrayType>(
      getVariableArrayType(NewEltTy, VAT->getSizeExpr(), VAT->getSizeModifier(),
                           VAT->getIndexTypeCVRQualifiers()));
}
7950
7951QualType ASTContext::getAdjustedParameterType(QualType T) const {
7952 if (getLangOpts().HLSL && T->isConstantArrayType())
7953 return getArrayParameterType(Ty: T);
7954 if (T->isArrayType() || T->isFunctionType())
7955 return getDecayedType(T);
7956 return T;
7957}
7958
7959QualType ASTContext::getSignatureParameterType(QualType T) const {
7960 T = getVariableArrayDecayedType(type: T);
7961 T = getAdjustedParameterType(T);
7962 return T.getUnqualifiedType();
7963}
7964
7965QualType ASTContext::getExceptionObjectType(QualType T) const {
7966 // C++ [except.throw]p3:
7967 // A throw-expression initializes a temporary object, called the exception
7968 // object, the type of which is determined by removing any top-level
7969 // cv-qualifiers from the static type of the operand of throw and adjusting
7970 // the type from "array of T" or "function returning T" to "pointer to T"
7971 // or "pointer to function returning T", [...]
7972 T = getVariableArrayDecayedType(type: T);
7973 if (T->isArrayType() || T->isFunctionType())
7974 T = getDecayedType(T);
7975 return T.getUnqualifiedType();
7976}
7977
7978/// getArrayDecayedType - Return the properly qualified result of decaying the
7979/// specified array type to a pointer. This operation is non-trivial when
7980/// handling typedefs etc. The canonical type of "T" must be an array type,
7981/// this returns a pointer to a properly qualified element of the array.
7982///
7983/// See C99 6.7.5.3p7 and C99 6.3.2.1p3.
7984QualType ASTContext::getArrayDecayedType(QualType Ty) const {
7985 // Get the element type with 'getAsArrayType' so that we don't lose any
7986 // typedefs in the element type of the array. This also handles propagation
7987 // of type qualifiers from the array type into the element type if present
7988 // (C99 6.7.3p8).
7989 const ArrayType *PrettyArrayType = getAsArrayType(T: Ty);
7990 assert(PrettyArrayType && "Not an array type!");
7991
7992 QualType PtrTy = getPointerType(T: PrettyArrayType->getElementType());
7993
7994 // int x[restrict 4] -> int *restrict
7995 QualType Result = getQualifiedType(T: PtrTy,
7996 Qs: PrettyArrayType->getIndexTypeQualifiers());
7997
7998 // int x[_Nullable] -> int * _Nullable
7999 if (auto Nullability = Ty->getNullability()) {
8000 Result = const_cast<ASTContext *>(this)->getAttributedType(nullability: *Nullability,
8001 modifiedType: Result, equivalentType: Result);
8002 }
8003 return Result;
8004}
8005
8006QualType ASTContext::getBaseElementType(const ArrayType *array) const {
8007 return getBaseElementType(QT: array->getElementType());
8008}
8009
8010QualType ASTContext::getBaseElementType(QualType type) const {
8011 Qualifiers qs;
8012 while (true) {
8013 SplitQualType split = type.getSplitDesugaredType();
8014 const ArrayType *array = split.Ty->getAsArrayTypeUnsafe();
8015 if (!array) break;
8016
8017 type = array->getElementType();
8018 qs.addConsistentQualifiers(qs: split.Quals);
8019 }
8020
8021 return getQualifiedType(T: type, Qs: qs);
8022}
8023
8024/// getConstantArrayElementCount - Returns number of constant array elements.
8025uint64_t
8026ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const {
8027 uint64_t ElementCount = 1;
8028 do {
8029 ElementCount *= CA->getZExtSize();
8030 CA = dyn_cast_or_null<ConstantArrayType>(
8031 Val: CA->getElementType()->getAsArrayTypeUnsafe());
8032 } while (CA);
8033 return ElementCount;
8034}
8035
8036uint64_t ASTContext::getArrayInitLoopExprElementCount(
8037 const ArrayInitLoopExpr *AILE) const {
8038 if (!AILE)
8039 return 0;
8040
8041 uint64_t ElementCount = 1;
8042
8043 do {
8044 ElementCount *= AILE->getArraySize().getZExtValue();
8045 AILE = dyn_cast<ArrayInitLoopExpr>(Val: AILE->getSubExpr());
8046 } while (AILE);
8047
8048 return ElementCount;
8049}
8050
8051/// getFloatingRank - Return a relative rank for floating point types.
8052/// This routine will assert if passed a built-in type that isn't a float.
8053static FloatingRank getFloatingRank(QualType T) {
8054 if (const auto *CT = T->getAs<ComplexType>())
8055 return getFloatingRank(T: CT->getElementType());
8056
8057 switch (T->castAs<BuiltinType>()->getKind()) {
8058 default: llvm_unreachable("getFloatingRank(): not a floating type");
8059 case BuiltinType::Float16: return Float16Rank;
8060 case BuiltinType::Half: return HalfRank;
8061 case BuiltinType::Float: return FloatRank;
8062 case BuiltinType::Double: return DoubleRank;
8063 case BuiltinType::LongDouble: return LongDoubleRank;
8064 case BuiltinType::Float128: return Float128Rank;
8065 case BuiltinType::BFloat16: return BFloat16Rank;
8066 case BuiltinType::Ibm128: return Ibm128Rank;
8067 }
8068}
8069
8070/// getFloatingTypeOrder - Compare the rank of the two specified floating
8071/// point types, ignoring the domain of the type (i.e. 'double' ==
8072/// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If
8073/// LHS < RHS, return -1.
8074int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const {
8075 FloatingRank LHSR = getFloatingRank(T: LHS);
8076 FloatingRank RHSR = getFloatingRank(T: RHS);
8077
8078 if (LHSR == RHSR)
8079 return 0;
8080 if (LHSR > RHSR)
8081 return 1;
8082 return -1;
8083}
8084
8085int ASTContext::getFloatingTypeSemanticOrder(QualType LHS, QualType RHS) const {
8086 if (&getFloatTypeSemantics(T: LHS) == &getFloatTypeSemantics(T: RHS))
8087 return 0;
8088 return getFloatingTypeOrder(LHS, RHS);
8089}
8090
8091/// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This
8092/// routine will assert if passed a built-in type that isn't an integer or enum,
8093/// or if it is not canonicalized.
unsigned ASTContext::getIntegerRank(const Type *T) const {
  assert(T->isCanonicalUnqualified() && "T should be canonicalized");

  // The rank is encoded as (bit-width << 3) + small ordinal: a wider type
  // always outranks a narrower one, and the ordinal breaks ties among
  // same-width standard types (Bool < char < short < int < ...).

  // Results in this 'losing' to any type of the same size, but winning if
  // larger.
  if (const auto *EIT = dyn_cast<BitIntType>(T))
    return 0 + (EIT->getNumBits() << 3);

  switch (cast<BuiltinType>(T)->getKind()) {
  default: llvm_unreachable("getIntegerRank(): not a built-in integer");
  case BuiltinType::Bool:
    return 1 + (getIntWidth(BoolTy) << 3);
  case BuiltinType::Char_S:
  case BuiltinType::Char_U:
  case BuiltinType::SChar:
  case BuiltinType::UChar:
    return 2 + (getIntWidth(CharTy) << 3);
  case BuiltinType::Short:
  case BuiltinType::UShort:
    return 3 + (getIntWidth(ShortTy) << 3);
  case BuiltinType::Int:
  case BuiltinType::UInt:
    return 4 + (getIntWidth(IntTy) << 3);
  case BuiltinType::Long:
  case BuiltinType::ULong:
    return 5 + (getIntWidth(LongTy) << 3);
  case BuiltinType::LongLong:
  case BuiltinType::ULongLong:
    return 6 + (getIntWidth(LongLongTy) << 3);
  case BuiltinType::Int128:
  case BuiltinType::UInt128:
    return 7 + (getIntWidth(Int128Ty) << 3);

  // "The ranks of char8_t, char16_t, char32_t, and wchar_t equal the ranks of
  // their underlying types" [c++20 conv.rank]
  case BuiltinType::Char8:
    return getIntegerRank(UnsignedCharTy.getTypePtr());
  case BuiltinType::Char16:
    return getIntegerRank(
        getFromTargetType(Target->getChar16Type()).getTypePtr());
  case BuiltinType::Char32:
    return getIntegerRank(
        getFromTargetType(Target->getChar32Type()).getTypePtr());
  case BuiltinType::WChar_S:
  case BuiltinType::WChar_U:
    return getIntegerRank(
        getFromTargetType(Target->getWCharType()).getTypePtr());
  }
}
8143
/// Whether this is a promotable bitfield reference according
/// to C99 6.3.1.1p2, bullet 2 (and GCC extensions).
///
/// \returns the type this bit-field will promote to, or NULL if no
/// promotion occurs.
QualType ASTContext::isPromotableBitField(Expr *E) const {
  // A dependent expression's bit-width cannot be evaluated yet.
  if (E->isTypeDependent() || E->isValueDependent())
    return {};

  // C++ [conv.prom]p5:
  //   If the bit-field has an enumerated type, it is treated as any other
  //   value of that type for promotion purposes.
  if (getLangOpts().CPlusPlus && E->getType()->isEnumeralType())
    return {};

  // FIXME: We should not do this unless E->refersToBitField() is true. This
  // matters in C where getSourceBitField() will find bit-fields for various
  // cases where the source expression is not a bit-field designator.

  FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields?
  if (!Field)
    return {};

  QualType FT = Field->getType();

  uint64_t BitWidth = Field->getBitWidthValue();
  uint64_t IntSize = getTypeSize(T: IntTy);
  // C++ [conv.prom]p5:
  //   A prvalue for an integral bit-field can be converted to a prvalue of type
  //   int if int can represent all the values of the bit-field; otherwise, it
  //   can be converted to unsigned int if unsigned int can represent all the
  //   values of the bit-field. If the bit-field is larger yet, no integral
  //   promotion applies to it.
  // C11 6.3.1.1/2:
  //   [For a bit-field of type _Bool, int, signed int, or unsigned int:]
  //   If an int can represent all values of the original type (as restricted by
  //   the width, for a bit-field), the value is converted to an int; otherwise,
  //   it is converted to an unsigned int.
  //
  // FIXME: C does not permit promotion of a 'long : 3' bitfield to int.
  //        We perform that promotion here to match GCC and C++.
  // FIXME: C does not permit promotion of an enum bit-field whose rank is
  //        greater than that of 'int'. We perform that promotion to match GCC.
  //
  // C23 6.3.1.1p2:
  //   The value from a bit-field of a bit-precise integer type is converted to
  //   the corresponding bit-precise integer type. (The rest is the same as in
  //   C11.)
  if (QualType QT = Field->getType(); QT->isBitIntType())
    return QT;

  // Narrower than int: always promotes to int.
  if (BitWidth < IntSize)
    return IntTy;

  // Exactly int-sized: promotes to int or unsigned int depending on the
  // signedness of the field's declared type.
  if (BitWidth == IntSize)
    return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy;

  // Bit-fields wider than int are not subject to promotions, and therefore act
  // like the base type. GCC has some weird bugs in this area that we
  // deliberately do not follow (GCC follows a pre-standard resolution to
  // C's DR315 which treats bit-width as being part of the type, and this leaks
  // into their semantics in some cases).
  return {};
}
8208
8209/// getPromotedIntegerType - Returns the type that Promotable will
8210/// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable
8211/// integer type.
8212QualType ASTContext::getPromotedIntegerType(QualType Promotable) const {
8213 assert(!Promotable.isNull());
8214 assert(isPromotableIntegerType(Promotable));
8215 if (const auto *ED = Promotable->getAsEnumDecl())
8216 return ED->getPromotionType();
8217
8218 if (const auto *BT = Promotable->getAs<BuiltinType>()) {
8219 // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t
8220 // (3.9.1) can be converted to a prvalue of the first of the following
8221 // types that can represent all the values of its underlying type:
8222 // int, unsigned int, long int, unsigned long int, long long int, or
8223 // unsigned long long int [...]
8224 // FIXME: Is there some better way to compute this?
8225 if (BT->getKind() == BuiltinType::WChar_S ||
8226 BT->getKind() == BuiltinType::WChar_U ||
8227 BT->getKind() == BuiltinType::Char8 ||
8228 BT->getKind() == BuiltinType::Char16 ||
8229 BT->getKind() == BuiltinType::Char32) {
8230 bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S;
8231 uint64_t FromSize = getTypeSize(T: BT);
8232 QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy,
8233 LongLongTy, UnsignedLongLongTy };
8234 for (const auto &PT : PromoteTypes) {
8235 uint64_t ToSize = getTypeSize(T: PT);
8236 if (FromSize < ToSize ||
8237 (FromSize == ToSize && FromIsSigned == PT->isSignedIntegerType()))
8238 return PT;
8239 }
8240 llvm_unreachable("char type should fit into long long");
8241 }
8242 }
8243
8244 // At this point, we should have a signed or unsigned integer type.
8245 if (Promotable->isSignedIntegerType())
8246 return IntTy;
8247 uint64_t PromotableSize = getIntWidth(T: Promotable);
8248 uint64_t IntSize = getIntWidth(T: IntTy);
8249 assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize);
8250 return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy;
8251}
8252
8253/// Recurses in pointer/array types until it finds an objc retainable
8254/// type and returns its ownership.
8255Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const {
8256 while (!T.isNull()) {
8257 if (T.getObjCLifetime() != Qualifiers::OCL_None)
8258 return T.getObjCLifetime();
8259 if (T->isArrayType())
8260 T = getBaseElementType(type: T);
8261 else if (const auto *PT = T->getAs<PointerType>())
8262 T = PT->getPointeeType();
8263 else if (const auto *RT = T->getAs<ReferenceType>())
8264 T = RT->getPointeeType();
8265 else
8266 break;
8267 }
8268
8269 return Qualifiers::OCL_None;
8270}
8271
8272static const Type *getIntegerTypeForEnum(const EnumType *ET) {
8273 // Incomplete enum types are not treated as integer types.
8274 // FIXME: In C++, enum types are never integer types.
8275 const EnumDecl *ED = ET->getDecl()->getDefinitionOrSelf();
8276 if (ED->isComplete() && !ED->isScoped())
8277 return ED->getIntegerType().getTypePtr();
8278 return nullptr;
8279}
8280
8281/// getIntegerTypeOrder - Returns the highest ranked integer type:
8282/// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If
8283/// LHS < RHS, return -1.
8284int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const {
8285 const Type *LHSC = getCanonicalType(T: LHS).getTypePtr();
8286 const Type *RHSC = getCanonicalType(T: RHS).getTypePtr();
8287
8288 // Unwrap enums to their underlying type.
8289 if (const auto *ET = dyn_cast<EnumType>(Val: LHSC))
8290 LHSC = getIntegerTypeForEnum(ET);
8291 if (const auto *ET = dyn_cast<EnumType>(Val: RHSC))
8292 RHSC = getIntegerTypeForEnum(ET);
8293
8294 if (LHSC == RHSC) return 0;
8295
8296 bool LHSUnsigned = LHSC->isUnsignedIntegerType();
8297 bool RHSUnsigned = RHSC->isUnsignedIntegerType();
8298
8299 unsigned LHSRank = getIntegerRank(T: LHSC);
8300 unsigned RHSRank = getIntegerRank(T: RHSC);
8301
8302 if (LHSUnsigned == RHSUnsigned) { // Both signed or both unsigned.
8303 if (LHSRank == RHSRank) return 0;
8304 return LHSRank > RHSRank ? 1 : -1;
8305 }
8306
8307 // Otherwise, the LHS is signed and the RHS is unsigned or visa versa.
8308 if (LHSUnsigned) {
8309 // If the unsigned [LHS] type is larger, return it.
8310 if (LHSRank >= RHSRank)
8311 return 1;
8312
8313 // If the signed type can represent all values of the unsigned type, it
8314 // wins. Because we are dealing with 2's complement and types that are
8315 // powers of two larger than each other, this is always safe.
8316 return -1;
8317 }
8318
8319 // If the unsigned [RHS] type is larger, return it.
8320 if (RHSRank >= LHSRank)
8321 return -1;
8322
8323 // If the signed type can represent all values of the unsigned type, it
8324 // wins. Because we are dealing with 2's complement and types that are
8325 // powers of two larger than each other, this is always safe.
8326 return 1;
8327}
8328
8329TypedefDecl *ASTContext::getCFConstantStringDecl() const {
8330 if (CFConstantStringTypeDecl)
8331 return CFConstantStringTypeDecl;
8332
8333 assert(!CFConstantStringTagDecl &&
8334 "tag and typedef should be initialized together");
8335 CFConstantStringTagDecl = buildImplicitRecord(Name: "__NSConstantString_tag");
8336 CFConstantStringTagDecl->startDefinition();
8337
8338 struct {
8339 QualType Type;
8340 const char *Name;
8341 } Fields[5];
8342 unsigned Count = 0;
8343
8344 /// Objective-C ABI
8345 ///
8346 /// typedef struct __NSConstantString_tag {
8347 /// const int *isa;
8348 /// int flags;
8349 /// const char *str;
8350 /// long length;
8351 /// } __NSConstantString;
8352 ///
8353 /// Swift ABI (4.1, 4.2)
8354 ///
8355 /// typedef struct __NSConstantString_tag {
8356 /// uintptr_t _cfisa;
8357 /// uintptr_t _swift_rc;
8358 /// _Atomic(uint64_t) _cfinfoa;
8359 /// const char *_ptr;
8360 /// uint32_t _length;
8361 /// } __NSConstantString;
8362 ///
8363 /// Swift ABI (5.0)
8364 ///
8365 /// typedef struct __NSConstantString_tag {
8366 /// uintptr_t _cfisa;
8367 /// uintptr_t _swift_rc;
8368 /// _Atomic(uint64_t) _cfinfoa;
8369 /// const char *_ptr;
8370 /// uintptr_t _length;
8371 /// } __NSConstantString;
8372
8373 const auto CFRuntime = getLangOpts().CFRuntime;
8374 if (static_cast<unsigned>(CFRuntime) <
8375 static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift)) {
8376 Fields[Count++] = { .Type: getPointerType(T: IntTy.withConst()), .Name: "isa" };
8377 Fields[Count++] = { .Type: IntTy, .Name: "flags" };
8378 Fields[Count++] = { .Type: getPointerType(T: CharTy.withConst()), .Name: "str" };
8379 Fields[Count++] = { .Type: LongTy, .Name: "length" };
8380 } else {
8381 Fields[Count++] = { .Type: getUIntPtrType(), .Name: "_cfisa" };
8382 Fields[Count++] = { .Type: getUIntPtrType(), .Name: "_swift_rc" };
8383 Fields[Count++] = { .Type: getFromTargetType(Type: Target->getUInt64Type()), .Name: "_swift_rc" };
8384 Fields[Count++] = { .Type: getPointerType(T: CharTy.withConst()), .Name: "_ptr" };
8385 if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 ||
8386 CFRuntime == LangOptions::CoreFoundationABI::Swift4_2)
8387 Fields[Count++] = { .Type: IntTy, .Name: "_ptr" };
8388 else
8389 Fields[Count++] = { .Type: getUIntPtrType(), .Name: "_ptr" };
8390 }
8391
8392 // Create fields
8393 for (unsigned i = 0; i < Count; ++i) {
8394 FieldDecl *Field =
8395 FieldDecl::Create(C: *this, DC: CFConstantStringTagDecl, StartLoc: SourceLocation(),
8396 IdLoc: SourceLocation(), Id: &Idents.get(Name: Fields[i].Name),
8397 T: Fields[i].Type, /*TInfo=*/nullptr,
8398 /*BitWidth=*/BW: nullptr, /*Mutable=*/false, InitStyle: ICIS_NoInit);
8399 Field->setAccess(AS_public);
8400 CFConstantStringTagDecl->addDecl(D: Field);
8401 }
8402
8403 CFConstantStringTagDecl->completeDefinition();
8404 // This type is designed to be compatible with NSConstantString, but cannot
8405 // use the same name, since NSConstantString is an interface.
8406 CanQualType tagType = getCanonicalTagType(TD: CFConstantStringTagDecl);
8407 CFConstantStringTypeDecl =
8408 buildImplicitTypedef(T: tagType, Name: "__NSConstantString");
8409
8410 return CFConstantStringTypeDecl;
8411}
8412
8413RecordDecl *ASTContext::getCFConstantStringTagDecl() const {
8414 if (!CFConstantStringTagDecl)
8415 getCFConstantStringDecl(); // Build the tag and the typedef.
8416 return CFConstantStringTagDecl;
8417}
8418
// getCFConstantStringType - Return the type used for constant CFStrings.
QualType ASTContext::getCFConstantStringType() const {
  // Wrap the lazily-built '__NSConstantString' typedef, with no elaboration
  // keyword and no nested-name qualifier.
  return getTypedefType(Keyword: ElaboratedTypeKeyword::None, /*Qualifier=*/std::nullopt,
                        Decl: getCFConstantStringDecl());
}
8424
8425QualType ASTContext::getObjCSuperType() const {
8426 if (ObjCSuperType.isNull()) {
8427 RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord(Name: "objc_super");
8428 getTranslationUnitDecl()->addDecl(D: ObjCSuperTypeDecl);
8429 ObjCSuperType = getCanonicalTagType(TD: ObjCSuperTypeDecl);
8430 }
8431 return ObjCSuperType;
8432}
8433
void ASTContext::setCFConstantStringType(QualType T) {
  // T must be a typedef of the CFString record; cache both the typedef decl
  // and its underlying tag so later lookups reuse them.
  const auto *TT = T->castAs<TypedefType>();
  CFConstantStringTypeDecl = cast<TypedefDecl>(Val: TT->getDecl());
  CFConstantStringTagDecl = TT->castAsRecordDecl();
}
8439
8440QualType ASTContext::getBlockDescriptorType() const {
8441 if (BlockDescriptorType)
8442 return getCanonicalTagType(TD: BlockDescriptorType);
8443
8444 RecordDecl *RD;
8445 // FIXME: Needs the FlagAppleBlock bit.
8446 RD = buildImplicitRecord(Name: "__block_descriptor");
8447 RD->startDefinition();
8448
8449 QualType FieldTypes[] = {
8450 UnsignedLongTy,
8451 UnsignedLongTy,
8452 };
8453
8454 static const char *const FieldNames[] = {
8455 "reserved",
8456 "Size"
8457 };
8458
8459 for (size_t i = 0; i < 2; ++i) {
8460 FieldDecl *Field = FieldDecl::Create(
8461 C: *this, DC: RD, StartLoc: SourceLocation(), IdLoc: SourceLocation(),
8462 Id: &Idents.get(Name: FieldNames[i]), T: FieldTypes[i], /*TInfo=*/nullptr,
8463 /*BitWidth=*/BW: nullptr, /*Mutable=*/false, InitStyle: ICIS_NoInit);
8464 Field->setAccess(AS_public);
8465 RD->addDecl(D: Field);
8466 }
8467
8468 RD->completeDefinition();
8469
8470 BlockDescriptorType = RD;
8471
8472 return getCanonicalTagType(TD: BlockDescriptorType);
8473}
8474
8475QualType ASTContext::getBlockDescriptorExtendedType() const {
8476 if (BlockDescriptorExtendedType)
8477 return getCanonicalTagType(TD: BlockDescriptorExtendedType);
8478
8479 RecordDecl *RD;
8480 // FIXME: Needs the FlagAppleBlock bit.
8481 RD = buildImplicitRecord(Name: "__block_descriptor_withcopydispose");
8482 RD->startDefinition();
8483
8484 QualType FieldTypes[] = {
8485 UnsignedLongTy,
8486 UnsignedLongTy,
8487 getPointerType(T: VoidPtrTy),
8488 getPointerType(T: VoidPtrTy)
8489 };
8490
8491 static const char *const FieldNames[] = {
8492 "reserved",
8493 "Size",
8494 "CopyFuncPtr",
8495 "DestroyFuncPtr"
8496 };
8497
8498 for (size_t i = 0; i < 4; ++i) {
8499 FieldDecl *Field = FieldDecl::Create(
8500 C: *this, DC: RD, StartLoc: SourceLocation(), IdLoc: SourceLocation(),
8501 Id: &Idents.get(Name: FieldNames[i]), T: FieldTypes[i], /*TInfo=*/nullptr,
8502 /*BitWidth=*/BW: nullptr,
8503 /*Mutable=*/false, InitStyle: ICIS_NoInit);
8504 Field->setAccess(AS_public);
8505 RD->addDecl(D: Field);
8506 }
8507
8508 RD->completeDefinition();
8509
8510 BlockDescriptorExtendedType = RD;
8511 return getCanonicalTagType(TD: BlockDescriptorExtendedType);
8512}
8513
OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const {
  const auto *BT = dyn_cast<BuiltinType>(Val: T);

  if (!BT) {
    // Pipes are the only non-builtin type with a dedicated OpenCL kind.
    if (isa<PipeType>(Val: T))
      return OCLTK_Pipe;

    return OCLTK_Default;
  }

  switch (BT->getKind()) {
  // Every OpenCL image builtin maps to the single 'image' kind.
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
  case BuiltinType::Id: \
    return OCLTK_Image;
#include "clang/Basic/OpenCLImageTypes.def"

  case BuiltinType::OCLClkEvent:
    return OCLTK_ClkEvent;

  case BuiltinType::OCLEvent:
    return OCLTK_Event;

  case BuiltinType::OCLQueue:
    return OCLTK_Queue;

  case BuiltinType::OCLReserveID:
    return OCLTK_ReserveID;

  case BuiltinType::OCLSampler:
    return OCLTK_Sampler;

  default:
    return OCLTK_Default;
  }
}
8549
8550LangAS ASTContext::getOpenCLTypeAddrSpace(const Type *T) const {
8551 return Target->getOpenCLTypeAddrSpace(TK: getOpenCLTypeKind(T));
8552}
8553
/// BlockRequiresCopying - Returns true if byref variable "D" of type "Ty"
/// requires copy/dispose. Note that this must match the logic
/// in buildByrefHelpers.
bool ASTContext::BlockRequiresCopying(QualType Ty,
                                      const VarDecl *D) {
  // C++ records need helpers unless there is no copy expression and the
  // destructor is trivial.
  if (const CXXRecordDecl *record = Ty->getAsCXXRecordDecl()) {
    const Expr *copyExpr = getBlockVarCopyInit(VD: D).getCopyExpr();
    if (!copyExpr && record->hasTrivialDestructor()) return false;

    return true;
  }

  // Address-discriminated pointer-authenticated values always require the
  // copy/dispose helpers.
  if (Ty.hasAddressDiscriminatedPointerAuth())
    return true;

  // The block needs copy/destroy helpers if Ty is non-trivial to destructively
  // move or destroy.
  if (Ty.isNonTrivialToPrimitiveDestructiveMove() || Ty.isDestructedType())
    return true;

  // Past this point, only ObjC-retainable types can require helpers.
  if (!Ty->isObjCRetainableType()) return false;

  Qualifiers qs = Ty.getQualifiers();

  // If we have lifetime, that dominates.
  if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) {
    switch (lifetime) {
      case Qualifiers::OCL_None: llvm_unreachable("impossible");

      // These are just bits as far as the runtime is concerned.
      case Qualifiers::OCL_ExplicitNone:
      case Qualifiers::OCL_Autoreleasing:
        return false;

      // These cases should have been taken care of when checking the type's
      // non-triviality.
      case Qualifiers::OCL_Weak:
      case Qualifiers::OCL_Strong:
        llvm_unreachable("impossible");
    }
    llvm_unreachable("fell out of lifetime switch!");
  }
  // No lifetime qualifier: block pointers, NSObject-typedefs, and ObjC object
  // pointers still need copy/dispose.
  return (Ty->isBlockPointerType() || isObjCNSObjectType(Ty) ||
          Ty->isObjCObjectPointerType());
}
8599
8600bool ASTContext::getByrefLifetime(QualType Ty,
8601 Qualifiers::ObjCLifetime &LifeTime,
8602 bool &HasByrefExtendedLayout) const {
8603 if (!getLangOpts().ObjC ||
8604 getLangOpts().getGC() != LangOptions::NonGC)
8605 return false;
8606
8607 HasByrefExtendedLayout = false;
8608 if (Ty->isRecordType()) {
8609 HasByrefExtendedLayout = true;
8610 LifeTime = Qualifiers::OCL_None;
8611 } else if ((LifeTime = Ty.getObjCLifetime())) {
8612 // Honor the ARC qualifiers.
8613 } else if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) {
8614 // The MRR rule.
8615 LifeTime = Qualifiers::OCL_ExplicitNone;
8616 } else {
8617 LifeTime = Qualifiers::OCL_None;
8618 }
8619 return true;
8620}
8621
8622CanQualType ASTContext::getNSUIntegerType() const {
8623 assert(Target && "Expected target to be initialized");
8624 const llvm::Triple &T = Target->getTriple();
8625 // Windows is LLP64 rather than LP64
8626 if (T.isOSWindows() && T.isArch64Bit())
8627 return UnsignedLongLongTy;
8628 return UnsignedLongTy;
8629}
8630
8631CanQualType ASTContext::getNSIntegerType() const {
8632 assert(Target && "Expected target to be initialized");
8633 const llvm::Triple &T = Target->getTriple();
8634 // Windows is LLP64 rather than LP64
8635 if (T.isOSWindows() && T.isArch64Bit())
8636 return LongLongTy;
8637 return LongTy;
8638}
8639
8640TypedefDecl *ASTContext::getObjCInstanceTypeDecl() {
8641 if (!ObjCInstanceTypeDecl)
8642 ObjCInstanceTypeDecl =
8643 buildImplicitTypedef(T: getObjCIdType(), Name: "instancetype");
8644 return ObjCInstanceTypeDecl;
8645}
8646
8647// This returns true if a type has been typedefed to BOOL:
8648// typedef <type> BOOL;
8649static bool isTypeTypedefedAsBOOL(QualType T) {
8650 if (const auto *TT = dyn_cast<TypedefType>(Val&: T))
8651 if (IdentifierInfo *II = TT->getDecl()->getIdentifier())
8652 return II->isStr(Str: "BOOL");
8653
8654 return false;
8655}
8656
8657/// getObjCEncodingTypeSize returns size of type for objective-c encoding
8658/// purpose.
8659CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const {
8660 if (!type->isIncompleteArrayType() && type->isIncompleteType())
8661 return CharUnits::Zero();
8662
8663 CharUnits sz = getTypeSizeInChars(T: type);
8664
8665 // Make all integer and enum types at least as large as an int
8666 if (sz.isPositive() && type->isIntegralOrEnumerationType())
8667 sz = std::max(a: sz, b: getTypeSizeInChars(T: IntTy));
8668 // Treat arrays as pointers, since that's how they're passed in.
8669 else if (type->isArrayType())
8670 sz = getTypeSizeInChars(T: VoidPtrTy);
8671 return sz;
8672}
8673
// In the Microsoft ABI, a static data member of integral or enumeration type
// whose first declaration has an in-class initializer and is not defined
// out-of-line is treated like an inline definition.
bool ASTContext::isMSStaticDataMemberInlineDefinition(const VarDecl *VD) const {
  return getTargetInfo().getCXXABI().isMicrosoft() &&
         VD->isStaticDataMember() &&
         VD->getType()->isIntegralOrEnumerationType() &&
         !VD->getFirstDecl()->isOutOfLine() && VD->getFirstDecl()->hasInit();
}
8680
// Classify how an inline variable's definition should be emitted: not an
// inline definition at all, a weak (discardable) one, a strong one, or
// weak-pending-more-information.
ASTContext::InlineVariableDefinitionKind
ASTContext::getInlineVariableDefinitionKind(const VarDecl *VD) const {
  if (!VD->isInline())
    return InlineVariableDefinitionKind::None;

  // In almost all cases, it's a weak definition.
  auto *First = VD->getFirstDecl();
  if (First->isInlineSpecified() || !First->isStaticDataMember())
    return InlineVariableDefinitionKind::Weak;

  // If there's a file-context declaration in this translation unit, it's a
  // non-discardable definition.
  for (auto *D : VD->redecls())
    if (D->getLexicalDeclContext()->isFileContext() &&
        !D->isInlineSpecified() && (D->isConstexpr() || First->isConstexpr()))
      return InlineVariableDefinitionKind::Strong;

  // If we've not seen one yet, we don't know.
  return InlineVariableDefinitionKind::WeakUnknown;
}
8701
8702static std::string charUnitsToString(const CharUnits &CU) {
8703 return llvm::itostr(X: CU.getQuantity());
8704}
8705
/// getObjCEncodingForBlock - Return the encoded type for this block
/// declaration.
///
/// The encoding is: return type, total argument frame size, "@?0" (the block
/// literal itself at offset 0), then each parameter's type encoding followed
/// by its byte offset.
std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const {
  std::string S;

  const BlockDecl *Decl = Expr->getBlockDecl();
  QualType BlockTy =
      Expr->getType()->castAs<BlockPointerType>()->getPointeeType();
  QualType BlockReturnTy = BlockTy->castAs<FunctionType>()->getReturnType();
  // Encode result type.
  if (getLangOpts().EncodeExtendedBlockSig)
    getObjCEncodingForMethodParameter(QT: Decl::OBJC_TQ_None, T: BlockReturnTy, S,
                                      Extended: true /*Extended*/);
  else
    getObjCEncodingForType(T: BlockReturnTy, S);
  // Compute size of all parameters.
  // Start with computing size of a pointer in number of bytes.
  // FIXME: There might(should) be a better way of doing this computation!
  CharUnits PtrSize = getTypeSizeInChars(T: VoidPtrTy);
  CharUnits ParmOffset = PtrSize;
  for (auto *PI : Decl->parameters()) {
    QualType PType = PI->getType();
    CharUnits sz = getObjCEncodingTypeSize(type: PType);
    // Zero-size parameters contribute nothing to the frame.
    if (sz.isZero())
      continue;
    assert(sz.isPositive() && "BlockExpr - Incomplete param type");
    ParmOffset += sz;
  }
  // Size of the argument frame
  S += charUnitsToString(CU: ParmOffset);
  // Block pointer and offset.
  S += "@?0";

  // Argument types.
  ParmOffset = PtrSize;
  for (auto *PVDecl : Decl->parameters()) {
    QualType PType = PVDecl->getOriginalType();
    if (const auto *AT =
            dyn_cast<ArrayType>(Val: PType->getCanonicalTypeInternal())) {
      // Use array's original type only if it has known number of
      // elements.
      if (!isa<ConstantArrayType>(Val: AT))
        PType = PVDecl->getType();
    } else if (PType->isFunctionType())
      PType = PVDecl->getType();
    if (getLangOpts().EncodeExtendedBlockSig)
      getObjCEncodingForMethodParameter(QT: Decl::OBJC_TQ_None, T: PType,
                                      S, Extended: true /*Extended*/);
    else
      getObjCEncodingForType(T: PType, S);
    S += charUnitsToString(CU: ParmOffset);
    ParmOffset += getObjCEncodingTypeSize(type: PType);
  }

  return S;
}
8762
/// getObjCEncodingForFunctionDecl - Return the encoded type for this function
/// declaration: result type, total parameter frame size, then each
/// parameter's type encoding followed by its byte offset.
std::string
ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const {
  std::string S;
  // Encode result type.
  getObjCEncodingForType(T: Decl->getReturnType(), S);
  CharUnits ParmOffset;
  // Compute size of all parameters.
  for (auto *PI : Decl->parameters()) {
    QualType PType = PI->getType();
    CharUnits sz = getObjCEncodingTypeSize(type: PType);
    // Zero-size parameters contribute nothing to the frame.
    if (sz.isZero())
      continue;

    assert(sz.isPositive() &&
           "getObjCEncodingForFunctionDecl - Incomplete param type");
    ParmOffset += sz;
  }
  S += charUnitsToString(CU: ParmOffset);
  ParmOffset = CharUnits::Zero();

  // Argument types.
  for (auto *PVDecl : Decl->parameters()) {
    QualType PType = PVDecl->getOriginalType();
    if (const auto *AT =
            dyn_cast<ArrayType>(Val: PType->getCanonicalTypeInternal())) {
      // Use array's original type only if it has known number of
      // elements.
      if (!isa<ConstantArrayType>(Val: AT))
        PType = PVDecl->getType();
    } else if (PType->isFunctionType())
      PType = PVDecl->getType();
    getObjCEncodingForType(T: PType, S);
    S += charUnitsToString(CU: ParmOffset);
    ParmOffset += getObjCEncodingTypeSize(type: PType);
  }

  return S;
}
8801
/// getObjCEncodingForMethodParameter - Return the encoded type for a single
/// method parameter or return type. If Extended, include class names and
/// block object types.
///
/// The output is the parameter's ObjC type qualifiers ('in', 'inout', ...)
/// followed by its type encoding, appended to S.
void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT,
                                                   QualType T, std::string& S,
                                                   bool Extended) const {
  // Encode type qualifier, 'in', 'inout', etc. for the parameter.
  getObjCEncodingForTypeQualifier(QT, S);
  // Encode parameter type.
  ObjCEncOptions Options = ObjCEncOptions()
                               .setExpandPointedToStructures()
                               .setExpandStructures()
                               .setIsOutermostType();
  if (Extended)
    Options.setEncodeBlockParameters().setEncodeClassNames();
  getObjCEncodingForTypeImpl(t: T, S, Options, /*Field=*/nullptr);
}
8819
/// getObjCEncodingForMethodDecl - Return the encoded type for this method
/// declaration.
std::string ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl,
                                                     bool Extended) const {
  // FIXME: This is not very efficient.
  // Encode return type.
  std::string S;
  getObjCEncodingForMethodParameter(QT: Decl->getObjCDeclQualifier(),
                                    T: Decl->getReturnType(), S, Extended);
  // Compute size of all parameters.
  // Start with computing size of a pointer in number of bytes.
  // FIXME: There might(should) be a better way of doing this computation!
  CharUnits PtrSize = getTypeSizeInChars(T: VoidPtrTy);
  // The first two arguments (self and _cmd) are pointers; account for
  // their size.
  CharUnits ParmOffset = 2 * PtrSize;
  for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(),
       E = Decl->sel_param_end(); PI != E; ++PI) {
    QualType PType = (*PI)->getType();
    CharUnits sz = getObjCEncodingTypeSize(type: PType);
    // Zero-size parameters contribute nothing to the frame.
    if (sz.isZero())
      continue;

    assert(sz.isPositive() &&
           "getObjCEncodingForMethodDecl - Incomplete param type");
    ParmOffset += sz;
  }
  S += charUnitsToString(CU: ParmOffset);
  // Encode self ('@') at offset 0 and _cmd (':') at the next pointer slot.
  S += "@0:";
  S += charUnitsToString(CU: PtrSize);

  // Argument types.
  ParmOffset = 2 * PtrSize;
  for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(),
       E = Decl->sel_param_end(); PI != E; ++PI) {
    const ParmVarDecl *PVDecl = *PI;
    QualType PType = PVDecl->getOriginalType();
    if (const auto *AT =
            dyn_cast<ArrayType>(Val: PType->getCanonicalTypeInternal())) {
      // Use array's original type only if it has known number of
      // elements.
      if (!isa<ConstantArrayType>(Val: AT))
        PType = PVDecl->getType();
    } else if (PType->isFunctionType())
      PType = PVDecl->getType();
    getObjCEncodingForMethodParameter(QT: PVDecl->getObjCDeclQualifier(),
                                      T: PType, S, Extended);
    S += charUnitsToString(CU: ParmOffset);
    ParmOffset += getObjCEncodingTypeSize(type: PType);
  }

  return S;
}
8873
8874ObjCPropertyImplDecl *
8875ASTContext::getObjCPropertyImplDeclForPropertyDecl(
8876 const ObjCPropertyDecl *PD,
8877 const Decl *Container) const {
8878 if (!Container)
8879 return nullptr;
8880 if (const auto *CID = dyn_cast<ObjCCategoryImplDecl>(Val: Container)) {
8881 for (auto *PID : CID->property_impls())
8882 if (PID->getPropertyDecl() == PD)
8883 return PID;
8884 } else {
8885 const auto *OID = cast<ObjCImplementationDecl>(Val: Container);
8886 for (auto *PID : OID->property_impls())
8887 if (PID->getPropertyDecl() == PD)
8888 return PID;
8889 }
8890 return nullptr;
8891}
8892
/// getObjCEncodingForPropertyDecl - Return the encoded type for this
/// property declaration. If non-NULL, Container must be either an
/// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be
/// NULL when getting encodings for protocol properties.
/// Property attributes are stored as a comma-delimited C string. The simple
/// attributes readonly and bycopy are encoded as single characters. The
/// parametrized attributes, getter=name, setter=name, and ivar=name, are
/// encoded as single characters, followed by an identifier. Property types
/// are also encoded as a parametrized attribute. The characters used to encode
/// these attributes are defined by the following enumeration:
/// @code
/// enum PropertyAttributes {
///   kPropertyReadOnly = 'R',   // property is read-only.
///   kPropertyBycopy = 'C',     // property is a copy of the value last assigned
///   kPropertyByref = '&',  // property is a reference to the value last assigned
///   kPropertyDynamic = 'D',    // property is dynamic
///   kPropertyGetter = 'G',     // followed by getter selector name
///   kPropertySetter = 'S',     // followed by setter selector name
///   kPropertyInstanceVariable = 'V'  // followed by instance variable  name
///   kPropertyType = 'T'              // followed by old-style type encoding.
///   kPropertyWeak = 'W'              // 'weak' property
///   kPropertyStrong = 'P'            // property GC'able
///   kPropertyNonAtomic = 'N'         // property non-atomic
///   kPropertyOptional = '?'          // property optional
/// };
/// @endcode
std::string
ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
                                           const Decl *Container) const {
  // Collect information from the property implementation decl(s).
  bool Dynamic = false;
  ObjCPropertyImplDecl *SynthesizePID = nullptr;

  if (ObjCPropertyImplDecl *PropertyImpDecl =
          getObjCPropertyImplDeclForPropertyDecl(PD, Container)) {
    if (PropertyImpDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
      Dynamic = true;
    else
      SynthesizePID = PropertyImpDecl;
  }

  // FIXME: This is not very efficient.
  // Every encoding starts with the property's type, tagged 'T'.
  std::string S = "T";

  // Encode result type.
  // GCC has some special rules regarding encoding of properties which
  // closely resembles encoding of ivars.
  getObjCEncodingForPropertyType(T: PD->getType(), S);

  // ",?" marks an @optional protocol property.
  if (PD->isOptional())
    S += ",?";

  if (PD->isReadOnly()) {
    S += ",R";
    // Read-only properties still record their ownership attribute.
    if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_copy)
      S += ",C";
    if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_retain)
      S += ",&";
    if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak)
      S += ",W";
  } else {
    switch (PD->getSetterKind()) {
    case ObjCPropertyDecl::Assign: break;
    case ObjCPropertyDecl::Copy:   S += ",C"; break;
    case ObjCPropertyDecl::Retain: S += ",&"; break;
    case ObjCPropertyDecl::Weak:   S += ",W"; break;
    }
  }

  // It really isn't clear at all what this means, since properties
  // are "dynamic by default".
  if (Dynamic)
    S += ",D";

  if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_nonatomic)
    S += ",N";

  if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_getter) {
    S += ",G";
    S += PD->getGetterName().getAsString();
  }

  if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_setter) {
    S += ",S";
    S += PD->getSetterName().getAsString();
  }

  // A synthesized property also records its backing ivar's name.
  if (SynthesizePID) {
    const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl();
    S += ",V";
    S += OID->getNameAsString();
  }

  // FIXME: OBJCGC: weak & strong
  return S;
}
8989
8990/// getLegacyIntegralTypeEncoding -
8991/// Another legacy compatibility encoding: 32-bit longs are encoded as
8992/// 'l' or 'L' , but not always. For typedefs, we need to use
8993/// 'i' or 'I' instead if encoding a struct field, or a pointer!
8994void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const {
8995 if (PointeeTy->getAs<TypedefType>()) {
8996 if (const auto *BT = PointeeTy->getAs<BuiltinType>()) {
8997 if (BT->getKind() == BuiltinType::ULong && getIntWidth(T: PointeeTy) == 32)
8998 PointeeTy = UnsignedIntTy;
8999 else
9000 if (BT->getKind() == BuiltinType::Long && getIntWidth(T: PointeeTy) == 32)
9001 PointeeTy = IntTy;
9002 }
9003 }
9004}
9005
9006void ASTContext::getObjCEncodingForType(QualType T, std::string& S,
9007 const FieldDecl *Field,
9008 QualType *NotEncodedT) const {
9009 // We follow the behavior of gcc, expanding structures which are
9010 // directly pointed to, and expanding embedded structures. Note that
9011 // these rules are sufficient to prevent recursive encoding of the
9012 // same type.
9013 getObjCEncodingForTypeImpl(t: T, S,
9014 Options: ObjCEncOptions()
9015 .setExpandPointedToStructures()
9016 .setExpandStructures()
9017 .setIsOutermostType(),
9018 Field, NotEncodedT);
9019}
9020
9021void ASTContext::getObjCEncodingForPropertyType(QualType T,
9022 std::string& S) const {
9023 // Encode result type.
9024 // GCC has some special rules regarding encoding of properties which
9025 // closely resembles encoding of ivars.
9026 getObjCEncodingForTypeImpl(t: T, S,
9027 Options: ObjCEncOptions()
9028 .setExpandPointedToStructures()
9029 .setExpandStructures()
9030 .setIsOutermostType()
9031 .setEncodingProperty(),
9032 /*Field=*/nullptr);
9033}
9034
/// Map a builtin type onto its single-character Objective-C @encode code.
/// For types that cannot (yet) be encoded this returns ' ' — possibly after
/// emitting a diagnostic — and it asserts on types that should never reach
/// @encode (ObjC builtins, OpenCL/placeholder types).
static char getObjCEncodingForPrimitiveType(const ASTContext *C,
                                            const BuiltinType *BT) {
  BuiltinType::Kind kind = BT->getKind();
  switch (kind) {
  case BuiltinType::Void: return 'v';
  case BuiltinType::Bool: return 'B';
  case BuiltinType::Char8:
  case BuiltinType::Char_U:
  case BuiltinType::UChar: return 'C';
  case BuiltinType::Char16:
  case BuiltinType::UShort: return 'S';
  case BuiltinType::Char32:
  case BuiltinType::UInt: return 'I';
  // (unsigned) long is target-dependent: 'L'/'l' when 32-bit, else the
  // long-long codes 'Q'/'q'.
  case BuiltinType::ULong:
    return C->getTargetInfo().getLongWidth() == 32 ? 'L' : 'Q';
  case BuiltinType::UInt128: return 'T';
  case BuiltinType::ULongLong: return 'Q';
  case BuiltinType::Char_S:
  case BuiltinType::SChar: return 'c';
  case BuiltinType::Short: return 's';
  case BuiltinType::WChar_S:
  case BuiltinType::WChar_U:
  case BuiltinType::Int: return 'i';
  case BuiltinType::Long:
    return C->getTargetInfo().getLongWidth() == 32 ? 'l' : 'q';
  case BuiltinType::LongLong: return 'q';
  case BuiltinType::Int128: return 't';
  case BuiltinType::Float: return 'f';
  case BuiltinType::Double: return 'd';
  case BuiltinType::LongDouble: return 'D';
  case BuiltinType::NullPtr: return '*'; // like char*

  // Extended floating-point and fixed-point types have no encoding yet;
  // they silently fall back to ' '.
  case BuiltinType::BFloat16:
  case BuiltinType::Float16:
  case BuiltinType::Float128:
  case BuiltinType::Ibm128:
  case BuiltinType::Half:
  case BuiltinType::ShortAccum:
  case BuiltinType::Accum:
  case BuiltinType::LongAccum:
  case BuiltinType::UShortAccum:
  case BuiltinType::UAccum:
  case BuiltinType::ULongAccum:
  case BuiltinType::ShortFract:
  case BuiltinType::Fract:
  case BuiltinType::LongFract:
  case BuiltinType::UShortFract:
  case BuiltinType::UFract:
  case BuiltinType::ULongFract:
  case BuiltinType::SatShortAccum:
  case BuiltinType::SatAccum:
  case BuiltinType::SatLongAccum:
  case BuiltinType::SatUShortAccum:
  case BuiltinType::SatUAccum:
  case BuiltinType::SatULongAccum:
  case BuiltinType::SatShortFract:
  case BuiltinType::SatFract:
  case BuiltinType::SatLongFract:
  case BuiltinType::SatUShortFract:
  case BuiltinType::SatUFract:
  case BuiltinType::SatULongFract:
    // FIXME: potentially need @encodes for these!
    return ' ';

  // Target-specific vector/reference types: report an explicit error since
  // silently emitting ' ' would produce a corrupt encoding.
#define SVE_TYPE(Name, Id, SingletonId) \
  case BuiltinType::Id:
#include "clang/Basic/AArch64ACLETypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) case BuiltinType::Id:
#include "clang/Basic/AMDGPUTypes.def"
  {
    DiagnosticsEngine &Diags = C->getDiagnostics();
    unsigned DiagID = Diags.getCustomDiagID(L: DiagnosticsEngine::Error,
                                            FormatString: "cannot yet @encode type %0");
    Diags.Report(DiagID) << BT->getName(Policy: C->getPrintingPolicy());
    return ' ';
  }

  // id/Class/SEL are handled structurally by the caller and must never be
  // encoded as "primitive" builtins.
  case BuiltinType::ObjCId:
  case BuiltinType::ObjCClass:
  case BuiltinType::ObjCSel:
    llvm_unreachable("@encoding ObjC primitive type");

  // OpenCL and placeholder types don't need @encodings.
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
  case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
  case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
  case BuiltinType::OCLEvent:
  case BuiltinType::OCLClkEvent:
  case BuiltinType::OCLQueue:
  case BuiltinType::OCLReserveID:
  case BuiltinType::OCLSampler:
  case BuiltinType::Dependent:
#define PPC_VECTOR_TYPE(Name, Id, Size) \
  case BuiltinType::Id:
#include "clang/Basic/PPCTypes.def"
#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/HLSLIntangibleTypes.def"
#define BUILTIN_TYPE(KIND, ID)
#define PLACEHOLDER_TYPE(KIND, ID) \
  case BuiltinType::KIND:
#include "clang/AST/BuiltinTypes.def"
    llvm_unreachable("invalid builtin type for @encode");
  }
  llvm_unreachable("invalid BuiltinType::Kind value");
}
9147
9148static char ObjCEncodingForEnumDecl(const ASTContext *C, const EnumDecl *ED) {
9149 EnumDecl *Enum = ED->getDefinitionOrSelf();
9150
9151 // The encoding of an non-fixed enum type is always 'i', regardless of size.
9152 if (!Enum->isFixed())
9153 return 'i';
9154
9155 // The encoding of a fixed enum type matches its fixed underlying type.
9156 const auto *BT = Enum->getIntegerType()->castAs<BuiltinType>();
9157 return getObjCEncodingForPrimitiveType(C, BT);
9158}
9159
9160static void EncodeBitField(const ASTContext *Ctx, std::string& S,
9161 QualType T, const FieldDecl *FD) {
9162 assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl");
9163 S += 'b';
9164 // The NeXT runtime encodes bit fields as b followed by the number of bits.
9165 // The GNU runtime requires more information; bitfields are encoded as b,
9166 // then the offset (in bits) of the first element, then the type of the
9167 // bitfield, then the size in bits. For example, in this structure:
9168 //
9169 // struct
9170 // {
9171 // int integer;
9172 // int flags:2;
9173 // };
9174 // On a 32-bit system, the encoding for flags would be b2 for the NeXT
9175 // runtime, but b32i2 for the GNU runtime. The reason for this extra
9176 // information is not especially sensible, but we're stuck with it for
9177 // compatibility with GCC, although providing it breaks anything that
9178 // actually uses runtime introspection and wants to work on both runtimes...
9179 if (Ctx->getLangOpts().ObjCRuntime.isGNUFamily()) {
9180 uint64_t Offset;
9181
9182 if (const auto *IVD = dyn_cast<ObjCIvarDecl>(Val: FD)) {
9183 Offset = Ctx->lookupFieldBitOffset(OID: IVD->getContainingInterface(), Ivar: IVD);
9184 } else {
9185 const RecordDecl *RD = FD->getParent();
9186 const ASTRecordLayout &RL = Ctx->getASTRecordLayout(D: RD);
9187 Offset = RL.getFieldOffset(FieldNo: FD->getFieldIndex());
9188 }
9189
9190 S += llvm::utostr(X: Offset);
9191
9192 if (const auto *ET = T->getAsCanonical<EnumType>())
9193 S += ObjCEncodingForEnumDecl(C: Ctx, ED: ET->getDecl());
9194 else {
9195 const auto *BT = T->castAs<BuiltinType>();
9196 S += getObjCEncodingForPrimitiveType(C: Ctx, BT);
9197 }
9198 }
9199 S += llvm::utostr(X: FD->getBitWidthValue());
9200}
9201
9202// Helper function for determining whether the encoded type string would include
9203// a template specialization type.
9204static bool hasTemplateSpecializationInEncodedString(const Type *T,
9205 bool VisitBasesAndFields) {
9206 T = T->getBaseElementTypeUnsafe();
9207
9208 if (auto *PT = T->getAs<PointerType>())
9209 return hasTemplateSpecializationInEncodedString(
9210 T: PT->getPointeeType().getTypePtr(), VisitBasesAndFields: false);
9211
9212 auto *CXXRD = T->getAsCXXRecordDecl();
9213
9214 if (!CXXRD)
9215 return false;
9216
9217 if (isa<ClassTemplateSpecializationDecl>(Val: CXXRD))
9218 return true;
9219
9220 if (!CXXRD->hasDefinition() || !VisitBasesAndFields)
9221 return false;
9222
9223 for (const auto &B : CXXRD->bases())
9224 if (hasTemplateSpecializationInEncodedString(T: B.getType().getTypePtr(),
9225 VisitBasesAndFields: true))
9226 return true;
9227
9228 for (auto *FD : CXXRD->fields())
9229 if (hasTemplateSpecializationInEncodedString(T: FD->getType().getTypePtr(),
9230 VisitBasesAndFields: true))
9231 return true;
9232
9233 return false;
9234}
9235
// FIXME: Use SmallString for accumulating string.
/// Core @encode worker: appends the Objective-C type encoding of \p T to
/// \p S. \p Options controls structure expansion, property/class-name
/// emission, and outermost-type handling. \p FD, when non-null, is the
/// field being encoded (enables bit-field handling and field-name quoting).
/// If a sub-type has no encoding and \p NotEncodedT is non-null, it receives
/// that type so the caller can diagnose.
void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
                                            const ObjCEncOptions Options,
                                            const FieldDecl *FD,
                                            QualType *NotEncodedT) const {
  // Dispatch on the canonical type; type sugar is mostly ignored, except
  // where noted below (e.g. typedef-sensitive read-only pointer handling).
  CanQualType CT = getCanonicalType(T);
  switch (CT->getTypeClass()) {
  case Type::Builtin:
  case Type::Enum:
    // Bit-fields use the special 'b' encoding instead of the plain code.
    if (FD && FD->isBitField())
      return EncodeBitField(Ctx: this, S, T, FD);
    if (const auto *BT = dyn_cast<BuiltinType>(Val&: CT))
      S += getObjCEncodingForPrimitiveType(C: this, BT);
    else
      S += ObjCEncodingForEnumDecl(C: this, ED: cast<EnumType>(Val&: CT)->getDecl());
    return;

  case Type::Complex:
    // 'j' prefix followed by the element type.
    S += 'j';
    getObjCEncodingForTypeImpl(T: T->castAs<ComplexType>()->getElementType(), S,
                               Options: ObjCEncOptions(),
                               /*Field=*/FD: nullptr);
    return;

  case Type::Atomic:
    // 'A' prefix followed by the value type.
    S += 'A';
    getObjCEncodingForTypeImpl(T: T->castAs<AtomicType>()->getValueType(), S,
                               Options: ObjCEncOptions(),
                               /*Field=*/FD: nullptr);
    return;

  // encoding for pointer or reference types.
  case Type::Pointer:
  case Type::LValueReference:
  case Type::RValueReference: {
    QualType PointeeTy;
    if (isa<PointerType>(Val: CT)) {
      const auto *PT = T->castAs<PointerType>();
      // SEL* is encoded as ':' rather than as a generic pointer.
      if (PT->isObjCSelType()) {
        S += ':';
        return;
      }
      PointeeTy = PT->getPointeeType();
    } else {
      PointeeTy = T->castAs<ReferenceType>()->getPointeeType();
    }

    bool isReadOnly = false;
    // For historical/compatibility reasons, the read-only qualifier of the
    // pointee gets emitted _before_ the '^'. The read-only qualifier of
    // the pointer itself gets ignored, _unless_ we are looking at a typedef!
    // Also, do not emit the 'r' for anything but the outermost type!
    if (T->getAs<TypedefType>()) {
      if (Options.IsOutermostType() && T.isConstQualified()) {
        isReadOnly = true;
        S += 'r';
      }
    } else if (Options.IsOutermostType()) {
      // Strip pointers to find the innermost pointee; its constness decides
      // whether 'r' is emitted.
      QualType P = PointeeTy;
      while (auto PT = P->getAs<PointerType>())
        P = PT->getPointeeType();
      if (P.isConstQualified()) {
        isReadOnly = true;
        S += 'r';
      }
    }
    if (isReadOnly) {
      // Another legacy compatibility encoding. Some ObjC qualifier and type
      // combinations need to be rearranged.
      // Rewrite "in const" from "nr" to "rn"
      if (StringRef(S).ends_with(Suffix: "nr"))
        S.replace(i1: S.end()-2, i2: S.end(), s: "rn");
    }

    if (PointeeTy->isCharType()) {
      // char pointer types should be encoded as '*' unless it is a
      // type that has been typedef'd to 'BOOL'.
      if (!isTypeTypedefedAsBOOL(T: PointeeTy)) {
        S += '*';
        return;
      }
    } else if (const auto *RTy = PointeeTy->getAsCanonical<RecordType>()) {
      const IdentifierInfo *II = RTy->getDecl()->getIdentifier();
      // GCC binary compat: Need to convert "struct objc_class *" to "#".
      if (II == &Idents.get(Name: "objc_class")) {
        S += '#';
        return;
      }
      // GCC binary compat: Need to convert "struct objc_object *" to "@".
      if (II == &Idents.get(Name: "objc_object")) {
        S += '@';
        return;
      }
      // If the encoded string for the class includes template names, just emit
      // "^v" for pointers to the class.
      if (getLangOpts().CPlusPlus &&
          (!getLangOpts().EncodeCXXClassTemplateSpec &&
           hasTemplateSpecializationInEncodedString(
               T: RTy, VisitBasesAndFields: Options.ExpandPointedToStructures()))) {
        S += "^v";
        return;
      }
      // fall through...
    }
    S += '^';
    // 32-bit long behind a typedef encodes as int for legacy compatibility.
    getLegacyIntegralTypeEncoding(PointeeTy);

    // The pointee only expands structures when the caller asked for
    // pointed-to expansion; all other options are dropped.
    ObjCEncOptions NewOptions;
    if (Options.ExpandPointedToStructures())
      NewOptions.setExpandStructures();
    getObjCEncodingForTypeImpl(T: PointeeTy, S, Options: NewOptions,
                               /*Field=*/FD: nullptr, NotEncodedT);
    return;
  }

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray: {
    const auto *AT = cast<ArrayType>(Val&: CT);

    if (isa<IncompleteArrayType>(Val: AT) && !Options.IsStructField()) {
      // Incomplete arrays are encoded as a pointer to the array element.
      S += '^';

      getObjCEncodingForTypeImpl(
          T: AT->getElementType(), S,
          Options: Options.keepingOnly(Mask: ObjCEncOptions().setExpandStructures()), FD);
    } else {
      // "[<count><element>]" form.
      S += '[';

      if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: AT))
        S += llvm::utostr(X: CAT->getZExtSize());
      else {
        //Variable length arrays are encoded as a regular array with 0 elements.
        assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) &&
               "Unknown array type!");
        S += '0';
      }

      getObjCEncodingForTypeImpl(
          T: AT->getElementType(), S,
          Options: Options.keepingOnly(Mask: ObjCEncOptions().setExpandStructures()), FD,
          NotEncodedT);
      S += ']';
    }
    return;
  }

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // Function types are opaque to the runtime.
    S += '?';
    return;

  case Type::Record: {
    // "{Name=<fields>}" for structs, "(Name=<fields>)" for unions.
    RecordDecl *RDecl = cast<RecordType>(Val&: CT)->getDecl();
    S += RDecl->isUnion() ? '(' : '{';
    // Anonymous structures print as '?'
    if (const IdentifierInfo *II = RDecl->getIdentifier()) {
      S += II->getName();
      // Template specializations include their template arguments in the
      // name, e.g. "{vector<int, allocator<int>>=...}".
      if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(Val: RDecl)) {
        const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
        llvm::raw_string_ostream OS(S);
        printTemplateArgumentList(OS, Args: TemplateArgs.asArray(),
                                  Policy: getPrintingPolicy());
      }
    } else {
      S += '?';
    }
    if (Options.ExpandStructures()) {
      S += '=';
      if (!RDecl->isUnion()) {
        // Structs (and C++ classes, including bases) use the layout-aware
        // structure encoder.
        getObjCEncodingForStructureImpl(RD: RDecl, S, Field: FD, includeVBases: true, NotEncodedT);
      } else {
        // Unions simply list each member in declaration order.
        for (const auto *Field : RDecl->fields()) {
          // Field names are only emitted when encoding for a field/ivar.
          if (FD) {
            S += '"';
            S += Field->getNameAsString();
            S += '"';
          }

          // Special case bit-fields.
          if (Field->isBitField()) {
            getObjCEncodingForTypeImpl(T: Field->getType(), S,
                                       Options: ObjCEncOptions().setExpandStructures(),
                                       FD: Field);
          } else {
            QualType qt = Field->getType();
            getLegacyIntegralTypeEncoding(PointeeTy&: qt);
            getObjCEncodingForTypeImpl(
                T: qt, S,
                Options: ObjCEncOptions().setExpandStructures().setIsStructField(), FD,
                NotEncodedT);
          }
        }
      }
    }
    S += RDecl->isUnion() ? ')' : '}';
    return;
  }

  case Type::BlockPointer: {
    const auto *BT = T->castAs<BlockPointerType>();
    S += "@?"; // Unlike a pointer-to-function, which is "^?".
    if (Options.EncodeBlockParameters()) {
      const auto *FT = BT->getPointeeType()->castAs<FunctionType>();

      // Extended encoding: "<ret@?params>".
      S += '<';
      // Block return type
      getObjCEncodingForTypeImpl(T: FT->getReturnType(), S,
                                 Options: Options.forComponentType(), FD, NotEncodedT);
      // Block self
      S += "@?";
      // Block parameters
      if (const auto *FPT = dyn_cast<FunctionProtoType>(Val: FT)) {
        for (const auto &I : FPT->param_types())
          getObjCEncodingForTypeImpl(T: I, S, Options: Options.forComponentType(), FD,
                                     NotEncodedT);
      }
      S += '>';
    }
    return;
  }

  case Type::ObjCObject: {
    // hack to match legacy encoding of *id and *Class
    QualType Ty = getObjCObjectPointerType(ObjectT: CT);
    if (Ty->isObjCIdType()) {
      S += "{objc_object=}";
      return;
    }
    else if (Ty->isObjCClassType()) {
      S += "{objc_class=}";
      return;
    }
    // TODO: Double check to make sure this intentionally falls through.
    [[fallthrough]];
  }

  case Type::ObjCInterface: {
    // Ignore protocol qualifiers when mangling at this level.
    // @encode(class_name)
    ObjCInterfaceDecl *OI = T->castAs<ObjCObjectType>()->getInterface();
    S += '{';
    S += OI->getObjCRuntimeNameAsString();
    if (Options.ExpandStructures()) {
      S += '=';
      // Collect all ivars, including those from superclasses.
      SmallVector<const ObjCIvarDecl*, 32> Ivars;
      DeepCollectObjCIvars(OI, leafClass: true, Ivars);
      for (unsigned i = 0, e = Ivars.size(); i != e; ++i) {
        const FieldDecl *Field = Ivars[i];
        if (Field->isBitField())
          getObjCEncodingForTypeImpl(T: Field->getType(), S,
                                     Options: ObjCEncOptions().setExpandStructures(),
                                     FD: Field);
        else
          getObjCEncodingForTypeImpl(T: Field->getType(), S,
                                     Options: ObjCEncOptions().setExpandStructures(), FD,
                                     NotEncodedT);
      }
    }
    S += '}';
    return;
  }

  case Type::ObjCObjectPointer: {
    const auto *OPT = T->castAs<ObjCObjectPointerType>();
    // Plain 'id' is '@'.
    if (OPT->isObjCIdType()) {
      S += '@';
      return;
    }

    if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) {
      // FIXME: Consider if we need to output qualifiers for 'Class<p>'.
      // Since this is a binary compatibility issue, need to consult with
      // runtime folks. Fortunately, this is a *very* obscure construct.
      S += '#';
      return;
    }

    if (OPT->isObjCQualifiedIdType()) {
      // id<Protos>: encode as id, then optionally the protocol list.
      getObjCEncodingForTypeImpl(
          T: getObjCIdType(), S,
          Options: Options.keepingOnly(Mask: ObjCEncOptions()
                                   .setExpandPointedToStructures()
                                   .setExpandStructures()),
          FD);
      if (FD || Options.EncodingProperty() || Options.EncodeClassNames()) {
        // Note that we do extended encoding of protocol qualifier list
        // Only when doing ivar or property encoding.
        S += '"';
        for (const auto *I : OPT->quals()) {
          S += '<';
          S += I->getObjCRuntimeNameAsString();
          S += '>';
        }
        S += '"';
      }
      return;
    }

    // Pointer to a concrete interface: '@', plus '"Name<Protos>"' when
    // encoding an ivar, property, or when class names were requested.
    S += '@';
    if (OPT->getInterfaceDecl() &&
        (FD || Options.EncodingProperty() || Options.EncodeClassNames())) {
      S += '"';
      S += OPT->getInterfaceDecl()->getObjCRuntimeNameAsString();
      for (const auto *I : OPT->quals()) {
        S += '<';
        S += I->getObjCRuntimeNameAsString();
        S += '>';
      }
      S += '"';
    }
    return;
  }

  // gcc just blithely ignores member pointers.
  // FIXME: we should do better than that. 'M' is available.
  case Type::MemberPointer:
  // This matches gcc's encoding, even though technically it is insufficient.
  //FIXME. We should do a better job than gcc.
  case Type::Vector:
  case Type::ExtVector:
    // Until we have a coherent encoding of these three types, issue warning.
    if (NotEncodedT)
      *NotEncodedT = T;
    return;

  case Type::ConstantMatrix:
    if (NotEncodedT)
      *NotEncodedT = T;
    return;

  case Type::BitInt:
    if (NotEncodedT)
      *NotEncodedT = T;
    return;

  // We could see an undeduced auto type here during error recovery.
  // Just ignore it.
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    return;

  case Type::HLSLAttributedResource:
  case Type::HLSLInlineSpirv:
    llvm_unreachable("unexpected type");

  case Type::ArrayParameter:
  case Type::Pipe:
#define ABSTRACT_TYPE(KIND, BASE)
#define TYPE(KIND, BASE)
#define DEPENDENT_TYPE(KIND, BASE) \
  case Type::KIND:
#define NON_CANONICAL_TYPE(KIND, BASE) \
  case Type::KIND:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(KIND, BASE) \
  case Type::KIND:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("@encode for dependent type!");
  }
  llvm_unreachable("bad type kind!");
}
9598
/// Encode the fields (and, for C++, the bases) of \p RDecl in layout order,
/// appending to \p S. Non-virtual bases are expanded inline; virtual bases
/// are only included when \p includeVBases is set (i.e. for the most-derived
/// class). Field names are quoted only when \p FD is non-null (ivar/property
/// encoding). Unencodable sub-types are reported via \p NotEncodedT.
void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl,
                                                 std::string &S,
                                                 const FieldDecl *FD,
                                                 bool includeVBases,
                                                 QualType *NotEncodedT) const {
  assert(RDecl && "Expected non-null RecordDecl");
  assert(!RDecl->isUnion() && "Should not be called for unions");
  // Nothing to emit for incomplete or invalid records.
  if (!RDecl->getDefinition() || RDecl->getDefinition()->isInvalidDecl())
    return;

  const auto *CXXRec = dyn_cast<CXXRecordDecl>(Val: RDecl);
  // Members sorted by bit offset; a multimap because a base and a field can
  // share an offset (e.g. empty-base layout).
  std::multimap<uint64_t, NamedDecl *> FieldOrBaseOffsets;
  const ASTRecordLayout &layout = getASTRecordLayout(D: RDecl);

  // Collect non-virtual bases at their layout offsets.
  if (CXXRec) {
    for (const auto &BI : CXXRec->bases()) {
      if (!BI.isVirtual()) {
        CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl();
        if (base->isEmpty())
          continue;
        uint64_t offs = toBits(CharSize: layout.getBaseClassOffset(Base: base));
        FieldOrBaseOffsets.insert(position: FieldOrBaseOffsets.upper_bound(x: offs),
                                  x: std::make_pair(x&: offs, y&: base));
      }
    }
  }

  // Collect the record's own fields, skipping zero-size (but not zero-length
  // bit-field) members.
  for (FieldDecl *Field : RDecl->fields()) {
    if (!Field->isZeroLengthBitField() && Field->isZeroSize(Ctx: *this))
      continue;
    uint64_t offs = layout.getFieldOffset(FieldNo: Field->getFieldIndex());
    FieldOrBaseOffsets.insert(position: FieldOrBaseOffsets.upper_bound(x: offs),
                              x: std::make_pair(x&: offs, y&: Field));
  }

  // Virtual bases go last; skip any that land inside the non-virtual area
  // or collide with an already-recorded offset.
  if (CXXRec && includeVBases) {
    for (const auto &BI : CXXRec->vbases()) {
      CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl();
      if (base->isEmpty())
        continue;
      uint64_t offs = toBits(CharSize: layout.getVBaseClassOffset(VBase: base));
      if (offs >= uint64_t(toBits(CharSize: layout.getNonVirtualSize())) &&
          FieldOrBaseOffsets.find(x: offs) == FieldOrBaseOffsets.end())
        FieldOrBaseOffsets.insert(position: FieldOrBaseOffsets.end(),
                                  x: std::make_pair(x&: offs, y&: base));
    }
  }

  // The size we encode up to: the full object for the most-derived class,
  // only the non-virtual part when expanding a base.
  CharUnits size;
  if (CXXRec) {
    size = includeVBases ? layout.getSize() : layout.getNonVirtualSize();
  } else {
    size = layout.getSize();
  }

#ifndef NDEBUG
  // Debug-only running offset used to sanity-check against the layout.
  uint64_t CurOffs = 0;
#endif
  std::multimap<uint64_t, NamedDecl *>::iterator
    CurLayObj = FieldOrBaseOffsets.begin();

  // Dynamic classes with nothing at offset 0 carry a vtable pointer there;
  // encode it as "^^?" (pointer to pointer to unknown).
  if (CXXRec && CXXRec->isDynamicClass() &&
      (CurLayObj == FieldOrBaseOffsets.end() || CurLayObj->first != 0)) {
    if (FD) {
      S += "\"_vptr$";
      std::string recname = CXXRec->getNameAsString();
      if (recname.empty()) recname = "?";
      S += recname;
      S += '"';
    }
    S += "^^?";
#ifndef NDEBUG
    CurOffs += getTypeSize(VoidPtrTy);
#endif
  }

  if (!RDecl->hasFlexibleArrayMember()) {
    // Mark the end of the structure.
    uint64_t offs = toBits(CharSize: size);
    FieldOrBaseOffsets.insert(position: FieldOrBaseOffsets.upper_bound(x: offs),
                              x: std::make_pair(x&: offs, y: nullptr));
  }

  for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) {
#ifndef NDEBUG
    assert(CurOffs <= CurLayObj->first);
    if (CurOffs < CurLayObj->first) {
      uint64_t padding = CurLayObj->first - CurOffs;
      // FIXME: There doesn't seem to be a way to indicate in the encoding that
      // packing/alignment of members is different that normal, in which case
      // the encoding will be out-of-sync with the real layout.
      // If the runtime switches to just consider the size of types without
      // taking into account alignment, we could make padding explicit in the
      // encoding (e.g. using arrays of chars). The encoding strings would be
      // longer then though.
      CurOffs += padding;
    }
#endif

    NamedDecl *dcl = CurLayObj->second;
    if (!dcl)
      break; // reached end of structure.

    if (auto *base = dyn_cast<CXXRecordDecl>(Val: dcl)) {
      // We expand the bases without their virtual bases since those are going
      // in the initial structure. Note that this differs from gcc which
      // expands virtual bases each time one is encountered in the hierarchy,
      // making the encoding type bigger than it really is.
      getObjCEncodingForStructureImpl(RDecl: base, S, FD, /*includeVBases*/false,
                                      NotEncodedT);
      assert(!base->isEmpty());
#ifndef NDEBUG
      CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize());
#endif
    } else {
      const auto *field = cast<FieldDecl>(Val: dcl);
      // Quote the field name when encoding for an ivar/property.
      if (FD) {
        S += '"';
        S += field->getNameAsString();
        S += '"';
      }

      if (field->isBitField()) {
        EncodeBitField(Ctx: this, S, T: field->getType(), FD: field);
#ifndef NDEBUG
        CurOffs += field->getBitWidthValue();
#endif
      } else {
        QualType qt = field->getType();
        // 32-bit typedef'd long encodes as int for legacy compatibility.
        getLegacyIntegralTypeEncoding(PointeeTy&: qt);
        getObjCEncodingForTypeImpl(
            T: qt, S, Options: ObjCEncOptions().setExpandStructures().setIsStructField(),
            FD, NotEncodedT);
#ifndef NDEBUG
        CurOffs += getTypeSize(field->getType());
#endif
      }
    }
  }
}
9739
9740void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT,
9741 std::string& S) const {
9742 if (QT & Decl::OBJC_TQ_In)
9743 S += 'n';
9744 if (QT & Decl::OBJC_TQ_Inout)
9745 S += 'N';
9746 if (QT & Decl::OBJC_TQ_Out)
9747 S += 'o';
9748 if (QT & Decl::OBJC_TQ_Bycopy)
9749 S += 'O';
9750 if (QT & Decl::OBJC_TQ_Byref)
9751 S += 'R';
9752 if (QT & Decl::OBJC_TQ_Oneway)
9753 S += 'V';
9754}
9755
9756TypedefDecl *ASTContext::getObjCIdDecl() const {
9757 if (!ObjCIdDecl) {
9758 QualType T = getObjCObjectType(BaseType: ObjCBuiltinIdTy, Protocols: {}, NumProtocols: {});
9759 T = getObjCObjectPointerType(ObjectT: T);
9760 ObjCIdDecl = buildImplicitTypedef(T, Name: "id");
9761 }
9762 return ObjCIdDecl;
9763}
9764
9765TypedefDecl *ASTContext::getObjCSelDecl() const {
9766 if (!ObjCSelDecl) {
9767 QualType T = getPointerType(T: ObjCBuiltinSelTy);
9768 ObjCSelDecl = buildImplicitTypedef(T, Name: "SEL");
9769 }
9770 return ObjCSelDecl;
9771}
9772
9773TypedefDecl *ASTContext::getObjCClassDecl() const {
9774 if (!ObjCClassDecl) {
9775 QualType T = getObjCObjectType(BaseType: ObjCBuiltinClassTy, Protocols: {}, NumProtocols: {});
9776 T = getObjCObjectPointerType(ObjectT: T);
9777 ObjCClassDecl = buildImplicitTypedef(T, Name: "Class");
9778 }
9779 return ObjCClassDecl;
9780}
9781
9782ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const {
9783 if (!ObjCProtocolClassDecl) {
9784 ObjCProtocolClassDecl
9785 = ObjCInterfaceDecl::Create(C: *this, DC: getTranslationUnitDecl(),
9786 atLoc: SourceLocation(),
9787 Id: &Idents.get(Name: "Protocol"),
9788 /*typeParamList=*/nullptr,
9789 /*PrevDecl=*/nullptr,
9790 ClassLoc: SourceLocation(), isInternal: true);
9791 }
9792
9793 return ObjCProtocolClassDecl;
9794}
9795
9796PointerAuthQualifier ASTContext::getObjCMemberSelTypePtrAuth() {
9797 if (!getLangOpts().PointerAuthObjcInterfaceSel)
9798 return PointerAuthQualifier();
9799 return PointerAuthQualifier::Create(
9800 Key: getLangOpts().PointerAuthObjcInterfaceSelKey,
9801 /*isAddressDiscriminated=*/IsAddressDiscriminated: true, ExtraDiscriminator: SelPointerConstantDiscriminator,
9802 AuthenticationMode: PointerAuthenticationMode::SignAndAuth,
9803 /*isIsaPointer=*/IsIsaPointer: false,
9804 /*authenticatesNullValues=*/AuthenticatesNullValues: false);
9805}
9806
9807//===----------------------------------------------------------------------===//
9808// __builtin_va_list Construction Functions
9809//===----------------------------------------------------------------------===//
9810
9811static TypedefDecl *CreateCharPtrNamedVaListDecl(const ASTContext *Context,
9812 StringRef Name) {
9813 // typedef char* __builtin[_ms]_va_list;
9814 QualType T = Context->getPointerType(T: Context->CharTy);
9815 return Context->buildImplicitTypedef(T, Name);
9816}
9817
9818static TypedefDecl *CreateMSVaListDecl(const ASTContext *Context) {
9819 return CreateCharPtrNamedVaListDecl(Context, Name: "__builtin_ms_va_list");
9820}
9821
9822static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) {
9823 return CreateCharPtrNamedVaListDecl(Context, Name: "__builtin_va_list");
9824}
9825
9826static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) {
9827 // typedef void* __builtin_va_list;
9828 QualType T = Context->getPointerType(T: Context->VoidTy);
9829 return Context->buildImplicitTypedef(T, Name: "__builtin_va_list");
9830}
9831
9832static TypedefDecl *
9833CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) {
9834 // struct __va_list
9835 RecordDecl *VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list");
9836 if (Context->getLangOpts().CPlusPlus) {
9837 // namespace std { struct __va_list {
9838 auto *NS = NamespaceDecl::Create(
9839 C&: const_cast<ASTContext &>(*Context), DC: Context->getTranslationUnitDecl(),
9840 /*Inline=*/false, StartLoc: SourceLocation(), IdLoc: SourceLocation(),
9841 Id: &Context->Idents.get(Name: "std"),
9842 /*PrevDecl=*/nullptr, /*Nested=*/false);
9843 NS->setImplicit();
9844 VaListTagDecl->setDeclContext(NS);
9845 }
9846
9847 VaListTagDecl->startDefinition();
9848
9849 const size_t NumFields = 5;
9850 QualType FieldTypes[NumFields];
9851 const char *FieldNames[NumFields];
9852
9853 // void *__stack;
9854 FieldTypes[0] = Context->getPointerType(T: Context->VoidTy);
9855 FieldNames[0] = "__stack";
9856
9857 // void *__gr_top;
9858 FieldTypes[1] = Context->getPointerType(T: Context->VoidTy);
9859 FieldNames[1] = "__gr_top";
9860
9861 // void *__vr_top;
9862 FieldTypes[2] = Context->getPointerType(T: Context->VoidTy);
9863 FieldNames[2] = "__vr_top";
9864
9865 // int __gr_offs;
9866 FieldTypes[3] = Context->IntTy;
9867 FieldNames[3] = "__gr_offs";
9868
9869 // int __vr_offs;
9870 FieldTypes[4] = Context->IntTy;
9871 FieldNames[4] = "__vr_offs";
9872
9873 // Create fields
9874 for (unsigned i = 0; i < NumFields; ++i) {
9875 FieldDecl *Field = FieldDecl::Create(C: const_cast<ASTContext &>(*Context),
9876 DC: VaListTagDecl,
9877 StartLoc: SourceLocation(),
9878 IdLoc: SourceLocation(),
9879 Id: &Context->Idents.get(Name: FieldNames[i]),
9880 T: FieldTypes[i], /*TInfo=*/nullptr,
9881 /*BitWidth=*/BW: nullptr,
9882 /*Mutable=*/false,
9883 InitStyle: ICIS_NoInit);
9884 Field->setAccess(AS_public);
9885 VaListTagDecl->addDecl(D: Field);
9886 }
9887 VaListTagDecl->completeDefinition();
9888 Context->VaListTagDecl = VaListTagDecl;
9889 CanQualType VaListTagType = Context->getCanonicalTagType(TD: VaListTagDecl);
9890
9891 // } __builtin_va_list;
9892 return Context->buildImplicitTypedef(T: VaListTagType, Name: "__builtin_va_list");
9893}
9894
9895static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) {
9896 // typedef struct __va_list_tag {
9897 RecordDecl *VaListTagDecl;
9898
9899 VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list_tag");
9900 VaListTagDecl->startDefinition();
9901
9902 const size_t NumFields = 5;
9903 QualType FieldTypes[NumFields];
9904 const char *FieldNames[NumFields];
9905
9906 // unsigned char gpr;
9907 FieldTypes[0] = Context->UnsignedCharTy;
9908 FieldNames[0] = "gpr";
9909
9910 // unsigned char fpr;
9911 FieldTypes[1] = Context->UnsignedCharTy;
9912 FieldNames[1] = "fpr";
9913
9914 // unsigned short reserved;
9915 FieldTypes[2] = Context->UnsignedShortTy;
9916 FieldNames[2] = "reserved";
9917
9918 // void* overflow_arg_area;
9919 FieldTypes[3] = Context->getPointerType(T: Context->VoidTy);
9920 FieldNames[3] = "overflow_arg_area";
9921
9922 // void* reg_save_area;
9923 FieldTypes[4] = Context->getPointerType(T: Context->VoidTy);
9924 FieldNames[4] = "reg_save_area";
9925
9926 // Create fields
9927 for (unsigned i = 0; i < NumFields; ++i) {
9928 FieldDecl *Field = FieldDecl::Create(C: *Context, DC: VaListTagDecl,
9929 StartLoc: SourceLocation(),
9930 IdLoc: SourceLocation(),
9931 Id: &Context->Idents.get(Name: FieldNames[i]),
9932 T: FieldTypes[i], /*TInfo=*/nullptr,
9933 /*BitWidth=*/BW: nullptr,
9934 /*Mutable=*/false,
9935 InitStyle: ICIS_NoInit);
9936 Field->setAccess(AS_public);
9937 VaListTagDecl->addDecl(D: Field);
9938 }
9939 VaListTagDecl->completeDefinition();
9940 Context->VaListTagDecl = VaListTagDecl;
9941 CanQualType VaListTagType = Context->getCanonicalTagType(TD: VaListTagDecl);
9942
9943 // } __va_list_tag;
9944 TypedefDecl *VaListTagTypedefDecl =
9945 Context->buildImplicitTypedef(T: VaListTagType, Name: "__va_list_tag");
9946
9947 QualType VaListTagTypedefType =
9948 Context->getTypedefType(Keyword: ElaboratedTypeKeyword::None,
9949 /*Qualifier=*/std::nullopt, Decl: VaListTagTypedefDecl);
9950
9951 // typedef __va_list_tag __builtin_va_list[1];
9952 llvm::APInt Size(Context->getTypeSize(T: Context->getSizeType()), 1);
9953 QualType VaListTagArrayType = Context->getConstantArrayType(
9954 EltTy: VaListTagTypedefType, ArySizeIn: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
9955 return Context->buildImplicitTypedef(T: VaListTagArrayType, Name: "__builtin_va_list");
9956}
9957
9958static TypedefDecl *
9959CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) {
9960 // struct __va_list_tag {
9961 RecordDecl *VaListTagDecl;
9962 VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list_tag");
9963 VaListTagDecl->startDefinition();
9964
9965 const size_t NumFields = 4;
9966 QualType FieldTypes[NumFields];
9967 const char *FieldNames[NumFields];
9968
9969 // unsigned gp_offset;
9970 FieldTypes[0] = Context->UnsignedIntTy;
9971 FieldNames[0] = "gp_offset";
9972
9973 // unsigned fp_offset;
9974 FieldTypes[1] = Context->UnsignedIntTy;
9975 FieldNames[1] = "fp_offset";
9976
9977 // void* overflow_arg_area;
9978 FieldTypes[2] = Context->getPointerType(T: Context->VoidTy);
9979 FieldNames[2] = "overflow_arg_area";
9980
9981 // void* reg_save_area;
9982 FieldTypes[3] = Context->getPointerType(T: Context->VoidTy);
9983 FieldNames[3] = "reg_save_area";
9984
9985 // Create fields
9986 for (unsigned i = 0; i < NumFields; ++i) {
9987 FieldDecl *Field = FieldDecl::Create(C: const_cast<ASTContext &>(*Context),
9988 DC: VaListTagDecl,
9989 StartLoc: SourceLocation(),
9990 IdLoc: SourceLocation(),
9991 Id: &Context->Idents.get(Name: FieldNames[i]),
9992 T: FieldTypes[i], /*TInfo=*/nullptr,
9993 /*BitWidth=*/BW: nullptr,
9994 /*Mutable=*/false,
9995 InitStyle: ICIS_NoInit);
9996 Field->setAccess(AS_public);
9997 VaListTagDecl->addDecl(D: Field);
9998 }
9999 VaListTagDecl->completeDefinition();
10000 Context->VaListTagDecl = VaListTagDecl;
10001 CanQualType VaListTagType = Context->getCanonicalTagType(TD: VaListTagDecl);
10002
10003 // };
10004
10005 // typedef struct __va_list_tag __builtin_va_list[1];
10006 llvm::APInt Size(Context->getTypeSize(T: Context->getSizeType()), 1);
10007 QualType VaListTagArrayType = Context->getConstantArrayType(
10008 EltTy: VaListTagType, ArySizeIn: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
10009 return Context->buildImplicitTypedef(T: VaListTagArrayType, Name: "__builtin_va_list");
10010}
10011
10012static TypedefDecl *
10013CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) {
10014 // struct __va_list
10015 RecordDecl *VaListDecl = Context->buildImplicitRecord(Name: "__va_list");
10016 if (Context->getLangOpts().CPlusPlus) {
10017 // namespace std { struct __va_list {
10018 NamespaceDecl *NS;
10019 NS = NamespaceDecl::Create(C&: const_cast<ASTContext &>(*Context),
10020 DC: Context->getTranslationUnitDecl(),
10021 /*Inline=*/false, StartLoc: SourceLocation(),
10022 IdLoc: SourceLocation(), Id: &Context->Idents.get(Name: "std"),
10023 /*PrevDecl=*/nullptr, /*Nested=*/false);
10024 NS->setImplicit();
10025 VaListDecl->setDeclContext(NS);
10026 }
10027
10028 VaListDecl->startDefinition();
10029
10030 // void * __ap;
10031 FieldDecl *Field = FieldDecl::Create(C: const_cast<ASTContext &>(*Context),
10032 DC: VaListDecl,
10033 StartLoc: SourceLocation(),
10034 IdLoc: SourceLocation(),
10035 Id: &Context->Idents.get(Name: "__ap"),
10036 T: Context->getPointerType(T: Context->VoidTy),
10037 /*TInfo=*/nullptr,
10038 /*BitWidth=*/BW: nullptr,
10039 /*Mutable=*/false,
10040 InitStyle: ICIS_NoInit);
10041 Field->setAccess(AS_public);
10042 VaListDecl->addDecl(D: Field);
10043
10044 // };
10045 VaListDecl->completeDefinition();
10046 Context->VaListTagDecl = VaListDecl;
10047
10048 // typedef struct __va_list __builtin_va_list;
10049 CanQualType T = Context->getCanonicalTagType(TD: VaListDecl);
10050 return Context->buildImplicitTypedef(T, Name: "__builtin_va_list");
10051}
10052
10053static TypedefDecl *
10054CreateSystemZBuiltinVaListDecl(const ASTContext *Context) {
10055 // struct __va_list_tag {
10056 RecordDecl *VaListTagDecl;
10057 VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list_tag");
10058 VaListTagDecl->startDefinition();
10059
10060 const size_t NumFields = 4;
10061 QualType FieldTypes[NumFields];
10062 const char *FieldNames[NumFields];
10063
10064 // long __gpr;
10065 FieldTypes[0] = Context->LongTy;
10066 FieldNames[0] = "__gpr";
10067
10068 // long __fpr;
10069 FieldTypes[1] = Context->LongTy;
10070 FieldNames[1] = "__fpr";
10071
10072 // void *__overflow_arg_area;
10073 FieldTypes[2] = Context->getPointerType(T: Context->VoidTy);
10074 FieldNames[2] = "__overflow_arg_area";
10075
10076 // void *__reg_save_area;
10077 FieldTypes[3] = Context->getPointerType(T: Context->VoidTy);
10078 FieldNames[3] = "__reg_save_area";
10079
10080 // Create fields
10081 for (unsigned i = 0; i < NumFields; ++i) {
10082 FieldDecl *Field = FieldDecl::Create(C: const_cast<ASTContext &>(*Context),
10083 DC: VaListTagDecl,
10084 StartLoc: SourceLocation(),
10085 IdLoc: SourceLocation(),
10086 Id: &Context->Idents.get(Name: FieldNames[i]),
10087 T: FieldTypes[i], /*TInfo=*/nullptr,
10088 /*BitWidth=*/BW: nullptr,
10089 /*Mutable=*/false,
10090 InitStyle: ICIS_NoInit);
10091 Field->setAccess(AS_public);
10092 VaListTagDecl->addDecl(D: Field);
10093 }
10094 VaListTagDecl->completeDefinition();
10095 Context->VaListTagDecl = VaListTagDecl;
10096 CanQualType VaListTagType = Context->getCanonicalTagType(TD: VaListTagDecl);
10097
10098 // };
10099
10100 // typedef __va_list_tag __builtin_va_list[1];
10101 llvm::APInt Size(Context->getTypeSize(T: Context->getSizeType()), 1);
10102 QualType VaListTagArrayType = Context->getConstantArrayType(
10103 EltTy: VaListTagType, ArySizeIn: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
10104
10105 return Context->buildImplicitTypedef(T: VaListTagArrayType, Name: "__builtin_va_list");
10106}
10107
10108static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) {
10109 // typedef struct __va_list_tag {
10110 RecordDecl *VaListTagDecl;
10111 VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list_tag");
10112 VaListTagDecl->startDefinition();
10113
10114 const size_t NumFields = 3;
10115 QualType FieldTypes[NumFields];
10116 const char *FieldNames[NumFields];
10117
10118 // void *CurrentSavedRegisterArea;
10119 FieldTypes[0] = Context->getPointerType(T: Context->VoidTy);
10120 FieldNames[0] = "__current_saved_reg_area_pointer";
10121
10122 // void *SavedRegAreaEnd;
10123 FieldTypes[1] = Context->getPointerType(T: Context->VoidTy);
10124 FieldNames[1] = "__saved_reg_area_end_pointer";
10125
10126 // void *OverflowArea;
10127 FieldTypes[2] = Context->getPointerType(T: Context->VoidTy);
10128 FieldNames[2] = "__overflow_area_pointer";
10129
10130 // Create fields
10131 for (unsigned i = 0; i < NumFields; ++i) {
10132 FieldDecl *Field = FieldDecl::Create(
10133 C: const_cast<ASTContext &>(*Context), DC: VaListTagDecl, StartLoc: SourceLocation(),
10134 IdLoc: SourceLocation(), Id: &Context->Idents.get(Name: FieldNames[i]), T: FieldTypes[i],
10135 /*TInfo=*/nullptr,
10136 /*BitWidth=*/BW: nullptr,
10137 /*Mutable=*/false, InitStyle: ICIS_NoInit);
10138 Field->setAccess(AS_public);
10139 VaListTagDecl->addDecl(D: Field);
10140 }
10141 VaListTagDecl->completeDefinition();
10142 Context->VaListTagDecl = VaListTagDecl;
10143 CanQualType VaListTagType = Context->getCanonicalTagType(TD: VaListTagDecl);
10144
10145 // } __va_list_tag;
10146 TypedefDecl *VaListTagTypedefDecl =
10147 Context->buildImplicitTypedef(T: VaListTagType, Name: "__va_list_tag");
10148
10149 QualType VaListTagTypedefType =
10150 Context->getTypedefType(Keyword: ElaboratedTypeKeyword::None,
10151 /*Qualifier=*/std::nullopt, Decl: VaListTagTypedefDecl);
10152
10153 // typedef __va_list_tag __builtin_va_list[1];
10154 llvm::APInt Size(Context->getTypeSize(T: Context->getSizeType()), 1);
10155 QualType VaListTagArrayType = Context->getConstantArrayType(
10156 EltTy: VaListTagTypedefType, ArySizeIn: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
10157
10158 return Context->buildImplicitTypedef(T: VaListTagArrayType, Name: "__builtin_va_list");
10159}
10160
10161static TypedefDecl *
10162CreateXtensaABIBuiltinVaListDecl(const ASTContext *Context) {
10163 // typedef struct __va_list_tag {
10164 RecordDecl *VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list_tag");
10165
10166 VaListTagDecl->startDefinition();
10167
10168 // int* __va_stk;
10169 // int* __va_reg;
10170 // int __va_ndx;
10171 constexpr size_t NumFields = 3;
10172 QualType FieldTypes[NumFields] = {Context->getPointerType(T: Context->IntTy),
10173 Context->getPointerType(T: Context->IntTy),
10174 Context->IntTy};
10175 const char *FieldNames[NumFields] = {"__va_stk", "__va_reg", "__va_ndx"};
10176
10177 // Create fields
10178 for (unsigned i = 0; i < NumFields; ++i) {
10179 FieldDecl *Field = FieldDecl::Create(
10180 C: *Context, DC: VaListTagDecl, StartLoc: SourceLocation(), IdLoc: SourceLocation(),
10181 Id: &Context->Idents.get(Name: FieldNames[i]), T: FieldTypes[i], /*TInfo=*/nullptr,
10182 /*BitWidth=*/BW: nullptr,
10183 /*Mutable=*/false, InitStyle: ICIS_NoInit);
10184 Field->setAccess(AS_public);
10185 VaListTagDecl->addDecl(D: Field);
10186 }
10187 VaListTagDecl->completeDefinition();
10188 Context->VaListTagDecl = VaListTagDecl;
10189 CanQualType VaListTagType = Context->getCanonicalTagType(TD: VaListTagDecl);
10190
10191 // } __va_list_tag;
10192 TypedefDecl *VaListTagTypedefDecl =
10193 Context->buildImplicitTypedef(T: VaListTagType, Name: "__builtin_va_list");
10194
10195 return VaListTagTypedefDecl;
10196}
10197
10198static TypedefDecl *CreateVaListDecl(const ASTContext *Context,
10199 TargetInfo::BuiltinVaListKind Kind) {
10200 switch (Kind) {
10201 case TargetInfo::CharPtrBuiltinVaList:
10202 return CreateCharPtrBuiltinVaListDecl(Context);
10203 case TargetInfo::VoidPtrBuiltinVaList:
10204 return CreateVoidPtrBuiltinVaListDecl(Context);
10205 case TargetInfo::AArch64ABIBuiltinVaList:
10206 return CreateAArch64ABIBuiltinVaListDecl(Context);
10207 case TargetInfo::PowerABIBuiltinVaList:
10208 return CreatePowerABIBuiltinVaListDecl(Context);
10209 case TargetInfo::X86_64ABIBuiltinVaList:
10210 return CreateX86_64ABIBuiltinVaListDecl(Context);
10211 case TargetInfo::AAPCSABIBuiltinVaList:
10212 return CreateAAPCSABIBuiltinVaListDecl(Context);
10213 case TargetInfo::SystemZBuiltinVaList:
10214 return CreateSystemZBuiltinVaListDecl(Context);
10215 case TargetInfo::HexagonBuiltinVaList:
10216 return CreateHexagonBuiltinVaListDecl(Context);
10217 case TargetInfo::XtensaABIBuiltinVaList:
10218 return CreateXtensaABIBuiltinVaListDecl(Context);
10219 }
10220
10221 llvm_unreachable("Unhandled __builtin_va_list type kind");
10222}
10223
10224TypedefDecl *ASTContext::getBuiltinVaListDecl() const {
10225 if (!BuiltinVaListDecl) {
10226 BuiltinVaListDecl = CreateVaListDecl(Context: this, Kind: Target->getBuiltinVaListKind());
10227 assert(BuiltinVaListDecl->isImplicit());
10228 }
10229
10230 return BuiltinVaListDecl;
10231}
10232
10233Decl *ASTContext::getVaListTagDecl() const {
10234 // Force the creation of VaListTagDecl by building the __builtin_va_list
10235 // declaration.
10236 if (!VaListTagDecl)
10237 (void)getBuiltinVaListDecl();
10238
10239 return VaListTagDecl;
10240}
10241
10242TypedefDecl *ASTContext::getBuiltinMSVaListDecl() const {
10243 if (!BuiltinMSVaListDecl)
10244 BuiltinMSVaListDecl = CreateMSVaListDecl(Context: this);
10245
10246 return BuiltinMSVaListDecl;
10247}
10248
10249bool ASTContext::canBuiltinBeRedeclared(const FunctionDecl *FD) const {
10250 // Allow redecl custom type checking builtin for HLSL.
10251 if (LangOpts.HLSL && FD->getBuiltinID() != Builtin::NotBuiltin &&
10252 BuiltinInfo.hasCustomTypechecking(ID: FD->getBuiltinID()))
10253 return true;
10254 // Allow redecl custom type checking builtin for SPIR-V.
10255 if (getTargetInfo().getTriple().isSPIROrSPIRV() &&
10256 BuiltinInfo.isTSBuiltin(ID: FD->getBuiltinID()) &&
10257 BuiltinInfo.hasCustomTypechecking(ID: FD->getBuiltinID()))
10258 return true;
10259 return BuiltinInfo.canBeRedeclared(ID: FD->getBuiltinID());
10260}
10261
10262void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) {
10263 assert(ObjCConstantStringType.isNull() &&
10264 "'NSConstantString' type already set!");
10265
10266 ObjCConstantStringType = getObjCInterfaceType(Decl);
10267}
10268
10269/// Retrieve the template name that corresponds to a non-empty
10270/// lookup.
10271TemplateName
10272ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin,
10273 UnresolvedSetIterator End) const {
10274 unsigned size = End - Begin;
10275 assert(size > 1 && "set is not overloaded!");
10276
10277 void *memory = Allocate(Size: sizeof(OverloadedTemplateStorage) +
10278 size * sizeof(FunctionTemplateDecl*));
10279 auto *OT = new (memory) OverloadedTemplateStorage(size);
10280
10281 NamedDecl **Storage = OT->getStorage();
10282 for (UnresolvedSetIterator I = Begin; I != End; ++I) {
10283 NamedDecl *D = *I;
10284 assert(isa<FunctionTemplateDecl>(D) ||
10285 isa<UnresolvedUsingValueDecl>(D) ||
10286 (isa<UsingShadowDecl>(D) &&
10287 isa<FunctionTemplateDecl>(D->getUnderlyingDecl())));
10288 *Storage++ = D;
10289 }
10290
10291 return TemplateName(OT);
10292}
10293
10294/// Retrieve a template name representing an unqualified-id that has been
10295/// assumed to name a template for ADL purposes.
10296TemplateName ASTContext::getAssumedTemplateName(DeclarationName Name) const {
10297 auto *OT = new (*this) AssumedTemplateStorage(Name);
10298 return TemplateName(OT);
10299}
10300
10301/// Retrieve the template name that represents a qualified
10302/// template name such as \c std::vector.
10303TemplateName ASTContext::getQualifiedTemplateName(NestedNameSpecifier Qualifier,
10304 bool TemplateKeyword,
10305 TemplateName Template) const {
10306 assert(Template.getKind() == TemplateName::Template ||
10307 Template.getKind() == TemplateName::UsingTemplate);
10308
10309 if (Template.getAsTemplateDecl()->getKind() == Decl::TemplateTemplateParm) {
10310 assert(!Qualifier && "unexpected qualified template template parameter");
10311 assert(TemplateKeyword == false);
10312 return Template;
10313 }
10314
10315 // FIXME: Canonicalization?
10316 llvm::FoldingSetNodeID ID;
10317 QualifiedTemplateName::Profile(ID, NNS: Qualifier, TemplateKeyword, TN: Template);
10318
10319 void *InsertPos = nullptr;
10320 QualifiedTemplateName *QTN =
10321 QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
10322 if (!QTN) {
10323 QTN = new (*this, alignof(QualifiedTemplateName))
10324 QualifiedTemplateName(Qualifier, TemplateKeyword, Template);
10325 QualifiedTemplateNames.InsertNode(N: QTN, InsertPos);
10326 }
10327
10328 return TemplateName(QTN);
10329}
10330
10331/// Retrieve the template name that represents a dependent
10332/// template name such as \c MetaFun::template operator+.
10333TemplateName
10334ASTContext::getDependentTemplateName(const DependentTemplateStorage &S) const {
10335 llvm::FoldingSetNodeID ID;
10336 S.Profile(ID);
10337
10338 void *InsertPos = nullptr;
10339 if (DependentTemplateName *QTN =
10340 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos))
10341 return TemplateName(QTN);
10342
10343 DependentTemplateName *QTN =
10344 new (*this, alignof(DependentTemplateName)) DependentTemplateName(S);
10345 DependentTemplateNames.InsertNode(N: QTN, InsertPos);
10346 return TemplateName(QTN);
10347}
10348
10349TemplateName ASTContext::getSubstTemplateTemplateParm(TemplateName Replacement,
10350 Decl *AssociatedDecl,
10351 unsigned Index,
10352 UnsignedOrNone PackIndex,
10353 bool Final) const {
10354 llvm::FoldingSetNodeID ID;
10355 SubstTemplateTemplateParmStorage::Profile(ID, Replacement, AssociatedDecl,
10356 Index, PackIndex, Final);
10357
10358 void *insertPos = nullptr;
10359 SubstTemplateTemplateParmStorage *subst
10360 = SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos&: insertPos);
10361
10362 if (!subst) {
10363 subst = new (*this) SubstTemplateTemplateParmStorage(
10364 Replacement, AssociatedDecl, Index, PackIndex, Final);
10365 SubstTemplateTemplateParms.InsertNode(N: subst, InsertPos: insertPos);
10366 }
10367
10368 return TemplateName(subst);
10369}
10370
10371TemplateName
10372ASTContext::getSubstTemplateTemplateParmPack(const TemplateArgument &ArgPack,
10373 Decl *AssociatedDecl,
10374 unsigned Index, bool Final) const {
10375 auto &Self = const_cast<ASTContext &>(*this);
10376 llvm::FoldingSetNodeID ID;
10377 SubstTemplateTemplateParmPackStorage::Profile(ID, Context&: Self, ArgPack,
10378 AssociatedDecl, Index, Final);
10379
10380 void *InsertPos = nullptr;
10381 SubstTemplateTemplateParmPackStorage *Subst
10382 = SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos);
10383
10384 if (!Subst) {
10385 Subst = new (*this) SubstTemplateTemplateParmPackStorage(
10386 ArgPack.pack_elements(), AssociatedDecl, Index, Final);
10387 SubstTemplateTemplateParmPacks.InsertNode(N: Subst, InsertPos);
10388 }
10389
10390 return TemplateName(Subst);
10391}
10392
10393/// Retrieve the template name that represents a template name
10394/// deduced from a specialization.
10395TemplateName
10396ASTContext::getDeducedTemplateName(TemplateName Underlying,
10397 DefaultArguments DefaultArgs) const {
10398 if (!DefaultArgs)
10399 return Underlying;
10400
10401 llvm::FoldingSetNodeID ID;
10402 DeducedTemplateStorage::Profile(ID, Context: *this, Underlying, DefArgs: DefaultArgs);
10403
10404 void *InsertPos = nullptr;
10405 DeducedTemplateStorage *DTS =
10406 DeducedTemplates.FindNodeOrInsertPos(ID, InsertPos);
10407 if (!DTS) {
10408 void *Mem = Allocate(Size: sizeof(DeducedTemplateStorage) +
10409 sizeof(TemplateArgument) * DefaultArgs.Args.size(),
10410 Align: alignof(DeducedTemplateStorage));
10411 DTS = new (Mem) DeducedTemplateStorage(Underlying, DefaultArgs);
10412 DeducedTemplates.InsertNode(N: DTS, InsertPos);
10413 }
10414 return TemplateName(DTS);
10415}
10416
/// getFromTargetType - Given one of the integer types provided by
/// TargetInfo, produce the corresponding type. The unsigned @p Type
/// is actually a value of type @c TargetInfo::IntType.
/// Returns the null type for TargetInfo::NoInt.
CanQualType ASTContext::getFromTargetType(unsigned Type) const {
  switch (Type) {
  // NoInt means the target has no such integer type.
  case TargetInfo::NoInt: return {};
  case TargetInfo::SignedChar: return SignedCharTy;
  case TargetInfo::UnsignedChar: return UnsignedCharTy;
  case TargetInfo::SignedShort: return ShortTy;
  case TargetInfo::UnsignedShort: return UnsignedShortTy;
  case TargetInfo::SignedInt: return IntTy;
  case TargetInfo::UnsignedInt: return UnsignedIntTy;
  case TargetInfo::SignedLong: return LongTy;
  case TargetInfo::UnsignedLong: return UnsignedLongTy;
  case TargetInfo::SignedLongLong: return LongLongTy;
  case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy;
  }

  llvm_unreachable("Unhandled TargetInfo::IntType value");
}
10437
10438//===----------------------------------------------------------------------===//
10439// Type Predicates.
10440//===----------------------------------------------------------------------===//
10441
10442/// getObjCGCAttr - Returns one of GCNone, Weak or Strong objc's
10443/// garbage collection attribute.
10444///
10445Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const {
10446 if (getLangOpts().getGC() == LangOptions::NonGC)
10447 return Qualifiers::GCNone;
10448
10449 assert(getLangOpts().ObjC);
10450 Qualifiers::GC GCAttrs = Ty.getObjCGCAttr();
10451
10452 // Default behaviour under objective-C's gc is for ObjC pointers
10453 // (or pointers to them) be treated as though they were declared
10454 // as __strong.
10455 if (GCAttrs == Qualifiers::GCNone) {
10456 if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType())
10457 return Qualifiers::Strong;
10458 else if (Ty->isPointerType())
10459 return getObjCGCAttrKind(Ty: Ty->castAs<PointerType>()->getPointeeType());
10460 } else {
10461 // It's not valid to set GC attributes on anything that isn't a
10462 // pointer.
10463#ifndef NDEBUG
10464 QualType CT = Ty->getCanonicalTypeInternal();
10465 while (const auto *AT = dyn_cast<ArrayType>(CT))
10466 CT = AT->getElementType();
10467 assert(CT->isAnyPointerType() || CT->isBlockPointerType());
10468#endif
10469 }
10470 return GCAttrs;
10471}
10472
10473//===----------------------------------------------------------------------===//
10474// Type Compatibility Testing
10475//===----------------------------------------------------------------------===//
10476
10477/// areCompatVectorTypes - Return true if the two specified vector types are
10478/// compatible.
10479static bool areCompatVectorTypes(const VectorType *LHS,
10480 const VectorType *RHS) {
10481 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified());
10482 return LHS->getElementType() == RHS->getElementType() &&
10483 LHS->getNumElements() == RHS->getNumElements();
10484}
10485
10486/// areCompatMatrixTypes - Return true if the two specified matrix types are
10487/// compatible.
10488static bool areCompatMatrixTypes(const ConstantMatrixType *LHS,
10489 const ConstantMatrixType *RHS) {
10490 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified());
10491 return LHS->getElementType() == RHS->getElementType() &&
10492 LHS->getNumRows() == RHS->getNumRows() &&
10493 LHS->getNumColumns() == RHS->getNumColumns();
10494}
10495
10496bool ASTContext::areCompatibleVectorTypes(QualType FirstVec,
10497 QualType SecondVec) {
10498 assert(FirstVec->isVectorType() && "FirstVec should be a vector type");
10499 assert(SecondVec->isVectorType() && "SecondVec should be a vector type");
10500
10501 if (hasSameUnqualifiedType(T1: FirstVec, T2: SecondVec))
10502 return true;
10503
10504 // Treat Neon vector types and most AltiVec vector types as if they are the
10505 // equivalent GCC vector types.
10506 const auto *First = FirstVec->castAs<VectorType>();
10507 const auto *Second = SecondVec->castAs<VectorType>();
10508 if (First->getNumElements() == Second->getNumElements() &&
10509 hasSameType(T1: First->getElementType(), T2: Second->getElementType()) &&
10510 First->getVectorKind() != VectorKind::AltiVecPixel &&
10511 First->getVectorKind() != VectorKind::AltiVecBool &&
10512 Second->getVectorKind() != VectorKind::AltiVecPixel &&
10513 Second->getVectorKind() != VectorKind::AltiVecBool &&
10514 First->getVectorKind() != VectorKind::SveFixedLengthData &&
10515 First->getVectorKind() != VectorKind::SveFixedLengthPredicate &&
10516 Second->getVectorKind() != VectorKind::SveFixedLengthData &&
10517 Second->getVectorKind() != VectorKind::SveFixedLengthPredicate &&
10518 First->getVectorKind() != VectorKind::RVVFixedLengthData &&
10519 Second->getVectorKind() != VectorKind::RVVFixedLengthData &&
10520 First->getVectorKind() != VectorKind::RVVFixedLengthMask &&
10521 Second->getVectorKind() != VectorKind::RVVFixedLengthMask &&
10522 First->getVectorKind() != VectorKind::RVVFixedLengthMask_1 &&
10523 Second->getVectorKind() != VectorKind::RVVFixedLengthMask_1 &&
10524 First->getVectorKind() != VectorKind::RVVFixedLengthMask_2 &&
10525 Second->getVectorKind() != VectorKind::RVVFixedLengthMask_2 &&
10526 First->getVectorKind() != VectorKind::RVVFixedLengthMask_4 &&
10527 Second->getVectorKind() != VectorKind::RVVFixedLengthMask_4)
10528 return true;
10529
10530 // In OpenCL, treat half and _Float16 vector types as compatible.
10531 if (getLangOpts().OpenCL &&
10532 First->getNumElements() == Second->getNumElements()) {
10533 QualType FirstElt = First->getElementType();
10534 QualType SecondElt = Second->getElementType();
10535
10536 if ((FirstElt->isFloat16Type() && SecondElt->isHalfType()) ||
10537 (FirstElt->isHalfType() && SecondElt->isFloat16Type())) {
10538 if (First->getVectorKind() != VectorKind::AltiVecPixel &&
10539 First->getVectorKind() != VectorKind::AltiVecBool &&
10540 Second->getVectorKind() != VectorKind::AltiVecPixel &&
10541 Second->getVectorKind() != VectorKind::AltiVecBool)
10542 return true;
10543 }
10544 }
10545 return false;
10546}
10547
10548/// getRVVTypeSize - Return RVV vector register size.
10549static uint64_t getRVVTypeSize(ASTContext &Context, const BuiltinType *Ty) {
10550 assert(Ty->isRVVVLSBuiltinType() && "Invalid RVV Type");
10551 auto VScale = Context.getTargetInfo().getVScaleRange(
10552 LangOpts: Context.getLangOpts(), Mode: TargetInfo::ArmStreamingKind::NotStreaming);
10553 if (!VScale)
10554 return 0;
10555
10556 ASTContext::BuiltinVectorTypeInfo Info = Context.getBuiltinVectorTypeInfo(Ty);
10557
10558 uint64_t EltSize = Context.getTypeSize(T: Info.ElementType);
10559 if (Info.ElementType == Context.BoolTy)
10560 EltSize = 1;
10561
10562 uint64_t MinElts = Info.EC.getKnownMinValue();
10563 return VScale->first * MinElts * EltSize;
10564}
10565
10566bool ASTContext::areCompatibleRVVTypes(QualType FirstType,
10567 QualType SecondType) {
10568 assert(
10569 ((FirstType->isRVVSizelessBuiltinType() && SecondType->isVectorType()) ||
10570 (FirstType->isVectorType() && SecondType->isRVVSizelessBuiltinType())) &&
10571 "Expected RVV builtin type and vector type!");
10572
10573 auto IsValidCast = [this](QualType FirstType, QualType SecondType) {
10574 if (const auto *BT = FirstType->getAs<BuiltinType>()) {
10575 if (const auto *VT = SecondType->getAs<VectorType>()) {
10576 if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask) {
10577 BuiltinVectorTypeInfo Info = getBuiltinVectorTypeInfo(Ty: BT);
10578 return FirstType->isRVVVLSBuiltinType() &&
10579 Info.ElementType == BoolTy &&
10580 getTypeSize(T: SecondType) == ((getRVVTypeSize(Context&: *this, Ty: BT)));
10581 }
10582 if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask_1) {
10583 BuiltinVectorTypeInfo Info = getBuiltinVectorTypeInfo(Ty: BT);
10584 return FirstType->isRVVVLSBuiltinType() &&
10585 Info.ElementType == BoolTy &&
10586 getTypeSize(T: SecondType) == ((getRVVTypeSize(Context&: *this, Ty: BT) * 8));
10587 }
10588 if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask_2) {
10589 BuiltinVectorTypeInfo Info = getBuiltinVectorTypeInfo(Ty: BT);
10590 return FirstType->isRVVVLSBuiltinType() &&
10591 Info.ElementType == BoolTy &&
10592 getTypeSize(T: SecondType) == ((getRVVTypeSize(Context&: *this, Ty: BT)) * 4);
10593 }
10594 if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask_4) {
10595 BuiltinVectorTypeInfo Info = getBuiltinVectorTypeInfo(Ty: BT);
10596 return FirstType->isRVVVLSBuiltinType() &&
10597 Info.ElementType == BoolTy &&
10598 getTypeSize(T: SecondType) == ((getRVVTypeSize(Context&: *this, Ty: BT)) * 2);
10599 }
10600 if (VT->getVectorKind() == VectorKind::RVVFixedLengthData ||
10601 VT->getVectorKind() == VectorKind::Generic)
10602 return FirstType->isRVVVLSBuiltinType() &&
10603 getTypeSize(T: SecondType) == getRVVTypeSize(Context&: *this, Ty: BT) &&
10604 hasSameType(T1: VT->getElementType(),
10605 T2: getBuiltinVectorTypeInfo(Ty: BT).ElementType);
10606 }
10607 }
10608 return false;
10609 };
10610
10611 return IsValidCast(FirstType, SecondType) ||
10612 IsValidCast(SecondType, FirstType);
10613}
10614
10615bool ASTContext::areLaxCompatibleRVVTypes(QualType FirstType,
10616 QualType SecondType) {
10617 assert(
10618 ((FirstType->isRVVSizelessBuiltinType() && SecondType->isVectorType()) ||
10619 (FirstType->isVectorType() && SecondType->isRVVSizelessBuiltinType())) &&
10620 "Expected RVV builtin type and vector type!");
10621
10622 auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) {
10623 const auto *BT = FirstType->getAs<BuiltinType>();
10624 if (!BT)
10625 return false;
10626
10627 if (!BT->isRVVVLSBuiltinType())
10628 return false;
10629
10630 const auto *VecTy = SecondType->getAs<VectorType>();
10631 if (VecTy && VecTy->getVectorKind() == VectorKind::Generic) {
10632 const LangOptions::LaxVectorConversionKind LVCKind =
10633 getLangOpts().getLaxVectorConversions();
10634
10635 // If __riscv_v_fixed_vlen != N do not allow vector lax conversion.
10636 if (getTypeSize(T: SecondType) != getRVVTypeSize(Context&: *this, Ty: BT))
10637 return false;
10638
10639 // If -flax-vector-conversions=all is specified, the types are
10640 // certainly compatible.
10641 if (LVCKind == LangOptions::LaxVectorConversionKind::All)
10642 return true;
10643
10644 // If -flax-vector-conversions=integer is specified, the types are
10645 // compatible if the elements are integer types.
10646 if (LVCKind == LangOptions::LaxVectorConversionKind::Integer)
10647 return VecTy->getElementType().getCanonicalType()->isIntegerType() &&
10648 FirstType->getRVVEltType(Ctx: *this)->isIntegerType();
10649 }
10650
10651 return false;
10652 };
10653
10654 return IsLaxCompatible(FirstType, SecondType) ||
10655 IsLaxCompatible(SecondType, FirstType);
10656}
10657
10658bool ASTContext::hasDirectOwnershipQualifier(QualType Ty) const {
10659 while (true) {
10660 // __strong id
10661 if (const AttributedType *Attr = dyn_cast<AttributedType>(Val&: Ty)) {
10662 if (Attr->getAttrKind() == attr::ObjCOwnership)
10663 return true;
10664
10665 Ty = Attr->getModifiedType();
10666
10667 // X *__strong (...)
10668 } else if (const ParenType *Paren = dyn_cast<ParenType>(Val&: Ty)) {
10669 Ty = Paren->getInnerType();
10670
10671 // We do not want to look through typedefs, typeof(expr),
10672 // typeof(type), or any other way that the type is somehow
10673 // abstracted.
10674 } else {
10675 return false;
10676 }
10677 }
10678}
10679
10680//===----------------------------------------------------------------------===//
10681// ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's.
10682//===----------------------------------------------------------------------===//
10683
10684/// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the
10685/// inheritance hierarchy of 'rProto'.
10686bool
10687ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto,
10688 ObjCProtocolDecl *rProto) const {
10689 if (declaresSameEntity(D1: lProto, D2: rProto))
10690 return true;
10691 for (auto *PI : rProto->protocols())
10692 if (ProtocolCompatibleWithProtocol(lProto, rProto: PI))
10693 return true;
10694 return false;
10695}
10696
10697/// ObjCQualifiedClassTypesAreCompatible - compare Class<pr,...> and
10698/// Class<pr1, ...>.
10699bool ASTContext::ObjCQualifiedClassTypesAreCompatible(
10700 const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs) {
10701 for (auto *lhsProto : lhs->quals()) {
10702 bool match = false;
10703 for (auto *rhsProto : rhs->quals()) {
10704 if (ProtocolCompatibleWithProtocol(lProto: lhsProto, rProto: rhsProto)) {
10705 match = true;
10706 break;
10707 }
10708 }
10709 if (!match)
10710 return false;
10711 }
10712 return true;
10713}
10714
/// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an
/// ObjCQualifiedIDType (i.e. id<P1,...>). Returns true if the two pointer
/// types are compatible. When \p compare is true, protocol compatibility is
/// additionally checked in the reverse direction (rhs protocol in lhs's
/// hierarchy), making the check symmetric.
bool ASTContext::ObjCQualifiedIdTypesAreCompatible(
    const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs,
    bool compare) {
  // Allow id<P..> and an 'id' in all cases.
  if (lhs->isObjCIdType() || rhs->isObjCIdType())
    return true;

  // Don't allow id<P..> to convert to Class or Class<P..> in either direction.
  if (lhs->isObjCClassType() || lhs->isObjCQualifiedClassType() ||
      rhs->isObjCClassType() || rhs->isObjCQualifiedClassType())
    return false;

  // Case 1: the qualified-id is on the left.
  if (lhs->isObjCQualifiedIdType()) {
    if (rhs->qual_empty()) {
      // If the RHS is a unqualified interface pointer "NSString*",
      // make sure we check the class hierarchy.
      if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) {
        for (auto *I : lhs->quals()) {
          // when comparing an id<P> on lhs with a static type on rhs,
          // see if static class implements all of id's protocols, directly or
          // through its super class and categories.
          if (!rhsID->ClassImplementsProtocol(lProto: I, lookupCategory: true))
            return false;
        }
      }
      // If there are no qualifiers and no interface, we have an 'id'.
      return true;
    }
    // Both the right and left sides have qualifiers.
    for (auto *lhsProto : lhs->quals()) {
      bool match = false;

      // when comparing an id<P> on lhs with a static type on rhs,
      // see if static class implements all of id's protocols, directly or
      // through its super class and categories.
      for (auto *rhsProto : rhs->quals()) {
        if (ProtocolCompatibleWithProtocol(lProto: lhsProto, rProto: rhsProto) ||
            (compare && ProtocolCompatibleWithProtocol(lProto: rhsProto, rProto: lhsProto))) {
          match = true;
          break;
        }
      }
      // If the RHS is a qualified interface pointer "NSString<P>*",
      // make sure we check the class hierarchy.
      if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) {
        for (auto *I : lhs->quals()) {
          // when comparing an id<P> on lhs with a static type on rhs,
          // see if static class implements all of id's protocols, directly or
          // through its super class and categories.
          if (rhsID->ClassImplementsProtocol(lProto: I, lookupCategory: true)) {
            match = true;
            break;
          }
        }
      }
      if (!match)
        return false;
    }

    return true;
  }

  // Case 2: the qualified-id must be on the right.
  assert(rhs->isObjCQualifiedIdType() && "One of the LHS/RHS should be id<x>");

  if (lhs->getInterfaceType()) {
    // If both the right and left sides have qualifiers.
    for (auto *lhsProto : lhs->quals()) {
      bool match = false;

      // when comparing an id<P> on rhs with a static type on lhs,
      // see if static class implements all of id's protocols, directly or
      // through its super class and categories.
      // First, lhs protocols in the qualifier list must be found, direct
      // or indirect in rhs's qualifier list or it is a mismatch.
      for (auto *rhsProto : rhs->quals()) {
        if (ProtocolCompatibleWithProtocol(lProto: lhsProto, rProto: rhsProto) ||
            (compare && ProtocolCompatibleWithProtocol(lProto: rhsProto, rProto: lhsProto))) {
          match = true;
          break;
        }
      }
      if (!match)
        return false;
    }

    // Static class's protocols, or its super class or category protocols
    // must be found, direct or indirect in rhs's qualifier list or it is a
    // mismatch.
    if (ObjCInterfaceDecl *lhsID = lhs->getInterfaceDecl()) {
      llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols;
      CollectInheritedProtocols(CDecl: lhsID, Protocols&: LHSInheritedProtocols);
      // This is rather dubious but matches gcc's behavior. If lhs has
      // no type qualifier and its class has no static protocol(s)
      // assume that it is mismatch.
      if (LHSInheritedProtocols.empty() && lhs->qual_empty())
        return false;
      for (auto *lhsProto : LHSInheritedProtocols) {
        bool match = false;
        for (auto *rhsProto : rhs->quals()) {
          if (ProtocolCompatibleWithProtocol(lProto: lhsProto, rProto: rhsProto) ||
              (compare && ProtocolCompatibleWithProtocol(lProto: rhsProto, rProto: lhsProto))) {
            match = true;
            break;
          }
        }
        if (!match)
          return false;
      }
    }
    return true;
  }
  return false;
}
10829
/// canAssignObjCInterfaces - Return true if the two interface types are
/// compatible for assignment from RHS to LHS. This handles validation of any
/// protocol qualifiers on the LHS or RHS.
bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT,
                                         const ObjCObjectPointerType *RHSOPT) {
  const ObjCObjectType* LHS = LHSOPT->getObjectType();
  const ObjCObjectType* RHS = RHSOPT->getObjectType();

  // If either type represents the built-in 'id' type, return true.
  if (LHS->isObjCUnqualifiedId() || RHS->isObjCUnqualifiedId())
    return true;

  // Function object that propagates a successful result or handles
  // __kindof types. A failed forward check may still succeed in reverse
  // when the RHS is a __kindof type.
  auto finish = [&](bool succeeded) -> bool {
    if (succeeded)
      return true;

    if (!RHS->isKindOfType())
      return false;

    // Strip off __kindof and protocol qualifiers, then check whether
    // we can assign the other way.
    return canAssignObjCInterfaces(LHSOPT: RHSOPT->stripObjCKindOfTypeAndQuals(ctx: *this),
                                   RHSOPT: LHSOPT->stripObjCKindOfTypeAndQuals(ctx: *this));
  };

  // Casts from or to id<P> are allowed when the other side has compatible
  // protocols.
  if (LHS->isObjCQualifiedId() || RHS->isObjCQualifiedId()) {
    return finish(ObjCQualifiedIdTypesAreCompatible(lhs: LHSOPT, rhs: RHSOPT, compare: false));
  }

  // Verify protocol compatibility for casts from Class<P1> to Class<P2>.
  if (LHS->isObjCQualifiedClass() && RHS->isObjCQualifiedClass()) {
    return finish(ObjCQualifiedClassTypesAreCompatible(lhs: LHSOPT, rhs: RHSOPT));
  }

  // Casts from Class to Class<Foo>, or vice-versa, are allowed.
  if (LHS->isObjCClass() && RHS->isObjCClass()) {
    return true;
  }

  // If we have 2 user-defined types, fall into that path.
  if (LHS->getInterface() && RHS->getInterface()) {
    return finish(canAssignObjCInterfaces(LHS, RHS));
  }

  return false;
}
10880
/// canAssignObjCInterfacesInBlockPointer - This routine is specifically written
/// for providing type-safety for objective-c pointers used to pass/return
/// arguments in block literals. When passed as arguments, passing 'A*' where
/// 'id' is expected is not OK. Passing 'Sub *" where 'Super *" is expected is
/// not OK. For the return type, the opposite is not OK.
bool ASTContext::canAssignObjCInterfacesInBlockPointer(
                                         const ObjCObjectPointerType *LHSOPT,
                                         const ObjCObjectPointerType *RHSOPT,
                                         bool BlockReturnType) {

  // Function object that propagates a successful result or handles
  // __kindof types. The side that is "expected" depends on whether we are
  // checking a parameter (LHS) or a return type (RHS).
  auto finish = [&](bool succeeded) -> bool {
    if (succeeded)
      return true;

    const ObjCObjectPointerType *Expected = BlockReturnType ? RHSOPT : LHSOPT;
    if (!Expected->isKindOfType())
      return false;

    // Strip off __kindof and protocol qualifiers, then check whether
    // we can assign the other way.
    return canAssignObjCInterfacesInBlockPointer(
             LHSOPT: RHSOPT->stripObjCKindOfTypeAndQuals(ctx: *this),
             RHSOPT: LHSOPT->stripObjCKindOfTypeAndQuals(ctx: *this),
             BlockReturnType);
  };

  // A builtin RHS (id/Class) or an 'id' LHS is always acceptable.
  if (RHSOPT->isObjCBuiltinType() || LHSOPT->isObjCIdType())
    return true;

  if (LHSOPT->isObjCBuiltinType()) {
    return finish(RHSOPT->isObjCBuiltinType() ||
                  RHSOPT->isObjCQualifiedIdType());
  }

  if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType()) {
    if (getLangOpts().CompatibilityQualifiedIdBlockParamTypeChecking)
      // Use for block parameters previous type checking for compatibility.
      return finish(ObjCQualifiedIdTypesAreCompatible(lhs: LHSOPT, rhs: RHSOPT, compare: false) ||
                    // Or corrected type checking as in non-compat mode.
                    (!BlockReturnType &&
                     ObjCQualifiedIdTypesAreCompatible(lhs: RHSOPT, rhs: LHSOPT, compare: false)));
    else
      return finish(ObjCQualifiedIdTypesAreCompatible(
          lhs: (BlockReturnType ? LHSOPT : RHSOPT),
          rhs: (BlockReturnType ? RHSOPT : LHSOPT), compare: false));
  }

  const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType();
  const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType();
  if (LHS && RHS) { // We have 2 user-defined types.
    if (LHS != RHS) {
      // Sub/Super relationships are only acceptable in the direction that
      // matches the parameter-vs-return position (see function comment).
      if (LHS->getDecl()->isSuperClassOf(I: RHS->getDecl()))
        return finish(BlockReturnType);
      if (RHS->getDecl()->isSuperClassOf(I: LHS->getDecl()))
        return finish(!BlockReturnType);
    }
    else
      return true;
  }
  return false;
}
10944
10945/// Comparison routine for Objective-C protocols to be used with
10946/// llvm::array_pod_sort.
10947static int compareObjCProtocolsByName(ObjCProtocolDecl * const *lhs,
10948 ObjCProtocolDecl * const *rhs) {
10949 return (*lhs)->getName().compare(RHS: (*rhs)->getName());
10950}
10951
/// getIntersectionOfProtocols - This routine finds the intersection of set
/// of protocols inherited from two distinct objective-c pointer objects with
/// the given common base.
/// It is used to build composite qualifier list of the composite type of
/// the conditional expression involving two objective-c pointer objects.
/// The result (in \p IntersectionSet) excludes protocols already implied by
/// \p CommonBase and is sorted by protocol name for determinism.
static
void getIntersectionOfProtocols(ASTContext &Context,
                                const ObjCInterfaceDecl *CommonBase,
                                const ObjCObjectPointerType *LHSOPT,
                                const ObjCObjectPointerType *RHSOPT,
      SmallVectorImpl<ObjCProtocolDecl *> &IntersectionSet) {

  const ObjCObjectType* LHS = LHSOPT->getObjectType();
  const ObjCObjectType* RHS = RHSOPT->getObjectType();
  assert(LHS->getInterface() && "LHS must have an interface base");
  assert(RHS->getInterface() && "RHS must have an interface base");

  // Add all of the protocols for the LHS.
  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSProtocolSet;

  // Start with the protocol qualifiers.
  for (auto *proto : LHS->quals()) {
    Context.CollectInheritedProtocols(CDecl: proto, Protocols&: LHSProtocolSet);
  }

  // Also add the protocols associated with the LHS interface.
  Context.CollectInheritedProtocols(CDecl: LHS->getInterface(), Protocols&: LHSProtocolSet);

  // Add all of the protocols for the RHS.
  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSProtocolSet;

  // Start with the protocol qualifiers.
  for (auto *proto : RHS->quals()) {
    Context.CollectInheritedProtocols(CDecl: proto, Protocols&: RHSProtocolSet);
  }

  // Also add the protocols associated with the RHS interface.
  Context.CollectInheritedProtocols(CDecl: RHS->getInterface(), Protocols&: RHSProtocolSet);

  // Compute the intersection of the collected protocol sets.
  for (auto *proto : LHSProtocolSet) {
    if (RHSProtocolSet.count(Ptr: proto))
      IntersectionSet.push_back(Elt: proto);
  }

  // Compute the set of protocols that is implied by either the common type or
  // the protocols within the intersection.
  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> ImpliedProtocols;
  Context.CollectInheritedProtocols(CDecl: CommonBase, Protocols&: ImpliedProtocols);

  // Remove any implied protocols from the list of inherited protocols.
  if (!ImpliedProtocols.empty()) {
    llvm::erase_if(C&: IntersectionSet, P: [&](ObjCProtocolDecl *proto) -> bool {
      return ImpliedProtocols.contains(Ptr: proto);
    });
  }

  // Sort the remaining protocols by name.
  llvm::array_pod_sort(Start: IntersectionSet.begin(), End: IntersectionSet.end(),
                       Compare: compareObjCProtocolsByName);
}
11013
11014/// Determine whether the first type is a subtype of the second.
11015static bool canAssignObjCObjectTypes(ASTContext &ctx, QualType lhs,
11016 QualType rhs) {
11017 // Common case: two object pointers.
11018 const auto *lhsOPT = lhs->getAs<ObjCObjectPointerType>();
11019 const auto *rhsOPT = rhs->getAs<ObjCObjectPointerType>();
11020 if (lhsOPT && rhsOPT)
11021 return ctx.canAssignObjCInterfaces(LHSOPT: lhsOPT, RHSOPT: rhsOPT);
11022
11023 // Two block pointers.
11024 const auto *lhsBlock = lhs->getAs<BlockPointerType>();
11025 const auto *rhsBlock = rhs->getAs<BlockPointerType>();
11026 if (lhsBlock && rhsBlock)
11027 return ctx.typesAreBlockPointerCompatible(lhs, rhs);
11028
11029 // If either is an unqualified 'id' and the other is a block, it's
11030 // acceptable.
11031 if ((lhsOPT && lhsOPT->isObjCIdType() && rhsBlock) ||
11032 (rhsOPT && rhsOPT->isObjCIdType() && lhsBlock))
11033 return true;
11034
11035 return false;
11036}
11037
11038// Check that the given Objective-C type argument lists are equivalent.
11039static bool sameObjCTypeArgs(ASTContext &ctx,
11040 const ObjCInterfaceDecl *iface,
11041 ArrayRef<QualType> lhsArgs,
11042 ArrayRef<QualType> rhsArgs,
11043 bool stripKindOf) {
11044 if (lhsArgs.size() != rhsArgs.size())
11045 return false;
11046
11047 ObjCTypeParamList *typeParams = iface->getTypeParamList();
11048 if (!typeParams)
11049 return false;
11050
11051 for (unsigned i = 0, n = lhsArgs.size(); i != n; ++i) {
11052 if (ctx.hasSameType(T1: lhsArgs[i], T2: rhsArgs[i]))
11053 continue;
11054
11055 switch (typeParams->begin()[i]->getVariance()) {
11056 case ObjCTypeParamVariance::Invariant:
11057 if (!stripKindOf ||
11058 !ctx.hasSameType(T1: lhsArgs[i].stripObjCKindOfType(ctx),
11059 T2: rhsArgs[i].stripObjCKindOfType(ctx))) {
11060 return false;
11061 }
11062 break;
11063
11064 case ObjCTypeParamVariance::Covariant:
11065 if (!canAssignObjCObjectTypes(ctx, lhs: lhsArgs[i], rhs: rhsArgs[i]))
11066 return false;
11067 break;
11068
11069 case ObjCTypeParamVariance::Contravariant:
11070 if (!canAssignObjCObjectTypes(ctx, lhs: rhsArgs[i], rhs: lhsArgs[i]))
11071 return false;
11072 break;
11073 }
11074 }
11075
11076 return true;
11077}
11078
/// Find a common base class of the two given Objective-C object pointer
/// types, or return a null QualType if there is none. The search first walks
/// the LHS hierarchy looking for the RHS class, then walks the RHS hierarchy
/// against the cached LHS ancestors. The result preserves __kindof (if either
/// side had it), merged type arguments, and the intersection of protocols.
QualType ASTContext::areCommonBaseCompatible(
           const ObjCObjectPointerType *Lptr,
           const ObjCObjectPointerType *Rptr) {
  const ObjCObjectType *LHS = Lptr->getObjectType();
  const ObjCObjectType *RHS = Rptr->getObjectType();
  const ObjCInterfaceDecl* LDecl = LHS->getInterface();
  const ObjCInterfaceDecl* RDecl = RHS->getInterface();

  // Both sides must name an interface to have a common base class.
  if (!LDecl || !RDecl)
    return {};

  // When either LHS or RHS is a kindof type, we should return a kindof type.
  // For example, for common base of kindof(ASub1) and kindof(ASub2), we return
  // kindof(A).
  bool anyKindOf = LHS->isKindOfType() || RHS->isKindOfType();

  // Follow the left-hand side up the class hierarchy until we either hit a
  // root or find the RHS. Record the ancestors in case we don't find it.
  llvm::SmallDenseMap<const ObjCInterfaceDecl *, const ObjCObjectType *, 4>
    LHSAncestors;
  while (true) {
    // Record this ancestor. We'll need this if the common type isn't in the
    // path from the LHS to the root.
    LHSAncestors[LHS->getInterface()->getCanonicalDecl()] = LHS;

    if (declaresSameEntity(D1: LHS->getInterface(), D2: RDecl)) {
      // Get the type arguments.
      ArrayRef<QualType> LHSTypeArgs = LHS->getTypeArgsAsWritten();
      bool anyChanges = false;
      if (LHS->isSpecialized() && RHS->isSpecialized()) {
        // Both have type arguments, compare them.
        if (!sameObjCTypeArgs(ctx&: *this, iface: LHS->getInterface(),
                              lhsArgs: LHS->getTypeArgs(), rhsArgs: RHS->getTypeArgs(),
                              /*stripKindOf=*/true))
          return {};
      } else if (LHS->isSpecialized() != RHS->isSpecialized()) {
        // If only one has type arguments, the result will not have type
        // arguments.
        LHSTypeArgs = {};
        anyChanges = true;
      }

      // Compute the intersection of protocols.
      SmallVector<ObjCProtocolDecl *, 8> Protocols;
      getIntersectionOfProtocols(Context&: *this, CommonBase: LHS->getInterface(), LHSOPT: Lptr, RHSOPT: Rptr,
                                 IntersectionSet&: Protocols);
      if (!Protocols.empty())
        anyChanges = true;

      // If anything in the LHS will have changed, build a new result type.
      // If we need to return a kindof type but LHS is not a kindof type, we
      // build a new result type.
      if (anyChanges || LHS->isKindOfType() != anyKindOf) {
        QualType Result = getObjCInterfaceType(Decl: LHS->getInterface());
        Result = getObjCObjectType(baseType: Result, typeArgs: LHSTypeArgs, protocols: Protocols,
                                   isKindOf: anyKindOf || LHS->isKindOfType());
        return getObjCObjectPointerType(ObjectT: Result);
      }

      return getObjCObjectPointerType(ObjectT: QualType(LHS, 0));
    }

    // Find the superclass.
    QualType LHSSuperType = LHS->getSuperClassType();
    if (LHSSuperType.isNull())
      break;

    LHS = LHSSuperType->castAs<ObjCObjectType>();
  }

  // We didn't find anything by following the LHS to its root; now check
  // the RHS against the cached set of ancestors.
  while (true) {
    auto KnownLHS = LHSAncestors.find(Val: RHS->getInterface()->getCanonicalDecl());
    if (KnownLHS != LHSAncestors.end()) {
      LHS = KnownLHS->second;

      // Get the type arguments.
      ArrayRef<QualType> RHSTypeArgs = RHS->getTypeArgsAsWritten();
      bool anyChanges = false;
      if (LHS->isSpecialized() && RHS->isSpecialized()) {
        // Both have type arguments, compare them.
        if (!sameObjCTypeArgs(ctx&: *this, iface: LHS->getInterface(),
                              lhsArgs: LHS->getTypeArgs(), rhsArgs: RHS->getTypeArgs(),
                              /*stripKindOf=*/true))
          return {};
      } else if (LHS->isSpecialized() != RHS->isSpecialized()) {
        // If only one has type arguments, the result will not have type
        // arguments.
        RHSTypeArgs = {};
        anyChanges = true;
      }

      // Compute the intersection of protocols.
      SmallVector<ObjCProtocolDecl *, 8> Protocols;
      getIntersectionOfProtocols(Context&: *this, CommonBase: RHS->getInterface(), LHSOPT: Lptr, RHSOPT: Rptr,
                                 IntersectionSet&: Protocols);
      if (!Protocols.empty())
        anyChanges = true;

      // If we need to return a kindof type but RHS is not a kindof type, we
      // build a new result type.
      if (anyChanges || RHS->isKindOfType() != anyKindOf) {
        QualType Result = getObjCInterfaceType(Decl: RHS->getInterface());
        Result = getObjCObjectType(baseType: Result, typeArgs: RHSTypeArgs, protocols: Protocols,
                                   isKindOf: anyKindOf || RHS->isKindOfType());
        return getObjCObjectPointerType(ObjectT: Result);
      }

      return getObjCObjectPointerType(ObjectT: QualType(RHS, 0));
    }

    // Find the superclass of the RHS.
    QualType RHSSuperType = RHS->getSuperClassType();
    if (RHSSuperType.isNull())
      break;

    RHS = RHSSuperType->castAs<ObjCObjectType>();
  }

  // No common base class found.
  return {};
}
11201
/// Return true if an object of the RHS interface type (with its protocol
/// qualifiers and type arguments) is assignable to the LHS interface type.
/// The RHS class must be a subclass of the LHS class, the RHS must satisfy
/// all LHS protocol qualifiers, and any LHS type arguments must match.
bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS,
                                         const ObjCObjectType *RHS) {
  assert(LHS->getInterface() && "LHS is not an interface type");
  assert(RHS->getInterface() && "RHS is not an interface type");

  // Verify that the base decls are compatible: the RHS must be a subclass of
  // the LHS.
  ObjCInterfaceDecl *LHSInterface = LHS->getInterface();
  bool IsSuperClass = LHSInterface->isSuperClassOf(I: RHS->getInterface());
  if (!IsSuperClass)
    return false;

  // If the LHS has protocol qualifiers, determine whether all of them are
  // satisfied by the RHS (i.e., the RHS has a superset of the protocols in the
  // LHS).
  if (LHS->getNumProtocols() > 0) {
    // OK if conversion of LHS to SuperClass results in narrowing of types
    // ; i.e., SuperClass may implement at least one of the protocols
    // in LHS's protocol list. Example, SuperObj<P1> = lhs<P1,P2> is ok.
    // But not SuperObj<P1,P2,P3> = lhs<P1,P2>.
    llvm::SmallPtrSet<ObjCProtocolDecl *, 8> SuperClassInheritedProtocols;
    CollectInheritedProtocols(CDecl: RHS->getInterface(), Protocols&: SuperClassInheritedProtocols);
    // Also, if RHS has explicit qualifiers, include them for comparing with
    // LHS's qualifiers.
    for (auto *RHSPI : RHS->quals())
      CollectInheritedProtocols(CDecl: RHSPI, Protocols&: SuperClassInheritedProtocols);
    // If there is no protocols associated with RHS, it is not a match.
    if (SuperClassInheritedProtocols.empty())
      return false;

    for (const auto *LHSProto : LHS->quals()) {
      bool SuperImplementsProtocol = false;
      for (auto *SuperClassProto : SuperClassInheritedProtocols)
        if (SuperClassProto->lookupProtocolNamed(PName: LHSProto->getIdentifier())) {
          SuperImplementsProtocol = true;
          break;
        }
      if (!SuperImplementsProtocol)
        return false;
    }
  }

  // If the LHS is specialized, we may need to check type arguments.
  if (LHS->isSpecialized()) {
    // Follow the superclass chain until we've matched the LHS class in the
    // hierarchy. This substitutes type arguments through.
    const ObjCObjectType *RHSSuper = RHS;
    while (!declaresSameEntity(D1: RHSSuper->getInterface(), D2: LHSInterface))
      RHSSuper = RHSSuper->getSuperClassType()->castAs<ObjCObjectType>();

    // If the RHS is specialized, compare type arguments.
    if (RHSSuper->isSpecialized() &&
        !sameObjCTypeArgs(ctx&: *this, iface: LHS->getInterface(),
                          lhsArgs: LHS->getTypeArgs(), rhsArgs: RHSSuper->getTypeArgs(),
                          /*stripKindOf=*/true)) {
      return false;
    }
  }

  return true;
}
11263
11264bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) {
11265 // get the "pointed to" types
11266 const auto *LHSOPT = LHS->getAs<ObjCObjectPointerType>();
11267 const auto *RHSOPT = RHS->getAs<ObjCObjectPointerType>();
11268
11269 if (!LHSOPT || !RHSOPT)
11270 return false;
11271
11272 return canAssignObjCInterfaces(LHSOPT, RHSOPT) ||
11273 canAssignObjCInterfaces(LHSOPT: RHSOPT, RHSOPT: LHSOPT);
11274}
11275
11276bool ASTContext::canBindObjCObjectType(QualType To, QualType From) {
11277 return canAssignObjCInterfaces(
11278 LHSOPT: getObjCObjectPointerType(ObjectT: To)->castAs<ObjCObjectPointerType>(),
11279 RHSOPT: getObjCObjectPointerType(ObjectT: From)->castAs<ObjCObjectPointerType>());
11280}
11281
11282/// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible,
11283/// both shall have the identically qualified version of a compatible type.
11284/// C99 6.2.7p1: Two types have compatible types if their types are the
11285/// same. See 6.7.[2,3,5] for additional rules.
11286bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS,
11287 bool CompareUnqualified) {
11288 if (getLangOpts().CPlusPlus)
11289 return hasSameType(T1: LHS, T2: RHS);
11290
11291 return !mergeTypes(LHS, RHS, OfBlockPointer: false, Unqualified: CompareUnqualified).isNull();
11292}
11293
// Objective-C property types are compared with the ordinary C/C++ type
// compatibility rules.
bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) {
  return typesAreCompatible(LHS, RHS);
}
11297
// Block pointer compatibility: the types are compatible iff they merge
// under the block-pointer merging rules (OfBlockPointer = true).
bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) {
  return !mergeTypes(LHS, RHS, OfBlockPointer: true).isNull();
}
11301
11302/// mergeTransparentUnionType - if T is a transparent union type and a member
11303/// of T is compatible with SubType, return the merged type, else return
11304/// QualType()
11305QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType,
11306 bool OfBlockPointer,
11307 bool Unqualified) {
11308 if (const RecordType *UT = T->getAsUnionType()) {
11309 RecordDecl *UD = UT->getDecl()->getMostRecentDecl();
11310 if (UD->hasAttr<TransparentUnionAttr>()) {
11311 for (const auto *I : UD->fields()) {
11312 QualType ET = I->getType().getUnqualifiedType();
11313 QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified);
11314 if (!MT.isNull())
11315 return MT;
11316 }
11317 }
11318 }
11319
11320 return {};
11321}
11322
11323/// mergeFunctionParameterTypes - merge two types which appear as function
11324/// parameter types
11325QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs,
11326 bool OfBlockPointer,
11327 bool Unqualified) {
11328 // GNU extension: two types are compatible if they appear as a function
11329 // argument, one of the types is a transparent union type and the other
11330 // type is compatible with a union member
11331 QualType lmerge = mergeTransparentUnionType(T: lhs, SubType: rhs, OfBlockPointer,
11332 Unqualified);
11333 if (!lmerge.isNull())
11334 return lmerge;
11335
11336 QualType rmerge = mergeTransparentUnionType(T: rhs, SubType: lhs, OfBlockPointer,
11337 Unqualified);
11338 if (!rmerge.isNull())
11339 return rmerge;
11340
11341 return mergeTypes(lhs, rhs, OfBlockPointer, Unqualified);
11342}
11343
11344QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
11345 bool OfBlockPointer, bool Unqualified,
11346 bool AllowCXX,
11347 bool IsConditionalOperator) {
11348 const auto *lbase = lhs->castAs<FunctionType>();
11349 const auto *rbase = rhs->castAs<FunctionType>();
11350 const auto *lproto = dyn_cast<FunctionProtoType>(Val: lbase);
11351 const auto *rproto = dyn_cast<FunctionProtoType>(Val: rbase);
11352 bool allLTypes = true;
11353 bool allRTypes = true;
11354
11355 // Check return type
11356 QualType retType;
11357 if (OfBlockPointer) {
11358 QualType RHS = rbase->getReturnType();
11359 QualType LHS = lbase->getReturnType();
11360 bool UnqualifiedResult = Unqualified;
11361 if (!UnqualifiedResult)
11362 UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers());
11363 retType = mergeTypes(LHS, RHS, OfBlockPointer: true, Unqualified: UnqualifiedResult, BlockReturnType: true);
11364 }
11365 else
11366 retType = mergeTypes(lbase->getReturnType(), rbase->getReturnType(), OfBlockPointer: false,
11367 Unqualified);
11368 if (retType.isNull())
11369 return {};
11370
11371 if (Unqualified)
11372 retType = retType.getUnqualifiedType();
11373
11374 CanQualType LRetType = getCanonicalType(T: lbase->getReturnType());
11375 CanQualType RRetType = getCanonicalType(T: rbase->getReturnType());
11376 if (Unqualified) {
11377 LRetType = LRetType.getUnqualifiedType();
11378 RRetType = RRetType.getUnqualifiedType();
11379 }
11380
11381 if (getCanonicalType(T: retType) != LRetType)
11382 allLTypes = false;
11383 if (getCanonicalType(T: retType) != RRetType)
11384 allRTypes = false;
11385
11386 // FIXME: double check this
11387 // FIXME: should we error if lbase->getRegParmAttr() != 0 &&
11388 // rbase->getRegParmAttr() != 0 &&
11389 // lbase->getRegParmAttr() != rbase->getRegParmAttr()?
11390 FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo();
11391 FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo();
11392
11393 // Compatible functions must have compatible calling conventions
11394 if (lbaseInfo.getCC() != rbaseInfo.getCC())
11395 return {};
11396
11397 // Regparm is part of the calling convention.
11398 if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm())
11399 return {};
11400 if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm())
11401 return {};
11402
11403 if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult())
11404 return {};
11405 if (lbaseInfo.getNoCallerSavedRegs() != rbaseInfo.getNoCallerSavedRegs())
11406 return {};
11407 if (lbaseInfo.getNoCfCheck() != rbaseInfo.getNoCfCheck())
11408 return {};
11409
11410 // When merging declarations, it's common for supplemental information like
11411 // attributes to only be present in one of the declarations, and we generally
11412 // want type merging to preserve the union of information. So a merged
11413 // function type should be noreturn if it was noreturn in *either* operand
11414 // type.
11415 //
11416 // But for the conditional operator, this is backwards. The result of the
11417 // operator could be either operand, and its type should conservatively
11418 // reflect that. So a function type in a composite type is noreturn only
11419 // if it's noreturn in *both* operand types.
11420 //
11421 // Arguably, noreturn is a kind of subtype, and the conditional operator
11422 // ought to produce the most specific common supertype of its operand types.
11423 // That would differ from this rule in contravariant positions. However,
11424 // neither C nor C++ generally uses this kind of subtype reasoning. Also,
11425 // as a practical matter, it would only affect C code that does abstraction of
11426 // higher-order functions (taking noreturn callbacks!), which is uncommon to
11427 // say the least. So we use the simpler rule.
11428 bool NoReturn = IsConditionalOperator
11429 ? lbaseInfo.getNoReturn() && rbaseInfo.getNoReturn()
11430 : lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn();
11431 if (lbaseInfo.getNoReturn() != NoReturn)
11432 allLTypes = false;
11433 if (rbaseInfo.getNoReturn() != NoReturn)
11434 allRTypes = false;
11435
11436 FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(noReturn: NoReturn);
11437
11438 std::optional<FunctionEffectSet> MergedFX;
11439
11440 if (lproto && rproto) { // two C99 style function prototypes
11441 assert((AllowCXX ||
11442 (!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec())) &&
11443 "C++ shouldn't be here");
11444 // Compatible functions must have the same number of parameters
11445 if (lproto->getNumParams() != rproto->getNumParams())
11446 return {};
11447
11448 // Variadic and non-variadic functions aren't compatible
11449 if (lproto->isVariadic() != rproto->isVariadic())
11450 return {};
11451
11452 if (lproto->getMethodQuals() != rproto->getMethodQuals())
11453 return {};
11454
11455 // Function protos with different 'cfi_salt' values aren't compatible.
11456 if (lproto->getExtraAttributeInfo().CFISalt !=
11457 rproto->getExtraAttributeInfo().CFISalt)
11458 return {};
11459
11460 // Function effects are handled similarly to noreturn, see above.
11461 FunctionEffectsRef LHSFX = lproto->getFunctionEffects();
11462 FunctionEffectsRef RHSFX = rproto->getFunctionEffects();
11463 if (LHSFX != RHSFX) {
11464 if (IsConditionalOperator)
11465 MergedFX = FunctionEffectSet::getIntersection(LHS: LHSFX, RHS: RHSFX);
11466 else {
11467 FunctionEffectSet::Conflicts Errs;
11468 MergedFX = FunctionEffectSet::getUnion(LHS: LHSFX, RHS: RHSFX, Errs);
11469 // Here we're discarding a possible error due to conflicts in the effect
11470 // sets. But we're not in a context where we can report it. The
11471 // operation does however guarantee maintenance of invariants.
11472 }
11473 if (*MergedFX != LHSFX)
11474 allLTypes = false;
11475 if (*MergedFX != RHSFX)
11476 allRTypes = false;
11477 }
11478
11479 SmallVector<FunctionProtoType::ExtParameterInfo, 4> newParamInfos;
11480 bool canUseLeft, canUseRight;
11481 if (!mergeExtParameterInfo(FirstFnType: lproto, SecondFnType: rproto, CanUseFirst&: canUseLeft, CanUseSecond&: canUseRight,
11482 NewParamInfos&: newParamInfos))
11483 return {};
11484
11485 if (!canUseLeft)
11486 allLTypes = false;
11487 if (!canUseRight)
11488 allRTypes = false;
11489
11490 // Check parameter type compatibility
11491 SmallVector<QualType, 10> types;
11492 for (unsigned i = 0, n = lproto->getNumParams(); i < n; i++) {
11493 QualType lParamType = lproto->getParamType(i).getUnqualifiedType();
11494 QualType rParamType = rproto->getParamType(i).getUnqualifiedType();
11495 QualType paramType = mergeFunctionParameterTypes(
11496 lhs: lParamType, rhs: rParamType, OfBlockPointer, Unqualified);
11497 if (paramType.isNull())
11498 return {};
11499
11500 if (Unqualified)
11501 paramType = paramType.getUnqualifiedType();
11502
11503 types.push_back(Elt: paramType);
11504 if (Unqualified) {
11505 lParamType = lParamType.getUnqualifiedType();
11506 rParamType = rParamType.getUnqualifiedType();
11507 }
11508
11509 if (getCanonicalType(T: paramType) != getCanonicalType(T: lParamType))
11510 allLTypes = false;
11511 if (getCanonicalType(T: paramType) != getCanonicalType(T: rParamType))
11512 allRTypes = false;
11513 }
11514
11515 if (allLTypes) return lhs;
11516 if (allRTypes) return rhs;
11517
11518 FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo();
11519 EPI.ExtInfo = einfo;
11520 EPI.ExtParameterInfos =
11521 newParamInfos.empty() ? nullptr : newParamInfos.data();
11522 if (MergedFX)
11523 EPI.FunctionEffects = *MergedFX;
11524 return getFunctionType(ResultTy: retType, Args: types, EPI);
11525 }
11526
11527 if (lproto) allRTypes = false;
11528 if (rproto) allLTypes = false;
11529
11530 const FunctionProtoType *proto = lproto ? lproto : rproto;
11531 if (proto) {
11532 assert((AllowCXX || !proto->hasExceptionSpec()) && "C++ shouldn't be here");
11533 if (proto->isVariadic())
11534 return {};
11535 // Check that the types are compatible with the types that
11536 // would result from default argument promotions (C99 6.7.5.3p15).
11537 // The only types actually affected are promotable integer
11538 // types and floats, which would be passed as a different
11539 // type depending on whether the prototype is visible.
11540 for (unsigned i = 0, n = proto->getNumParams(); i < n; ++i) {
11541 QualType paramTy = proto->getParamType(i);
11542
11543 // Look at the converted type of enum types, since that is the type used
11544 // to pass enum values.
11545 if (const auto *ED = paramTy->getAsEnumDecl()) {
11546 paramTy = ED->getIntegerType();
11547 if (paramTy.isNull())
11548 return {};
11549 }
11550
11551 if (isPromotableIntegerType(T: paramTy) ||
11552 getCanonicalType(T: paramTy).getUnqualifiedType() == FloatTy)
11553 return {};
11554 }
11555
11556 if (allLTypes) return lhs;
11557 if (allRTypes) return rhs;
11558
11559 FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo();
11560 EPI.ExtInfo = einfo;
11561 if (MergedFX)
11562 EPI.FunctionEffects = *MergedFX;
11563 return getFunctionType(ResultTy: retType, Args: proto->getParamTypes(), EPI);
11564 }
11565
11566 if (allLTypes) return lhs;
11567 if (allRTypes) return rhs;
11568 return getFunctionNoProtoType(ResultTy: retType, Info: einfo);
11569}
11570
11571/// Given that we have an enum type and a non-enum type, try to merge them.
11572static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET,
11573 QualType other, bool isBlockReturnType) {
11574 // C99 6.7.2.2p4: Each enumerated type shall be compatible with char,
11575 // a signed integer type, or an unsigned integer type.
11576 // Compatibility is based on the underlying type, not the promotion
11577 // type.
11578 QualType underlyingType =
11579 ET->getDecl()->getDefinitionOrSelf()->getIntegerType();
11580 if (underlyingType.isNull())
11581 return {};
11582 if (Context.hasSameType(T1: underlyingType, T2: other))
11583 return other;
11584
11585 // In block return types, we're more permissive and accept any
11586 // integral type of the same size.
11587 if (isBlockReturnType && other->isIntegerType() &&
11588 Context.getTypeSize(T: underlyingType) == Context.getTypeSize(T: other))
11589 return other;
11590
11591 return {};
11592}
11593
11594QualType ASTContext::mergeTagDefinitions(QualType LHS, QualType RHS) {
11595 // C17 and earlier and C++ disallow two tag definitions within the same TU
11596 // from being compatible.
11597 if (LangOpts.CPlusPlus || !LangOpts.C23)
11598 return {};
11599
11600 // Nameless tags are comparable only within outer definitions. At the top
11601 // level they are not comparable.
11602 const TagDecl *LTagD = LHS->castAsTagDecl(), *RTagD = RHS->castAsTagDecl();
11603 if (!LTagD->getIdentifier() || !RTagD->getIdentifier())
11604 return {};
11605
11606 // C23, on the other hand, requires the members to be "the same enough", so
11607 // we use a structural equivalence check.
11608 StructuralEquivalenceContext::NonEquivalentDeclSet NonEquivalentDecls;
11609 StructuralEquivalenceContext Ctx(
11610 getLangOpts(), *this, *this, NonEquivalentDecls,
11611 StructuralEquivalenceKind::Default, /*StrictTypeSpelling=*/false,
11612 /*Complain=*/false, /*ErrorOnTagTypeMismatch=*/true);
11613 return Ctx.IsEquivalent(T1: LHS, T2: RHS) ? LHS : QualType{};
11614}
11615
/// Compute the composite ("merged") type of two compatible types in the C
/// sense (C99 6.2.7), returning a null QualType when they are incompatible.
/// OfBlockPointer/BlockReturnType relax the rules for Objective-C block
/// pointee/return positions, Unqualified merges the unqualified forms, and
/// IsConditionalOperator is forwarded to function-type merging (see
/// mergeFunctionTypes above), where it selects intersection rather than
/// union behavior for merged attributes such as noreturn.
QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, bool OfBlockPointer,
                                bool Unqualified, bool BlockReturnType,
                                bool IsConditionalOperator) {
  // For C++ we will not reach this code with reference types (see below),
  // for OpenMP variant call overloading we might.
  //
  // C++ [expr]: If an expression initially has the type "reference to T", the
  // type is adjusted to "T" prior to any further analysis, the expression
  // designates the object or function denoted by the reference, and the
  // expression is an lvalue unless the reference is an rvalue reference and
  // the expression is a function call (possibly inside parentheses).
  auto *LHSRefTy = LHS->getAs<ReferenceType>();
  auto *RHSRefTy = RHS->getAs<ReferenceType>();
  if (LangOpts.OpenMP && LHSRefTy && RHSRefTy &&
      LHS->getTypeClass() == RHS->getTypeClass())
    return mergeTypes(LHS: LHSRefTy->getPointeeType(), RHS: RHSRefTy->getPointeeType(),
                      OfBlockPointer, Unqualified, BlockReturnType);
  if (LHSRefTy || RHSRefTy)
    return {};

  if (Unqualified) {
    LHS = LHS.getUnqualifiedType();
    RHS = RHS.getUnqualifiedType();
  }

  QualType LHSCan = getCanonicalType(T: LHS),
           RHSCan = getCanonicalType(T: RHS);

  // If two types are identical, they are compatible.
  if (LHSCan == RHSCan)
    return LHS;

  // If the qualifiers are different, the types aren't compatible... mostly.
  Qualifiers LQuals = LHSCan.getLocalQualifiers();
  Qualifiers RQuals = RHSCan.getLocalQualifiers();
  if (LQuals != RQuals) {
    // If any of these qualifiers are different, we have a type
    // mismatch.
    if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() ||
        LQuals.getAddressSpace() != RQuals.getAddressSpace() ||
        LQuals.getObjCLifetime() != RQuals.getObjCLifetime() ||
        !LQuals.getPointerAuth().isEquivalent(Other: RQuals.getPointerAuth()) ||
        LQuals.hasUnaligned() != RQuals.hasUnaligned())
      return {};

    // Exactly one GC qualifier difference is allowed: __strong is
    // okay if the other type has no GC qualifier but is an Objective
    // C object pointer (i.e. implicitly strong by default). We fix
    // this by pretending that the unqualified type was actually
    // qualified __strong.
    Qualifiers::GC GC_L = LQuals.getObjCGCAttr();
    Qualifiers::GC GC_R = RQuals.getObjCGCAttr();
    assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements");

    if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak)
      return {};

    if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) {
      return mergeTypes(LHS, RHS: getObjCGCQualType(T: RHS, GCAttr: Qualifiers::Strong));
    }
    if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) {
      return mergeTypes(LHS: getObjCGCQualType(T: LHS, GCAttr: Qualifiers::Strong), RHS);
    }
    return {};
  }

  // Okay, qualifiers are equal.

  Type::TypeClass LHSClass = LHSCan->getTypeClass();
  Type::TypeClass RHSClass = RHSCan->getTypeClass();

  // We want to consider the two function types to be the same for these
  // comparisons, just force one to the other.
  if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto;
  if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto;

  // Same as above for arrays
  if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray)
    LHSClass = Type::ConstantArray;
  if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray)
    RHSClass = Type::ConstantArray;

  // ObjCInterfaces are just specialized ObjCObjects.
  if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject;
  if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject;

  // Canonicalize ExtVector -> Vector.
  if (LHSClass == Type::ExtVector) LHSClass = Type::Vector;
  if (RHSClass == Type::ExtVector) RHSClass = Type::Vector;

  // If the canonical type classes don't match.
  if (LHSClass != RHSClass) {
    // Note that we only have special rules for turning block enum
    // returns into block int returns, not vice-versa.
    if (const auto *ETy = LHS->getAsCanonical<EnumType>()) {
      return mergeEnumWithInteger(Context&: *this, ET: ETy, other: RHS, isBlockReturnType: false);
    }
    if (const EnumType *ETy = RHS->getAsCanonical<EnumType>()) {
      return mergeEnumWithInteger(Context&: *this, ET: ETy, other: LHS, isBlockReturnType: BlockReturnType);
    }
    // allow block pointer type to match an 'id' type.
    if (OfBlockPointer && !BlockReturnType) {
      if (LHS->isObjCIdType() && RHS->isBlockPointerType())
        return LHS;
      if (RHS->isObjCIdType() && LHS->isBlockPointerType())
        return RHS;
    }
    // Allow __auto_type to match anything; it merges to the type with more
    // information.
    if (const auto *AT = LHS->getAs<AutoType>()) {
      if (!AT->isDeduced() && AT->isGNUAutoType())
        return RHS;
    }
    if (const auto *AT = RHS->getAs<AutoType>()) {
      if (!AT->isDeduced() && AT->isGNUAutoType())
        return LHS;
    }
    return {};
  }

  // The canonical type classes match.
  switch (LHSClass) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");

  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
  case Type::LValueReference:
  case Type::RValueReference:
  case Type::MemberPointer:
    llvm_unreachable("C++ should never be in mergeTypes");

  case Type::ObjCInterface:
  case Type::IncompleteArray:
  case Type::VariableArray:
  case Type::FunctionProto:
  case Type::ExtVector:
    llvm_unreachable("Types are eliminated above");

  case Type::Pointer:
  {
    // Merge two pointer types, while trying to preserve typedef info
    QualType LHSPointee = LHS->castAs<PointerType>()->getPointeeType();
    QualType RHSPointee = RHS->castAs<PointerType>()->getPointeeType();
    if (Unqualified) {
      LHSPointee = LHSPointee.getUnqualifiedType();
      RHSPointee = RHSPointee.getUnqualifiedType();
    }
    QualType ResultType = mergeTypes(LHS: LHSPointee, RHS: RHSPointee, OfBlockPointer: false,
                                     Unqualified);
    if (ResultType.isNull())
      return {};
    // Prefer returning an existing type (LHS, then RHS) over building a new
    // one, so sugar such as typedefs survives the merge when possible.
    if (getCanonicalType(T: LHSPointee) == getCanonicalType(T: ResultType))
      return LHS;
    if (getCanonicalType(T: RHSPointee) == getCanonicalType(T: ResultType))
      return RHS;
    return getPointerType(T: ResultType);
  }
  case Type::BlockPointer:
  {
    // Merge two block pointer types, while trying to preserve typedef info
    QualType LHSPointee = LHS->castAs<BlockPointerType>()->getPointeeType();
    QualType RHSPointee = RHS->castAs<BlockPointerType>()->getPointeeType();
    if (Unqualified) {
      LHSPointee = LHSPointee.getUnqualifiedType();
      RHSPointee = RHSPointee.getUnqualifiedType();
    }
    if (getLangOpts().OpenCL) {
      Qualifiers LHSPteeQual = LHSPointee.getQualifiers();
      Qualifiers RHSPteeQual = RHSPointee.getQualifiers();
      // Blocks can't be an expression in a ternary operator (OpenCL v2.0
      // 6.12.5) thus the following check is asymmetric.
      if (!LHSPteeQual.isAddressSpaceSupersetOf(other: RHSPteeQual, Ctx: *this))
        return {};
      LHSPteeQual.removeAddressSpace();
      RHSPteeQual.removeAddressSpace();
      LHSPointee =
          QualType(LHSPointee.getTypePtr(), LHSPteeQual.getAsOpaqueValue());
      RHSPointee =
          QualType(RHSPointee.getTypePtr(), RHSPteeQual.getAsOpaqueValue());
    }
    QualType ResultType = mergeTypes(LHS: LHSPointee, RHS: RHSPointee, OfBlockPointer,
                                     Unqualified);
    if (ResultType.isNull())
      return {};
    if (getCanonicalType(T: LHSPointee) == getCanonicalType(T: ResultType))
      return LHS;
    if (getCanonicalType(T: RHSPointee) == getCanonicalType(T: ResultType))
      return RHS;
    return getBlockPointerType(T: ResultType);
  }
  case Type::Atomic:
  {
    // Merge two pointer types, while trying to preserve typedef info
    QualType LHSValue = LHS->castAs<AtomicType>()->getValueType();
    QualType RHSValue = RHS->castAs<AtomicType>()->getValueType();
    if (Unqualified) {
      LHSValue = LHSValue.getUnqualifiedType();
      RHSValue = RHSValue.getUnqualifiedType();
    }
    QualType ResultType = mergeTypes(LHS: LHSValue, RHS: RHSValue, OfBlockPointer: false,
                                     Unqualified);
    if (ResultType.isNull())
      return {};
    if (getCanonicalType(T: LHSValue) == getCanonicalType(T: ResultType))
      return LHS;
    if (getCanonicalType(T: RHSValue) == getCanonicalType(T: ResultType))
      return RHS;
    return getAtomicType(T: ResultType);
  }
  case Type::ConstantArray:
  {
    // Note: VLA/incomplete arrays were folded into ConstantArray above, so
    // LCAT/RCAT may legitimately be null here.
    const ConstantArrayType* LCAT = getAsConstantArrayType(T: LHS);
    const ConstantArrayType* RCAT = getAsConstantArrayType(T: RHS);
    if (LCAT && RCAT && RCAT->getZExtSize() != LCAT->getZExtSize())
      return {};

    QualType LHSElem = getAsArrayType(T: LHS)->getElementType();
    QualType RHSElem = getAsArrayType(T: RHS)->getElementType();
    if (Unqualified) {
      LHSElem = LHSElem.getUnqualifiedType();
      RHSElem = RHSElem.getUnqualifiedType();
    }

    QualType ResultType = mergeTypes(LHS: LHSElem, RHS: RHSElem, OfBlockPointer: false, Unqualified);
    if (ResultType.isNull())
      return {};

    const VariableArrayType* LVAT = getAsVariableArrayType(T: LHS);
    const VariableArrayType* RVAT = getAsVariableArrayType(T: RHS);

    // If either side is a variable array, and both are complete, check whether
    // the current dimension is definite.
    if (LVAT || RVAT) {
      // Returns (true, size) when the dimension is a known constant, either
      // from a constant array or from a VLA whose size expression folds to an
      // integer constant.
      auto SizeFetch = [this](const VariableArrayType* VAT,
                              const ConstantArrayType* CAT)
          -> std::pair<bool,llvm::APInt> {
        if (VAT) {
          std::optional<llvm::APSInt> TheInt;
          Expr *E = VAT->getSizeExpr();
          if (E && (TheInt = E->getIntegerConstantExpr(Ctx: *this)))
            return std::make_pair(x: true, y&: *TheInt);
          return std::make_pair(x: false, y: llvm::APSInt());
        }
        if (CAT)
          return std::make_pair(x: true, y: CAT->getSize());
        return std::make_pair(x: false, y: llvm::APInt());
      };

      bool HaveLSize, HaveRSize;
      llvm::APInt LSize, RSize;
      std::tie(args&: HaveLSize, args&: LSize) = SizeFetch(LVAT, LCAT);
      std::tie(args&: HaveRSize, args&: RSize) = SizeFetch(RVAT, RCAT);
      if (HaveLSize && HaveRSize && !llvm::APInt::isSameValue(I1: LSize, I2: RSize))
        return {}; // Definite, but unequal, array dimension
    }

    // Prefer the constant-array form, then sugared originals, and fall back
    // to an incomplete array of the merged element type.
    if (LCAT && getCanonicalType(T: LHSElem) == getCanonicalType(T: ResultType))
      return LHS;
    if (RCAT && getCanonicalType(T: RHSElem) == getCanonicalType(T: ResultType))
      return RHS;
    if (LCAT)
      return getConstantArrayType(EltTy: ResultType, ArySizeIn: LCAT->getSize(),
                                  SizeExpr: LCAT->getSizeExpr(), ASM: ArraySizeModifier(), IndexTypeQuals: 0);
    if (RCAT)
      return getConstantArrayType(EltTy: ResultType, ArySizeIn: RCAT->getSize(),
                                  SizeExpr: RCAT->getSizeExpr(), ASM: ArraySizeModifier(), IndexTypeQuals: 0);
    if (LVAT && getCanonicalType(T: LHSElem) == getCanonicalType(T: ResultType))
      return LHS;
    if (RVAT && getCanonicalType(T: RHSElem) == getCanonicalType(T: ResultType))
      return RHS;
    if (LVAT) {
      // FIXME: This isn't correct! But tricky to implement because
      // the array's size has to be the size of LHS, but the type
      // has to be different.
      return LHS;
    }
    if (RVAT) {
      // FIXME: This isn't correct! But tricky to implement because
      // the array's size has to be the size of RHS, but the type
      // has to be different.
      return RHS;
    }
    if (getCanonicalType(T: LHSElem) == getCanonicalType(T: ResultType)) return LHS;
    if (getCanonicalType(T: RHSElem) == getCanonicalType(T: ResultType)) return RHS;
    return getIncompleteArrayType(elementType: ResultType, ASM: ArraySizeModifier(), elementTypeQuals: 0);
  }
  case Type::FunctionNoProto:
    // FunctionProto was folded into FunctionNoProto above; the helper
    // re-discovers prototypes itself.
    return mergeFunctionTypes(lhs: LHS, rhs: RHS, OfBlockPointer, Unqualified,
                              /*AllowCXX=*/false, IsConditionalOperator);
  case Type::Record:
  case Type::Enum:
    return mergeTagDefinitions(LHS, RHS);
  case Type::Builtin:
    // Only exactly equal builtin types are compatible, which is tested above.
    return {};
  case Type::Complex:
    // Distinct complex types are incompatible.
    return {};
  case Type::Vector:
    // FIXME: The merged type should be an ExtVector!
    if (areCompatVectorTypes(LHS: LHSCan->castAs<VectorType>(),
                             RHS: RHSCan->castAs<VectorType>()))
      return LHS;
    return {};
  case Type::ConstantMatrix:
    if (areCompatMatrixTypes(LHS: LHSCan->castAs<ConstantMatrixType>(),
                             RHS: RHSCan->castAs<ConstantMatrixType>()))
      return LHS;
    return {};
  case Type::ObjCObject: {
    // Check if the types are assignment compatible.
    // FIXME: This should be type compatibility, e.g. whether
    // "LHS x; RHS x;" at global scope is legal.
    if (canAssignObjCInterfaces(LHS: LHS->castAs<ObjCObjectType>(),
                                RHS: RHS->castAs<ObjCObjectType>()))
      return LHS;
    return {};
  }
  case Type::ObjCObjectPointer:
    if (OfBlockPointer) {
      if (canAssignObjCInterfacesInBlockPointer(
              LHSOPT: LHS->castAs<ObjCObjectPointerType>(),
              RHSOPT: RHS->castAs<ObjCObjectPointerType>(), BlockReturnType))
        return LHS;
      return {};
    }
    if (canAssignObjCInterfaces(LHSOPT: LHS->castAs<ObjCObjectPointerType>(),
                                RHSOPT: RHS->castAs<ObjCObjectPointerType>()))
      return LHS;
    return {};
  case Type::Pipe:
    assert(LHS != RHS &&
           "Equivalent pipe types should have already been handled!");
    return {};
  case Type::ArrayParameter:
    assert(LHS != RHS &&
           "Equivalent ArrayParameter types should have already been handled!");
    return {};
  case Type::BitInt: {
    // Merge two bit-precise int types, while trying to preserve typedef info.
    bool LHSUnsigned = LHS->castAs<BitIntType>()->isUnsigned();
    bool RHSUnsigned = RHS->castAs<BitIntType>()->isUnsigned();
    unsigned LHSBits = LHS->castAs<BitIntType>()->getNumBits();
    unsigned RHSBits = RHS->castAs<BitIntType>()->getNumBits();

    // Like unsigned/int, shouldn't have a type if they don't match.
    if (LHSUnsigned != RHSUnsigned)
      return {};

    if (LHSBits != RHSBits)
      return {};
    return LHS;
  }
  case Type::HLSLAttributedResource: {
    const HLSLAttributedResourceType *LHSTy =
        LHS->castAs<HLSLAttributedResourceType>();
    const HLSLAttributedResourceType *RHSTy =
        RHS->castAs<HLSLAttributedResourceType>();
    assert(LHSTy->getWrappedType() == RHSTy->getWrappedType() &&
           LHSTy->getWrappedType()->isHLSLResourceType() &&
           "HLSLAttributedResourceType should always wrap __hlsl_resource_t");

    if (LHSTy->getAttrs() == RHSTy->getAttrs() &&
        LHSTy->getContainedType() == RHSTy->getContainedType())
      return LHS;
    return {};
  }
  case Type::HLSLInlineSpirv:
    const HLSLInlineSpirvType *LHSTy = LHS->castAs<HLSLInlineSpirvType>();
    const HLSLInlineSpirvType *RHSTy = RHS->castAs<HLSLInlineSpirvType>();

    // Compatible only when opcode, size, alignment, and every operand agree.
    if (LHSTy->getOpcode() == RHSTy->getOpcode() &&
        LHSTy->getSize() == RHSTy->getSize() &&
        LHSTy->getAlignment() == RHSTy->getAlignment()) {
      for (size_t I = 0; I < LHSTy->getOperands().size(); I++)
        if (LHSTy->getOperands()[I] != RHSTy->getOperands()[I])
          return {};

      return LHS;
    }
    return {};
  }

  llvm_unreachable("Invalid Type::Class!");
}
12007
12008bool ASTContext::mergeExtParameterInfo(
12009 const FunctionProtoType *FirstFnType, const FunctionProtoType *SecondFnType,
12010 bool &CanUseFirst, bool &CanUseSecond,
12011 SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &NewParamInfos) {
12012 assert(NewParamInfos.empty() && "param info list not empty");
12013 CanUseFirst = CanUseSecond = true;
12014 bool FirstHasInfo = FirstFnType->hasExtParameterInfos();
12015 bool SecondHasInfo = SecondFnType->hasExtParameterInfos();
12016
12017 // Fast path: if the first type doesn't have ext parameter infos,
12018 // we match if and only if the second type also doesn't have them.
12019 if (!FirstHasInfo && !SecondHasInfo)
12020 return true;
12021
12022 bool NeedParamInfo = false;
12023 size_t E = FirstHasInfo ? FirstFnType->getExtParameterInfos().size()
12024 : SecondFnType->getExtParameterInfos().size();
12025
12026 for (size_t I = 0; I < E; ++I) {
12027 FunctionProtoType::ExtParameterInfo FirstParam, SecondParam;
12028 if (FirstHasInfo)
12029 FirstParam = FirstFnType->getExtParameterInfo(I);
12030 if (SecondHasInfo)
12031 SecondParam = SecondFnType->getExtParameterInfo(I);
12032
12033 // Cannot merge unless everything except the noescape flag matches.
12034 if (FirstParam.withIsNoEscape(NoEscape: false) != SecondParam.withIsNoEscape(NoEscape: false))
12035 return false;
12036
12037 bool FirstNoEscape = FirstParam.isNoEscape();
12038 bool SecondNoEscape = SecondParam.isNoEscape();
12039 bool IsNoEscape = FirstNoEscape && SecondNoEscape;
12040 NewParamInfos.push_back(Elt: FirstParam.withIsNoEscape(NoEscape: IsNoEscape));
12041 if (NewParamInfos.back().getOpaqueValue())
12042 NeedParamInfo = true;
12043 if (FirstNoEscape != IsNoEscape)
12044 CanUseFirst = false;
12045 if (SecondNoEscape != IsNoEscape)
12046 CanUseSecond = false;
12047 }
12048
12049 if (!NeedParamInfo)
12050 NewParamInfos.clear();
12051
12052 return true;
12053}
12054
12055void ASTContext::ResetObjCLayout(const ObjCInterfaceDecl *D) {
12056 if (auto It = ObjCLayouts.find(Val: D); It != ObjCLayouts.end()) {
12057 It->second = nullptr;
12058 for (auto *SubClass : ObjCSubClasses.lookup(Val: D))
12059 ResetObjCLayout(D: SubClass);
12060 }
12061}
12062
12063/// mergeObjCGCQualifiers - This routine merges ObjC's GC attribute of 'LHS' and
12064/// 'RHS' attributes and returns the merged version; including for function
12065/// return types.
12066QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) {
12067 QualType LHSCan = getCanonicalType(T: LHS),
12068 RHSCan = getCanonicalType(T: RHS);
12069 // If two types are identical, they are compatible.
12070 if (LHSCan == RHSCan)
12071 return LHS;
12072 if (RHSCan->isFunctionType()) {
12073 if (!LHSCan->isFunctionType())
12074 return {};
12075 QualType OldReturnType =
12076 cast<FunctionType>(Val: RHSCan.getTypePtr())->getReturnType();
12077 QualType NewReturnType =
12078 cast<FunctionType>(Val: LHSCan.getTypePtr())->getReturnType();
12079 QualType ResReturnType =
12080 mergeObjCGCQualifiers(LHS: NewReturnType, RHS: OldReturnType);
12081 if (ResReturnType.isNull())
12082 return {};
12083 if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) {
12084 // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo();
12085 // In either case, use OldReturnType to build the new function type.
12086 const auto *F = LHS->castAs<FunctionType>();
12087 if (const auto *FPT = cast<FunctionProtoType>(Val: F)) {
12088 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
12089 EPI.ExtInfo = getFunctionExtInfo(t: LHS);
12090 QualType ResultType =
12091 getFunctionType(ResultTy: OldReturnType, Args: FPT->getParamTypes(), EPI);
12092 return ResultType;
12093 }
12094 }
12095 return {};
12096 }
12097
12098 // If the qualifiers are different, the types can still be merged.
12099 Qualifiers LQuals = LHSCan.getLocalQualifiers();
12100 Qualifiers RQuals = RHSCan.getLocalQualifiers();
12101 if (LQuals != RQuals) {
12102 // If any of these qualifiers are different, we have a type mismatch.
12103 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() ||
12104 LQuals.getAddressSpace() != RQuals.getAddressSpace())
12105 return {};
12106
12107 // Exactly one GC qualifier difference is allowed: __strong is
12108 // okay if the other type has no GC qualifier but is an Objective
12109 // C object pointer (i.e. implicitly strong by default). We fix
12110 // this by pretending that the unqualified type was actually
12111 // qualified __strong.
12112 Qualifiers::GC GC_L = LQuals.getObjCGCAttr();
12113 Qualifiers::GC GC_R = RQuals.getObjCGCAttr();
12114 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements");
12115
12116 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak)
12117 return {};
12118
12119 if (GC_L == Qualifiers::Strong)
12120 return LHS;
12121 if (GC_R == Qualifiers::Strong)
12122 return RHS;
12123 return {};
12124 }
12125
12126 if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) {
12127 QualType LHSBaseQT = LHS->castAs<ObjCObjectPointerType>()->getPointeeType();
12128 QualType RHSBaseQT = RHS->castAs<ObjCObjectPointerType>()->getPointeeType();
12129 QualType ResQT = mergeObjCGCQualifiers(LHS: LHSBaseQT, RHS: RHSBaseQT);
12130 if (ResQT == LHSBaseQT)
12131 return LHS;
12132 if (ResQT == RHSBaseQT)
12133 return RHS;
12134 }
12135 return {};
12136}
12137
12138//===----------------------------------------------------------------------===//
12139// Integer Predicates
12140//===----------------------------------------------------------------------===//
12141
12142unsigned ASTContext::getIntWidth(QualType T) const {
12143 if (const auto *ED = T->getAsEnumDecl())
12144 T = ED->getIntegerType();
12145 if (T->isBooleanType())
12146 return 1;
12147 if (const auto *EIT = T->getAs<BitIntType>())
12148 return EIT->getNumBits();
12149 // For builtin types, just use the standard type sizing method
12150 return (unsigned)getTypeSize(T);
12151}
12152
/// Return the unsigned counterpart of \p T, preserving width (and, for
/// vectors, element count and kind).  Handles vectors, _BitInt, enums (via
/// their underlying type), builtin integers, and fixed-point types; plain
/// 'char' maps to 'unsigned char' even on targets where it is already
/// unsigned, and already-unsigned types otherwise come back unchanged.
QualType ASTContext::getCorrespondingUnsignedType(QualType T) const {
  assert((T->hasIntegerRepresentation() || T->isEnumeralType() ||
          T->isFixedPointType()) &&
         "Unexpected type");

  // Turn <4 x signed int> -> <4 x unsigned int>
  if (const auto *VTy = T->getAs<VectorType>())
    return getVectorType(vecType: getCorrespondingUnsignedType(T: VTy->getElementType()),
                         NumElts: VTy->getNumElements(), VecKind: VTy->getVectorKind());

  // For _BitInt, return an unsigned _BitInt with same width.
  if (const auto *EITy = T->getAs<BitIntType>())
    return getBitIntType(/*Unsigned=*/IsUnsigned: true, NumBits: EITy->getNumBits());

  // For enums, get the underlying integer type of the enum, and let the general
  // integer type signchanging code handle it.
  if (const auto *ED = T->getAsEnumDecl())
    T = ED->getIntegerType();

  switch (T->castAs<BuiltinType>()->getKind()) {
  case BuiltinType::Char_U:
    // Plain `char` is mapped to `unsigned char` even if it's already unsigned
  case BuiltinType::Char_S:
  case BuiltinType::SChar:
  case BuiltinType::Char8:
    return UnsignedCharTy;
  case BuiltinType::Short:
    return UnsignedShortTy;
  case BuiltinType::Int:
    return UnsignedIntTy;
  case BuiltinType::Long:
    return UnsignedLongTy;
  case BuiltinType::LongLong:
    return UnsignedLongLongTy;
  case BuiltinType::Int128:
    return UnsignedInt128Ty;
  // wchar_t is special. It is either signed or not, but when it's signed,
  // there's no matching "unsigned wchar_t". Therefore we return the unsigned
  // version of its underlying type instead.
  case BuiltinType::WChar_S:
    return getUnsignedWCharType();

  // Fixed-point types (ISO/IEC TR 18037): map each signed/saturating kind to
  // its unsigned counterpart of the same rank.
  case BuiltinType::ShortAccum:
    return UnsignedShortAccumTy;
  case BuiltinType::Accum:
    return UnsignedAccumTy;
  case BuiltinType::LongAccum:
    return UnsignedLongAccumTy;
  case BuiltinType::SatShortAccum:
    return SatUnsignedShortAccumTy;
  case BuiltinType::SatAccum:
    return SatUnsignedAccumTy;
  case BuiltinType::SatLongAccum:
    return SatUnsignedLongAccumTy;
  case BuiltinType::ShortFract:
    return UnsignedShortFractTy;
  case BuiltinType::Fract:
    return UnsignedFractTy;
  case BuiltinType::LongFract:
    return UnsignedLongFractTy;
  case BuiltinType::SatShortFract:
    return SatUnsignedShortFractTy;
  case BuiltinType::SatFract:
    return SatUnsignedFractTy;
  case BuiltinType::SatLongFract:
    return SatUnsignedLongFractTy;
  default:
    // Anything not listed must already have an unsigned representation.
    assert((T->hasUnsignedIntegerRepresentation() ||
            T->isUnsignedFixedPointType()) &&
           "Unexpected signed integer or fixed point type");
    return T;
  }
}
12226
/// Return the signed counterpart of \p T, preserving width (and, for
/// vectors, element count and kind).  Mirror image of
/// getCorrespondingUnsignedType: handles vectors, _BitInt, enums (via their
/// underlying type), builtin integers, and fixed-point types; plain 'char'
/// maps to 'signed char' even on targets where it is already signed.
QualType ASTContext::getCorrespondingSignedType(QualType T) const {
  assert((T->hasIntegerRepresentation() || T->isEnumeralType() ||
          T->isFixedPointType()) &&
         "Unexpected type");

  // Turn <4 x unsigned int> -> <4 x signed int>
  if (const auto *VTy = T->getAs<VectorType>())
    return getVectorType(vecType: getCorrespondingSignedType(T: VTy->getElementType()),
                         NumElts: VTy->getNumElements(), VecKind: VTy->getVectorKind());

  // For _BitInt, return a signed _BitInt with same width.
  if (const auto *EITy = T->getAs<BitIntType>())
    return getBitIntType(/*Unsigned=*/IsUnsigned: false, NumBits: EITy->getNumBits());

  // For enums, get the underlying integer type of the enum, and let the general
  // integer type signchanging code handle it.
  if (const auto *ED = T->getAsEnumDecl())
    T = ED->getIntegerType();

  switch (T->castAs<BuiltinType>()->getKind()) {
  case BuiltinType::Char_S:
    // Plain `char` is mapped to `signed char` even if it's already signed
  case BuiltinType::Char_U:
  case BuiltinType::UChar:
  case BuiltinType::Char8:
    return SignedCharTy;
  case BuiltinType::UShort:
    return ShortTy;
  case BuiltinType::UInt:
    return IntTy;
  case BuiltinType::ULong:
    return LongTy;
  case BuiltinType::ULongLong:
    return LongLongTy;
  case BuiltinType::UInt128:
    return Int128Ty;
  // wchar_t is special. It is either unsigned or not, but when it's unsigned,
  // there's no matching "signed wchar_t". Therefore we return the signed
  // version of its underlying type instead.
  case BuiltinType::WChar_U:
    return getSignedWCharType();

  // Fixed-point types (ISO/IEC TR 18037): map each unsigned/saturating kind
  // to its signed counterpart of the same rank.
  case BuiltinType::UShortAccum:
    return ShortAccumTy;
  case BuiltinType::UAccum:
    return AccumTy;
  case BuiltinType::ULongAccum:
    return LongAccumTy;
  case BuiltinType::SatUShortAccum:
    return SatShortAccumTy;
  case BuiltinType::SatUAccum:
    return SatAccumTy;
  case BuiltinType::SatULongAccum:
    return SatLongAccumTy;
  case BuiltinType::UShortFract:
    return ShortFractTy;
  case BuiltinType::UFract:
    return FractTy;
  case BuiltinType::ULongFract:
    return LongFractTy;
  case BuiltinType::SatUShortFract:
    return SatShortFractTy;
  case BuiltinType::SatUFract:
    return SatFractTy;
  case BuiltinType::SatULongFract:
    return SatLongFractTy;
  default:
    // Anything not listed must already have a signed representation.
    assert(
        (T->hasSignedIntegerRepresentation() || T->isSignedFixedPointType()) &&
        "Unexpected signed integer or fixed point type");
    return T;
  }
}
12300
// Out-of-line defaulted destructor; the default behavior is sufficient.
ASTMutationListener::~ASTMutationListener() = default;
12302
// Default implementation does nothing; listeners interested in deduced
// return types provide their own override.
void ASTMutationListener::DeducedReturnType(const FunctionDecl *FD,
                                            QualType ReturnType) {}
12305
12306//===----------------------------------------------------------------------===//
12307// Builtin Type Computation
12308//===----------------------------------------------------------------------===//
12309
12310/// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the
12311/// pointer over the consumed characters. This returns the resultant type. If
12312/// AllowTypeModifiers is false then modifier like * are not parsed, just basic
12313/// types. This allows "v2i*" to be parsed as a pointer to a v2i instead of
12314/// a vector of "i*".
12315///
12316/// RequiresICE is filled in on return to indicate whether the value is required
12317/// to be an Integer Constant Expression.
static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
                                  ASTContext::GetBuiltinTypeError &Error,
                                  bool &RequiresICE,
                                  bool AllowTypeModifiers) {
  // Modifiers.
  // HowLong counts applied 'long's: 0 = plain, 1 = long, 2 = long long,
  // 3 = __int128 (only reachable for the 'i' base type).
  int HowLong = 0;
  bool Signed = false, Unsigned = false;
  RequiresICE = false;

  // Read the prefixed modifiers first.
  bool Done = false;
#ifndef NDEBUG
  // Tracks whether one of the mutually-exclusive 'N'/'W'/'Z'/'O' modifiers
  // was seen; only used for the asserts below.
  bool IsSpecial = false;
#endif
  while (!Done) {
    switch (*Str++) {
    default: Done = true; --Str; break; // Not a modifier: back up and stop.
    case 'I':
      // The corresponding argument must be an Integer Constant Expression.
      RequiresICE = true;
      break;
    case 'S':
      assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!");
      assert(!Signed && "Can't use 'S' modifier multiple times!");
      Signed = true;
      break;
    case 'U':
      assert(!Signed && "Can't use both 'S' and 'U' modifiers!");
      assert(!Unsigned && "Can't use 'U' modifier multiple times!");
      Unsigned = true;
      break;
    case 'L':
      assert(!IsSpecial && "Can't use 'L' with 'W', 'N', 'Z' or 'O' modifiers");
      assert(HowLong <= 2 && "Can't have LLLL modifier");
      ++HowLong;
      break;
    case 'N':
      // 'N' behaves like 'L' for all non LP64 targets and 'int' otherwise.
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'N' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      if (Context.getTargetInfo().getLongWidth() == 32)
        ++HowLong;
      break;
    case 'W':
      // This modifier represents int64 type.
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'W' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      // Map to whichever of long/long long the target uses for int64.
      switch (Context.getTargetInfo().getInt64Type()) {
      default:
        llvm_unreachable("Unexpected integer type");
      case TargetInfo::SignedLong:
        HowLong = 1;
        break;
      case TargetInfo::SignedLongLong:
        HowLong = 2;
        break;
      }
      break;
    case 'Z':
      // This modifier represents int32 type.
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'Z' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      // Map to whichever of int/long/long long is exactly 32 bits signed.
      switch (Context.getTargetInfo().getIntTypeByWidth(BitWidth: 32, IsSigned: true)) {
      default:
        llvm_unreachable("Unexpected integer type");
      case TargetInfo::SignedInt:
        HowLong = 0;
        break;
      case TargetInfo::SignedLong:
        HowLong = 1;
        break;
      case TargetInfo::SignedLongLong:
        HowLong = 2;
        break;
      }
      break;
    case 'O':
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'O' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      // OpenCL maps this to 'long' (64-bit there); otherwise 'long long'.
      if (Context.getLangOpts().OpenCL)
        HowLong = 1;
      else
        HowLong = 2;
      break;
    }
  }

  QualType Type;

  // Read the base type.
  switch (*Str++) {
  default:
    llvm_unreachable("Unknown builtin type letter!");
  case 'x':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'x'!");
    Type = Context.Float16Ty;
    break;
  case 'y':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'y'!");
    Type = Context.BFloat16Ty;
    break;
  case 'v':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'v'!");
    Type = Context.VoidTy;
    break;
  case 'h':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'h'!");
    Type = Context.HalfTy;
    break;
  case 'f':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'f'!");
    Type = Context.FloatTy;
    break;
  case 'd':
    // 'd' = double, 'Ld' = long double, 'LLd' = __float128.
    assert(HowLong < 3 && !Signed && !Unsigned &&
           "Bad modifiers used with 'd'!");
    if (HowLong == 1)
      Type = Context.LongDoubleTy;
    else if (HowLong == 2)
      Type = Context.Float128Ty;
    else
      Type = Context.DoubleTy;
    break;
  case 's':
    assert(HowLong == 0 && "Bad modifiers used with 's'!");
    if (Unsigned)
      Type = Context.UnsignedShortTy;
    else
      Type = Context.ShortTy;
    break;
  case 'i':
    // 'i' honors both the long count and the sign modifiers.
    if (HowLong == 3)
      Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty;
    else if (HowLong == 2)
      Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy;
    else if (HowLong == 1)
      Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy;
    else
      Type = Unsigned ? Context.UnsignedIntTy : Context.IntTy;
    break;
  case 'c':
    assert(HowLong == 0 && "Bad modifiers used with 'c'!");
    if (Signed)
      Type = Context.SignedCharTy;
    else if (Unsigned)
      Type = Context.UnsignedCharTy;
    else
      Type = Context.CharTy;
    break;
  case 'b': // boolean
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!");
    Type = Context.BoolTy;
    break;
  case 'z': // size_t.
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!");
    Type = Context.getSizeType();
    break;
  case 'w': // wchar_t.
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'w'!");
    Type = Context.getWideCharType();
    break;
  case 'F':
    Type = Context.getCFConstantStringType();
    break;
  case 'G':
    Type = Context.getObjCIdType();
    break;
  case 'H':
    Type = Context.getObjCSelType();
    break;
  case 'M':
    Type = Context.getObjCSuperType();
    break;
  case 'a':
    Type = Context.getBuiltinVaListType();
    assert(!Type.isNull() && "builtin va list type not initialized!");
    break;
  case 'A':
    // This is a "reference" to a va_list; however, what exactly
    // this means depends on how va_list is defined. There are two
    // different kinds of va_list: ones passed by value, and ones
    // passed by reference. An example of a by-value va_list is
    // x86, where va_list is a char*. An example of by-ref va_list
    // is x86-64, where va_list is a __va_list_tag[1]. For x86,
    // we want this argument to be a char*&; for x86-64, we want
    // it to be a __va_list_tag*.
    Type = Context.getBuiltinVaListType();
    assert(!Type.isNull() && "builtin va list type not initialized!");
    if (Type->isArrayType())
      Type = Context.getArrayDecayedType(Ty: Type);
    else
      Type = Context.getLValueReferenceType(T: Type);
    break;
  case 'q': {
    // Scalable vector: 'q' followed by the (minimum) element count and the
    // element type, e.g. "q16Sc".
    char *End;
    unsigned NumElements = strtoul(nptr: Str, endptr: &End, base: 10);
    assert(End != Str && "Missing vector size");
    Str = End;

    QualType ElementType = DecodeTypeFromStr(Str, Context, Error,
                                             RequiresICE, AllowTypeModifiers: false);
    assert(!RequiresICE && "Can't require vector ICE");

    Type = Context.getScalableVectorType(EltTy: ElementType, NumElts: NumElements);
    break;
  }
  case 'Q': {
    // Target-specific builtin types, selected by a second letter.
    switch (*Str++) {
    case 'a': {
      Type = Context.SveCountTy;
      break;
    }
    case 'b': {
      Type = Context.AMDGPUBufferRsrcTy;
      break;
    }
    case 't': {
      Type = Context.AMDGPUTextureTy;
      break;
    }
    case 'r': {
      Type = Context.HLSLResourceTy;
      break;
    }
    default:
      llvm_unreachable("Unexpected target builtin type");
    }
    break;
  }
  case 'V': {
    // Fixed-width generic vector: 'V' followed by element count and type.
    char *End;
    unsigned NumElements = strtoul(nptr: Str, endptr: &End, base: 10);
    assert(End != Str && "Missing vector size");
    Str = End;

    QualType ElementType = DecodeTypeFromStr(Str, Context, Error,
                                             RequiresICE, AllowTypeModifiers: false);
    assert(!RequiresICE && "Can't require vector ICE");

    // TODO: No way to make AltiVec vectors in builtins yet.
    Type = Context.getVectorType(vecType: ElementType, NumElts: NumElements, VecKind: VectorKind::Generic);
    break;
  }
  case 'E': {
    // ext_vector_type: 'E' followed by element count and type.
    char *End;

    unsigned NumElements = strtoul(nptr: Str, endptr: &End, base: 10);
    assert(End != Str && "Missing vector size");

    Str = End;

    QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE,
                                             AllowTypeModifiers: false);
    Type = Context.getExtVectorType(vecType: ElementType, NumElts: NumElements);
    break;
  }
  case 'X': {
    // _Complex of the following element type.
    QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE,
                                             AllowTypeModifiers: false);
    assert(!RequiresICE && "Can't require complex ICE");
    Type = Context.getComplexType(T: ElementType);
    break;
  }
  case 'Y':
    Type = Context.getPointerDiffType();
    break;
  case 'P':
    // FILE*; requires that the FILE type was registered (e.g. via stdio.h).
    Type = Context.getFILEType();
    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_stdio;
      return {};
    }
    break;
  case 'J':
    // jmp_buf ('J') or sigjmp_buf ('SJ'); requires setjmp.h registration.
    if (Signed)
      Type = Context.getsigjmp_bufType();
    else
      Type = Context.getjmp_bufType();

    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_setjmp;
      return {};
    }
    break;
  case 'K':
    // ucontext_t; requires ucontext.h registration.
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'K'!");
    Type = Context.getucontext_tType();

    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_ucontext;
      return {};
    }
    break;
  case 'p':
    Type = Context.getProcessIDType();
    break;
  case 'm':
    Type = Context.MFloat8Ty;
    break;
  }

  // If there are modifiers and if we're allowed to parse them, go for it.
  Done = !AllowTypeModifiers;
  while (!Done) {
    switch (char c = *Str++) {
    default: Done = true; --Str; break; // Not a suffix modifier: stop.
    case '*':
    case '&': {
      // Both pointers and references can have their pointee types
      // qualified with an address space.
      char *End;
      unsigned AddrSpace = strtoul(nptr: Str, endptr: &End, base: 10);
      if (End != Str) {
        // Note AddrSpace == 0 is not the same as an unspecified address space.
        Type = Context.getAddrSpaceQualType(
            T: Type,
            AddressSpace: Context.getLangASForBuiltinAddressSpace(AS: AddrSpace));
        Str = End;
      }
      if (c == '*')
        Type = Context.getPointerType(T: Type);
      else
        Type = Context.getLValueReferenceType(T: Type);
      break;
    }
    // FIXME: There's no way to have a built-in with an rvalue ref arg.
    case 'C':
      Type = Type.withConst();
      break;
    case 'D':
      Type = Context.getVolatileType(T: Type);
      break;
    case 'R':
      Type = Type.withRestrict();
      break;
    }
  }

  assert((!RequiresICE || Type->isIntegralOrEnumerationType()) &&
         "Integer constant 'I' type must be an integer");

  return Type;
}
12677
12678// On some targets such as PowerPC, some of the builtins are defined with custom
12679// type descriptors for target-dependent types. These descriptors are decoded in
12680// other functions, but it may be useful to be able to fall back to default
12681// descriptor decoding to define builtins mixing target-dependent and target-
12682// independent types. This function allows decoding one type descriptor with
12683// default decoding.
12684QualType ASTContext::DecodeTypeStr(const char *&Str, const ASTContext &Context,
12685 GetBuiltinTypeError &Error, bool &RequireICE,
12686 bool AllowTypeModifiers) const {
12687 return DecodeTypeFromStr(Str, Context, Error, RequiresICE&: RequireICE, AllowTypeModifiers);
12688}
12689
/// GetBuiltinType - Return the type for the specified builtin.
///
/// The builtin's type string is decoded one descriptor at a time: first the
/// result type, then each parameter type, with a trailing "." marking a
/// variadic builtin. On failure, \p Error is set and a null type is returned.
/// If \p IntegerConstantArgs is non-null, it receives a bitmask of the
/// argument positions that must be integer constant expressions.
QualType ASTContext::GetBuiltinType(unsigned Id,
                                    GetBuiltinTypeError &Error,
                                    unsigned *IntegerConstantArgs) const {
  const char *TypeStr = BuiltinInfo.getTypeString(ID: Id);
  // An empty type string means the builtin has no decodable type.
  if (TypeStr[0] == '\0') {
    Error = GE_Missing_type;
    return {};
  }

  SmallVector<QualType, 8> ArgTypes;

  bool RequiresICE = false;
  Error = GE_None;
  // First descriptor is the result type.
  QualType ResType = DecodeTypeFromStr(Str&: TypeStr, Context: *this, Error,
                                       RequiresICE, AllowTypeModifiers: true);
  if (Error != GE_None)
    return {};

  assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE");

  // Remaining descriptors are the parameter types, up to end of string or '.'.
  while (TypeStr[0] && TypeStr[0] != '.') {
    QualType Ty = DecodeTypeFromStr(Str&: TypeStr, Context: *this, Error, RequiresICE, AllowTypeModifiers: true);
    if (Error != GE_None)
      return {};

    // If this argument is required to be an IntegerConstantExpression and the
    // caller cares, fill in the bitmask we return.
    if (RequiresICE && IntegerConstantArgs)
      *IntegerConstantArgs |= 1 << ArgTypes.size();

    // Do array -> pointer decay. The builtin should use the decayed type.
    if (Ty->isArrayType())
      Ty = getArrayDecayedType(Ty);

    ArgTypes.push_back(Elt: Ty);
  }

  // __GetExceptionInfo is special-cased: no function type is produced here.
  if (Id == Builtin::BI__GetExceptionInfo)
    return {};

  assert((TypeStr[0] != '.' || TypeStr[1] == 0) &&
         "'.' should only occur at end of builtin type list!");

  bool Variadic = (TypeStr[0] == '.');

  FunctionType::ExtInfo EI(Target->getDefaultCallingConv());
  if (BuiltinInfo.isNoReturn(ID: Id))
    EI = EI.withNoReturn(noReturn: true);

  // We really shouldn't be making a no-proto type here.
  if (ArgTypes.empty() && Variadic && !getLangOpts().requiresStrictPrototypes())
    return getFunctionNoProtoType(ResultTy: ResType, Info: EI);

  FunctionProtoType::ExtProtoInfo EPI;
  EPI.ExtInfo = EI;
  EPI.Variadic = Variadic;
  // In C++, nothrow builtins get the language-appropriate exception spec.
  if (getLangOpts().CPlusPlus && BuiltinInfo.isNoThrow(ID: Id))
    EPI.ExceptionSpec.Type =
        getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone;

  return getFunctionType(ResultTy: ResType, Args: ArgTypes, EPI);
}
12753
/// Compute the base GVA linkage for a function from its visibility, template
/// specialization kind, and inline semantics, before any attribute- or
/// external-AST-source-based adjustments are applied.
static GVALinkage basicGVALinkageForFunction(const ASTContext &Context,
                                             const FunctionDecl *FD) {
  if (!FD->isExternallyVisible())
    return GVA_Internal;

  // Non-user-provided functions get emitted as weak definitions with every
  // use, no matter whether they've been explicitly instantiated etc.
  if (!FD->isUserProvided())
    return GVA_DiscardableODR;

  // Linkage to use if the function turns out not to be inline.
  GVALinkage External;
  switch (FD->getTemplateSpecializationKind()) {
  case TSK_Undeclared:
  case TSK_ExplicitSpecialization:
    External = GVA_StrongExternal;
    break;

  case TSK_ExplicitInstantiationDefinition:
    return GVA_StrongODR;

  // C++11 [temp.explicit]p10:
  // [ Note: The intent is that an inline function that is the subject of
  // an explicit instantiation declaration will still be implicitly
  // instantiated when used so that the body can be considered for
  // inlining, but that no out-of-line copy of the inline function would be
  // generated in the translation unit. -- end note ]
  case TSK_ExplicitInstantiationDeclaration:
    return GVA_AvailableExternally;

  case TSK_ImplicitInstantiation:
    External = GVA_DiscardableODR;
    break;
  }

  if (!FD->isInlined())
    return External;

  if ((!Context.getLangOpts().CPlusPlus &&
       !Context.getTargetInfo().getCXXABI().isMicrosoft() &&
       !FD->hasAttr<DLLExportAttr>()) ||
      FD->hasAttr<GNUInlineAttr>()) {
    // FIXME: This doesn't match gcc's behavior for dllexport inline functions.

    // GNU or C99 inline semantics. Determine whether this symbol should be
    // externally visible.
    if (FD->isInlineDefinitionExternallyVisible())
      return External;

    // C99 inline semantics, where the symbol is not externally visible.
    return GVA_AvailableExternally;
  }

  // Functions specified with extern and inline in -fms-compatibility mode
  // forcibly get emitted. While the body of the function cannot be later
  // replaced, the function definition cannot be discarded.
  if (FD->isMSExternInline())
    return GVA_StrongODR;

  if (Context.getTargetInfo().getCXXABI().isMicrosoft() &&
      isa<CXXConstructorDecl>(Val: FD) &&
      cast<CXXConstructorDecl>(Val: FD)->isInheritingConstructor())
    // Our approach to inheriting constructors is fundamentally different from
    // that used by the MS ABI, so keep our inheriting constructor thunks
    // internal rather than trying to pick an unambiguous mangling for them.
    return GVA_Internal;

  // Remaining case: C++ inline semantics, where any TU that uses the function
  // emits its own discardable copy.
  return GVA_DiscardableODR;
}
12822
12823static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context,
12824 const Decl *D, GVALinkage L) {
12825 // See http://msdn.microsoft.com/en-us/library/xa0d9ste.aspx
12826 // dllexport/dllimport on inline functions.
12827 if (D->hasAttr<DLLImportAttr>()) {
12828 if (L == GVA_DiscardableODR || L == GVA_StrongODR)
12829 return GVA_AvailableExternally;
12830 } else if (D->hasAttr<DLLExportAttr>()) {
12831 if (L == GVA_DiscardableODR)
12832 return GVA_StrongODR;
12833 } else if (Context.getLangOpts().CUDA && Context.getLangOpts().CUDAIsDevice) {
12834 // Device-side functions with __global__ attribute must always be
12835 // visible externally so they can be launched from host.
12836 if (D->hasAttr<CUDAGlobalAttr>() &&
12837 (L == GVA_DiscardableODR || L == GVA_Internal))
12838 return GVA_StrongODR;
12839 // Single source offloading languages like CUDA/HIP need to be able to
12840 // access static device variables from host code of the same compilation
12841 // unit. This is done by externalizing the static variable with a shared
12842 // name between the host and device compilation which is the same for the
12843 // same compilation unit whereas different among different compilation
12844 // units.
12845 if (Context.shouldExternalize(D))
12846 return GVA_StrongExternal;
12847 }
12848 return L;
12849}
12850
12851/// Adjust the GVALinkage for a declaration based on what an external AST source
12852/// knows about whether there can be other definitions of this declaration.
12853static GVALinkage
12854adjustGVALinkageForExternalDefinitionKind(const ASTContext &Ctx, const Decl *D,
12855 GVALinkage L) {
12856 ExternalASTSource *Source = Ctx.getExternalSource();
12857 if (!Source)
12858 return L;
12859
12860 switch (Source->hasExternalDefinitions(D)) {
12861 case ExternalASTSource::EK_Never:
12862 // Other translation units rely on us to provide the definition.
12863 if (L == GVA_DiscardableODR)
12864 return GVA_StrongODR;
12865 break;
12866
12867 case ExternalASTSource::EK_Always:
12868 return GVA_AvailableExternally;
12869
12870 case ExternalASTSource::EK_ReplyHazy:
12871 break;
12872 }
12873 return L;
12874}
12875
12876GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const {
12877 return adjustGVALinkageForExternalDefinitionKind(Ctx: *this, D: FD,
12878 L: adjustGVALinkageForAttributes(Context: *this, D: FD,
12879 L: basicGVALinkageForFunction(Context: *this, FD)));
12880}
12881
/// Compute the base GVA linkage for a variable from its visibility,
/// storage duration, inline-variable kind, and template specialization kind,
/// before any attribute- or external-AST-source-based adjustments.
static GVALinkage basicGVALinkageForVariable(const ASTContext &Context,
                                             const VarDecl *VD) {
  // As an extension for interactive REPLs, make sure constant variables are
  // only emitted once instead of LinkageComputer::getLVForNamespaceScopeDecl
  // marking them as internal.
  if (Context.getLangOpts().CPlusPlus &&
      Context.getLangOpts().IncrementalExtensions &&
      VD->getType().isConstQualified() &&
      !VD->getType().isVolatileQualified() && !VD->isInline() &&
      !isa<VarTemplateSpecializationDecl>(Val: VD) && !VD->getDescribedVarTemplate())
    return GVA_DiscardableODR;

  if (!VD->isExternallyVisible())
    return GVA_Internal;

  if (VD->isStaticLocal()) {
    // Find the nearest enclosing FunctionDecl, skipping over non-function
    // lexical scopes (e.g. blocks).
    const DeclContext *LexicalContext = VD->getParentFunctionOrMethod();
    while (LexicalContext && !isa<FunctionDecl>(Val: LexicalContext))
      LexicalContext = LexicalContext->getLexicalParent();

    // ObjC Blocks can create local variables that don't have a FunctionDecl
    // LexicalContext.
    if (!LexicalContext)
      return GVA_DiscardableODR;

    // Otherwise, let the static local variable inherit its linkage from the
    // nearest enclosing function.
    auto StaticLocalLinkage =
        Context.GetGVALinkageForFunction(FD: cast<FunctionDecl>(Val: LexicalContext));

    // Itanium ABI 5.2.2: "Each COMDAT group [for a static local variable] must
    // be emitted in any object with references to the symbol for the object it
    // contains, whether inline or out-of-line."
    // Similar behavior is observed with MSVC. An alternative ABI could use
    // StrongODR/AvailableExternally to match the function, but none are
    // known/supported currently.
    if (StaticLocalLinkage == GVA_StrongODR ||
        StaticLocalLinkage == GVA_AvailableExternally)
      return GVA_DiscardableODR;
    return StaticLocalLinkage;
  }

  // MSVC treats in-class initialized static data members as definitions.
  // By giving them non-strong linkage, out-of-line definitions won't
  // cause link errors.
  if (Context.isMSStaticDataMemberInlineDefinition(VD))
    return GVA_DiscardableODR;

  // Most non-template variables have strong linkage; inline variables are
  // linkonce_odr or (occasionally, for compatibility) weak_odr.
  GVALinkage StrongLinkage;
  switch (Context.getInlineVariableDefinitionKind(VD)) {
  case ASTContext::InlineVariableDefinitionKind::None:
    StrongLinkage = GVA_StrongExternal;
    break;
  case ASTContext::InlineVariableDefinitionKind::Weak:
  case ASTContext::InlineVariableDefinitionKind::WeakUnknown:
    StrongLinkage = GVA_DiscardableODR;
    break;
  case ASTContext::InlineVariableDefinitionKind::Strong:
    StrongLinkage = GVA_StrongODR;
    break;
  }

  // Finally, refine the chosen linkage by template specialization kind.
  switch (VD->getTemplateSpecializationKind()) {
  case TSK_Undeclared:
    return StrongLinkage;

  case TSK_ExplicitSpecialization:
    return Context.getTargetInfo().getCXXABI().isMicrosoft() &&
                   VD->isStaticDataMember()
               ? GVA_StrongODR
               : StrongLinkage;

  case TSK_ExplicitInstantiationDefinition:
    return GVA_StrongODR;

  case TSK_ExplicitInstantiationDeclaration:
    return GVA_AvailableExternally;

  case TSK_ImplicitInstantiation:
    return GVA_DiscardableODR;
  }

  llvm_unreachable("Invalid Linkage!");
}
12968
12969GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) const {
12970 return adjustGVALinkageForExternalDefinitionKind(Ctx: *this, D: VD,
12971 L: adjustGVALinkageForAttributes(Context: *this, D: VD,
12972 L: basicGVALinkageForVariable(Context: *this, VD)));
12973}
12974
/// Determine whether a declaration must be emitted (CodeGen'ed) even without
/// any visible uses: file-scope variables and functions with non-discardable
/// linkage, ctor/dtor-attributed functions, key functions, and certain
/// pragma/OpenMP/import declarations.
bool ASTContext::DeclMustBeEmitted(const Decl *D) {
  // First, filter by declaration kind.
  if (const auto *VD = dyn_cast<VarDecl>(Val: D)) {
    if (!VD->isFileVarDecl())
      return false;
    // Global named register variables (GNU extension) are never emitted.
    if (VD->getStorageClass() == SC_Register)
      return false;
    // Variable templates and partial specializations are patterns, not
    // emittable entities.
    if (VD->getDescribedVarTemplate() ||
        isa<VarTemplatePartialSpecializationDecl>(Val: VD))
      return false;
  } else if (const auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
    // We never need to emit an uninstantiated function template.
    if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
      return false;
  } else if (isa<PragmaCommentDecl>(Val: D))
    return true;
  else if (isa<PragmaDetectMismatchDecl>(Val: D))
    return true;
  else if (isa<OMPRequiresDecl>(Val: D))
    return true;
  else if (isa<OMPThreadPrivateDecl>(Val: D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<OMPAllocateDecl>(Val: D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<OMPDeclareReductionDecl>(Val: D) || isa<OMPDeclareMapperDecl>(Val: D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<ImportDecl>(Val: D))
    return true;
  else
    return false;

  // Past this point D is a file-scope VarDecl or a non-template FunctionDecl.

  // If this is a member of a class template, we do not need to emit it.
  if (D->getDeclContext()->isDependentContext())
    return false;

  // Weak references don't produce any output by themselves.
  if (D->hasAttr<WeakRefAttr>())
    return false;

  // SYCL device compilation requires that functions defined with the
  // sycl_kernel_entry_point or sycl_external attributes be emitted. All
  // other entities are emitted only if they are used by a function
  // defined with one of those attributes.
  if (LangOpts.SYCLIsDevice)
    return isa<FunctionDecl>(Val: D) && (D->hasAttr<SYCLKernelEntryPointAttr>() ||
                                     D->hasAttr<SYCLExternalAttr>());

  // Aliases and used decls are required.
  if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>())
    return true;

  if (const auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
    // Forward declarations aren't required.
    if (!FD->doesThisDeclarationHaveABody())
      return FD->doesDeclarationForceExternallyVisibleDefinition();

    // Constructors and destructors are required.
    if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>())
      return true;

    // The key function for a class is required. This rule only comes
    // into play when inline functions can be key functions, though.
    if (getTargetInfo().getCXXABI().canKeyFunctionBeInline()) {
      if (const auto *MD = dyn_cast<CXXMethodDecl>(Val: FD)) {
        const CXXRecordDecl *RD = MD->getParent();
        if (MD->isOutOfLine() && RD->isDynamicClass()) {
          const CXXMethodDecl *KeyFunc = getCurrentKeyFunction(RD);
          if (KeyFunc && KeyFunc->getCanonicalDecl() == MD->getCanonicalDecl())
            return true;
        }
      }
    }

    GVALinkage Linkage = GetGVALinkageForFunction(FD);

    // static, static inline, always_inline, and extern inline functions can
    // always be deferred. Normal inline functions can be deferred in C99/C++.
    // Implicit template instantiations can also be deferred in C++.
    return !isDiscardableGVALinkage(L: Linkage);
  }

  const auto *VD = cast<VarDecl>(Val: D);
  assert(VD->isFileVarDecl() && "Expected file scoped var");

  // If the decl is marked as `declare target to`, it should be emitted for the
  // host and for the device.
  if (LangOpts.OpenMP &&
      OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
    return true;

  // Declarations (as opposed to definitions) need not be emitted, except for
  // MSVC-style in-class static data member "definitions".
  if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly &&
      !isMSStaticDataMemberInlineDefinition(VD))
    return false;

  if (VD->shouldEmitInExternalSource())
    return false;

  // Variables that can be needed in other TUs are required.
  auto Linkage = GetGVALinkageForVariable(VD);
  if (!isDiscardableGVALinkage(L: Linkage))
    return true;

  // We never need to emit a variable that is available in another TU.
  if (Linkage == GVA_AvailableExternally)
    return false;

  // Variables that have destruction with side-effects are required.
  if (VD->needsDestruction(Ctx: *this))
    return true;

  // Variables that have initialization with side-effects are required.
  if (VD->hasInitWithSideEffects())
    return true;

  // Likewise, variables with tuple-like bindings are required if their
  // bindings have side-effects.
  if (const auto *DD = dyn_cast<DecompositionDecl>(Val: VD)) {
    for (const auto *BD : DD->flat_bindings())
      if (const auto *BindingVD = BD->getHoldingVar())
        if (DeclMustBeEmitted(D: BindingVD))
          return true;
  }

  return false;
}
13100
13101void ASTContext::forEachMultiversionedFunctionVersion(
13102 const FunctionDecl *FD,
13103 llvm::function_ref<void(FunctionDecl *)> Pred) const {
13104 assert(FD->isMultiVersion() && "Only valid for multiversioned functions");
13105 llvm::SmallDenseSet<const FunctionDecl*, 4> SeenDecls;
13106 FD = FD->getMostRecentDecl();
13107 // FIXME: The order of traversal here matters and depends on the order of
13108 // lookup results, which happens to be (mostly) oldest-to-newest, but we
13109 // shouldn't rely on that.
13110 for (auto *CurDecl :
13111 FD->getDeclContext()->getRedeclContext()->lookup(Name: FD->getDeclName())) {
13112 FunctionDecl *CurFD = CurDecl->getAsFunction()->getMostRecentDecl();
13113 if (CurFD && hasSameType(T1: CurFD->getType(), T2: FD->getType()) &&
13114 SeenDecls.insert(V: CurFD).second) {
13115 Pred(CurFD);
13116 }
13117 }
13118}
13119
/// Compute the calling convention used for a function that does not specify
/// one explicitly, honoring the -fdefault-calling-conv language option.
/// Register-based conventions silently fall back to the target default for
/// variadic functions (and, for __fastcall, when SSE2 is unavailable).
CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic,
                                                    bool IsCXXMethod) const {
  // Pass through to the C++ ABI object
  if (IsCXXMethod)
    return ABI->getDefaultMethodCallConv(isVariadic: IsVariadic);

  switch (LangOpts.getDefaultCallingConv()) {
  case LangOptions::DCC_None:
    break;
  case LangOptions::DCC_CDecl:
    return CC_C;
  case LangOptions::DCC_FastCall:
    // __fastcall requires SSE2 and cannot be applied to variadic functions.
    if (getTargetInfo().hasFeature(Feature: "sse2") && !IsVariadic)
      return CC_X86FastCall;
    break;
  case LangOptions::DCC_StdCall:
    if (!IsVariadic)
      return CC_X86StdCall;
    break;
  case LangOptions::DCC_VectorCall:
    // __vectorcall cannot be applied to variadic functions.
    if (!IsVariadic)
      return CC_X86VectorCall;
    break;
  case LangOptions::DCC_RegCall:
    // __regcall cannot be applied to variadic functions.
    if (!IsVariadic)
      return CC_X86RegCall;
    break;
  case LangOptions::DCC_RtdCall:
    if (!IsVariadic)
      return CC_M68kRTD;
    break;
  }
  // No (applicable) default convention was requested; ask the target.
  return Target->getDefaultCallingConv();
}
13156
/// Return whether \p RD is considered "nearly empty"; the decision is
/// delegated entirely to the target C++ ABI object.
bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const {
  // Pass through to the C++ ABI object
  return ABI->isNearlyEmpty(RD);
}
13161
13162VTableContextBase *ASTContext::getVTableContext() {
13163 if (!VTContext) {
13164 auto ABI = Target->getCXXABI();
13165 if (ABI.isMicrosoft())
13166 VTContext.reset(p: new MicrosoftVTableContext(*this));
13167 else {
13168 auto ComponentLayout = getLangOpts().RelativeCXXABIVTables
13169 ? ItaniumVTableContext::Relative
13170 : ItaniumVTableContext::Pointer;
13171 VTContext.reset(p: new ItaniumVTableContext(*this, ComponentLayout));
13172 }
13173 }
13174 return VTContext.get();
13175}
13176
/// Create a name-mangling context for the given target (the main target
/// when \p T is null), choosing the Itanium-family or Microsoft mangler
/// from the target's C++ ABI kind.
MangleContext *ASTContext::createMangleContext(const TargetInfo *T) {
  if (!T)
    T = Target;
  switch (T->getCXXABI().getKind()) {
  // All of the following are variants of the Itanium family.
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::XL:
    return ItaniumMangleContext::create(Context&: *this, Diags&: getDiagnostics());
  case TargetCXXABI::Microsoft:
    return MicrosoftMangleContext::create(Context&: *this, Diags&: getDiagnostics());
  }
  llvm_unreachable("Unsupported ABI");
}

/// Create a mangling context for the device side of a heterogeneous
/// compilation, marked as auxiliary (IsAux). For Itanium-family ABIs,
/// lambdas are numbered with the device-specific lambda mangling number
/// rather than the regular one.
MangleContext *ASTContext::createDeviceMangleContext(const TargetInfo &T) {
  assert(T.getCXXABI().getKind() != TargetCXXABI::Microsoft &&
         "Device mangle context does not support Microsoft mangling.");
  switch (T.getCXXABI().getKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::XL:
    return ItaniumMangleContext::create(
        Context&: *this, Diags&: getDiagnostics(),
        // Discriminator: substitute the device lambda mangling number.
        Discriminator: [](ASTContext &, const NamedDecl *ND) -> UnsignedOrNone {
          if (const auto *RD = dyn_cast<CXXRecordDecl>(Val: ND))
            return RD->getDeviceLambdaManglingNumber();
          return std::nullopt;
        },
        /*IsAux=*/true);
  // NOTE(review): unreachable given the assert above; kept to cover the enum.
  case TargetCXXABI::Microsoft:
    return MicrosoftMangleContext::create(Context&: *this, Diags&: getDiagnostics(),
                                          /*IsAux=*/true);
  }
  llvm_unreachable("Unsupported ABI");
}

/// Select the mangle context for device-side names during CUDA/HIP host
/// compilation: use a device mangle context when host (Microsoft) and
/// device (Itanium-family) ABIs differ, otherwise an ordinary mangle
/// context for the aux target.
MangleContext *ASTContext::cudaNVInitDeviceMC() {
  // If the host and device have different C++ ABIs, mark it as the device
  // mangle context so that the mangling needs to retrieve the additional
  // device lambda mangling number instead of the regular host one.
  if (getAuxTargetInfo() && getTargetInfo().getCXXABI().isMicrosoft() &&
      getAuxTargetInfo()->getCXXABI().isItaniumFamily()) {
    return createDeviceMangleContext(T: *getAuxTargetInfo());
  }

  return createMangleContext(T: getAuxTargetInfo());
}
13238
// Out-of-line so the defaulted destructor is emitted in this TU.
CXXABI::~CXXABI() = default;
13240
/// Estimate (in bytes) the heap memory consumed by the ASTContext's side
/// tables, by summing the capacity of each container. Used for memory
/// statistics reporting; this is an estimate, not an exact accounting.
size_t ASTContext::getSideTableAllocatedMemory() const {
  return ASTRecordLayouts.getMemorySize() +
         llvm::capacity_in_bytes(X: ObjCLayouts) +
         llvm::capacity_in_bytes(X: KeyFunctions) +
         llvm::capacity_in_bytes(X: ObjCImpls) +
         llvm::capacity_in_bytes(X: BlockVarCopyInits) +
         llvm::capacity_in_bytes(X: DeclAttrs) +
         llvm::capacity_in_bytes(X: TemplateOrInstantiation) +
         llvm::capacity_in_bytes(X: InstantiatedFromUsingDecl) +
         llvm::capacity_in_bytes(X: InstantiatedFromUsingShadowDecl) +
         llvm::capacity_in_bytes(X: InstantiatedFromUnnamedFieldDecl) +
         llvm::capacity_in_bytes(X: OverriddenMethods) +
         llvm::capacity_in_bytes(X: Types) +
         llvm::capacity_in_bytes(x: VariableArrayTypes);
}
13256
/// getIntTypeForBitwidth -
/// Returns the integer type with the requested bitwidth and signedness.
/// Returns an empty QualType if the target has no matching type, except
/// that a 128-bit request falls back to [unsigned] __int128.
QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth,
                                           unsigned Signed) const {
  TargetInfo::IntType Ty = getTargetInfo().getIntTypeByWidth(BitWidth: DestWidth, IsSigned: Signed);
  CanQualType QualTy = getFromTargetType(Type: Ty);
  // Fall back to the 128-bit builtin types when the target reports no
  // standard integer of that width.
  if (!QualTy && DestWidth == 128)
    return Signed ? Int128Ty : UnsignedInt128Ty;
  return QualTy;
}
13269
/// getRealTypeForBitwidth -
/// Returns the floating-point type matching the specified bitwidth (as
/// resolved by the target), or an empty QualType when the target has no
/// appropriate type.
QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth,
                                            FloatModeKind ExplicitType) const {
  FloatModeKind Ty =
      getTargetInfo().getRealTypeByWidth(BitWidth: DestWidth, ExplicitType);
  switch (Ty) {
  case FloatModeKind::Half:
    return HalfTy;
  case FloatModeKind::Float:
    return FloatTy;
  case FloatModeKind::Double:
    return DoubleTy;
  case FloatModeKind::LongDouble:
    return LongDoubleTy;
  case FloatModeKind::Float128:
    return Float128Ty;
  case FloatModeKind::Ibm128:
    return Ibm128Ty;
  case FloatModeKind::NoFloat:
    // Target reported no floating type of this width.
    return {};
  }

  llvm_unreachable("Unhandled TargetInfo::RealType value");
}
13296
13297void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) {
13298 if (Number <= 1)
13299 return;
13300
13301 MangleNumbers[ND] = Number;
13302
13303 if (Listener)
13304 Listener->AddedManglingNumber(D: ND, Number);
13305}
13306
/// Retrieve the mangling number recorded for \p ND (1 if none was stored).
/// During CUDA/HIP host compilation the stored 32-bit value packs both
/// numbers: the host number in the low 16 bits and the device (aux-target)
/// number in the high 16 bits; \p ForAuxTarget selects which half.
unsigned ASTContext::getManglingNumber(const NamedDecl *ND,
                                       bool ForAuxTarget) const {
  auto I = MangleNumbers.find(Key: ND);
  unsigned Res = I != MangleNumbers.end() ? I->second : 1;
  // CUDA/HIP host compilation encodes host and device mangling numbers
  // as lower and upper half of 32 bit integer.
  if (LangOpts.CUDA && !LangOpts.CUDAIsDevice) {
    Res = ForAuxTarget ? Res >> 16 : Res & 0xFFFF;
  } else {
    assert(!ForAuxTarget && "Only CUDA/HIP host compilation supports mangling "
                            "number for aux target");
  }
  // Clamp to the implicit minimum of 1 (an extracted half may be 0).
  return Res > 1 ? Res : 1;
}
13321
13322void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) {
13323 if (Number <= 1)
13324 return;
13325
13326 StaticLocalNumbers[VD] = Number;
13327
13328 if (Listener)
13329 Listener->AddedStaticLocalNumbers(D: VD, Number);
13330}
13331
13332unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const {
13333 auto I = StaticLocalNumbers.find(Key: VD);
13334 return I != StaticLocalNumbers.end() ? I->second : 1;
13335}
13336
/// Record that \p FD is a destroying operator delete. Only the positive
/// case is stored; the set is keyed on canonical declarations, and passing
/// false merely asserts the function was never marked.
void ASTContext::setIsDestroyingOperatorDelete(const FunctionDecl *FD,
                                               bool IsDestroying) {
  if (!IsDestroying) {
    assert(!DestroyingOperatorDeletes.contains(FD->getCanonicalDecl()));
    return;
  }
  DestroyingOperatorDeletes.insert(V: FD->getCanonicalDecl());
}

/// Query whether \p FD was marked as a destroying operator delete.
bool ASTContext::isDestroyingOperatorDelete(const FunctionDecl *FD) const {
  return DestroyingOperatorDeletes.contains(V: FD->getCanonicalDecl());
}

/// Record that \p FD is a type-aware operator new or delete. Mirrors
/// setIsDestroyingOperatorDelete: only true is stored, false only asserts.
void ASTContext::setIsTypeAwareOperatorNewOrDelete(const FunctionDecl *FD,
                                                   bool IsTypeAware) {
  if (!IsTypeAware) {
    assert(!TypeAwareOperatorNewAndDeletes.contains(FD->getCanonicalDecl()));
    return;
  }
  TypeAwareOperatorNewAndDeletes.insert(V: FD->getCanonicalDecl());
}

/// Query whether \p FD was marked as a type-aware operator new/delete.
bool ASTContext::isTypeAwareOperatorNewOrDelete(const FunctionDecl *FD) const {
  return TypeAwareOperatorNewAndDeletes.contains(V: FD->getCanonicalDecl());
}
13362
13363void ASTContext::addOperatorDeleteForVDtor(const CXXDestructorDecl *Dtor,
13364 FunctionDecl *OperatorDelete,
13365 OperatorDeleteKind K) const {
13366 switch (K) {
13367 case OperatorDeleteKind::Regular:
13368 OperatorDeletesForVirtualDtor[Dtor->getCanonicalDecl()] = OperatorDelete;
13369 break;
13370 case OperatorDeleteKind::GlobalRegular:
13371 GlobalOperatorDeletesForVirtualDtor[Dtor->getCanonicalDecl()] =
13372 OperatorDelete;
13373 break;
13374 case OperatorDeleteKind::Array:
13375 ArrayOperatorDeletesForVirtualDtor[Dtor->getCanonicalDecl()] =
13376 OperatorDelete;
13377 break;
13378 case OperatorDeleteKind::ArrayGlobal:
13379 GlobalArrayOperatorDeletesForVirtualDtor[Dtor->getCanonicalDecl()] =
13380 OperatorDelete;
13381 break;
13382 }
13383}
13384
13385bool ASTContext::dtorHasOperatorDelete(const CXXDestructorDecl *Dtor,
13386 OperatorDeleteKind K) const {
13387 switch (K) {
13388 case OperatorDeleteKind::Regular:
13389 return OperatorDeletesForVirtualDtor.contains(Val: Dtor->getCanonicalDecl());
13390 case OperatorDeleteKind::GlobalRegular:
13391 return GlobalOperatorDeletesForVirtualDtor.contains(
13392 Val: Dtor->getCanonicalDecl());
13393 case OperatorDeleteKind::Array:
13394 return ArrayOperatorDeletesForVirtualDtor.contains(
13395 Val: Dtor->getCanonicalDecl());
13396 case OperatorDeleteKind::ArrayGlobal:
13397 return GlobalArrayOperatorDeletesForVirtualDtor.contains(
13398 Val: Dtor->getCanonicalDecl());
13399 }
13400 return false;
13401}
13402
13403FunctionDecl *
13404ASTContext::getOperatorDeleteForVDtor(const CXXDestructorDecl *Dtor,
13405 OperatorDeleteKind K) const {
13406 const CXXDestructorDecl *Canon = Dtor->getCanonicalDecl();
13407 switch (K) {
13408 case OperatorDeleteKind::Regular:
13409 if (OperatorDeletesForVirtualDtor.contains(Val: Canon))
13410 return OperatorDeletesForVirtualDtor[Canon];
13411 return nullptr;
13412 case OperatorDeleteKind::GlobalRegular:
13413 if (GlobalOperatorDeletesForVirtualDtor.contains(Val: Canon))
13414 return GlobalOperatorDeletesForVirtualDtor[Canon];
13415 return nullptr;
13416 case OperatorDeleteKind::Array:
13417 if (ArrayOperatorDeletesForVirtualDtor.contains(Val: Canon))
13418 return ArrayOperatorDeletesForVirtualDtor[Canon];
13419 return nullptr;
13420 case OperatorDeleteKind::ArrayGlobal:
13421 if (GlobalArrayOperatorDeletesForVirtualDtor.contains(Val: Canon))
13422 return GlobalArrayOperatorDeletesForVirtualDtor[Canon];
13423 return nullptr;
13424 }
13425 return nullptr;
13426}
13427
/// Determine whether \p RD requires emission of a vector deleting
/// destructor: only on targets that emit them, and either because the
/// virtual destructor is dllexported (matching MSVC) or because the class
/// was explicitly registered via setClassNeedsVectorDeletingDestructor.
bool ASTContext::classNeedsVectorDeletingDestructor(const CXXRecordDecl *RD) {
  if (!getTargetInfo().emitVectorDeletingDtors(getLangOpts()))
    return false;
  CXXDestructorDecl *Dtor = RD->getDestructor();
  // The compiler can't know if new[]/delete[] will be used outside of the DLL,
  // so just force vector deleting destructor emission if dllexport is present.
  // This matches MSVC behavior.
  if (Dtor && Dtor->isVirtual() && Dtor->hasAttr<DLLExportAttr>())
    return true;

  return RequireVectorDeletingDtor.count(V: RD);
}

/// Register \p RD as requiring a vector deleting destructor. No-op on
/// targets that do not emit vector deleting destructors.
void ASTContext::setClassNeedsVectorDeletingDestructor(
    const CXXRecordDecl *RD) {
  if (!getTargetInfo().emitVectorDeletingDtors(getLangOpts()))
    return;
  RequireVectorDeletingDtor.insert(V: RD);
}
13447
/// Return the lazily-created mangling-number context for declarations in
/// \p DC, creating it on first request via the C++ ABI object.
MangleNumberingContext &
ASTContext::getManglingNumberContext(const DeclContext *DC) {
  assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
  std::unique_ptr<MangleNumberingContext> &MCtx = MangleNumberingContexts[DC];
  if (!MCtx)
    MCtx = createMangleNumberingContext();
  return *MCtx;
}

/// Variant keyed on an individual declaration rather than a DeclContext,
/// for entities that need an extra numbering scope of their own.
MangleNumberingContext &
ASTContext::getManglingNumberContext(NeedExtraManglingDecl_t, const Decl *D) {
  assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
  std::unique_ptr<MangleNumberingContext> &MCtx =
      ExtraMangleNumberingContexts[D];
  if (!MCtx)
    MCtx = createMangleNumberingContext();
  return *MCtx;
}

/// Delegate construction of a fresh numbering context to the ABI object.
std::unique_ptr<MangleNumberingContext>
ASTContext::createMangleNumberingContext() const {
  return ABI->createMangleNumberingContext();
}
13471
// The following accessors forward to the C++ ABI object, which owns the
// corresponding side tables; declarations are normalized to their first
// (canonical) declaration before use where applicable.

/// Look up the copy constructor registered for use when \p RD is thrown as
/// an exception object.
const CXXConstructorDecl *
ASTContext::getCopyConstructorForExceptionObject(CXXRecordDecl *RD) {
  return ABI->getCopyConstructorForExceptionObject(
      cast<CXXRecordDecl>(Val: RD->getFirstDecl()));
}

/// Register \p CD as the copy constructor used when \p RD is thrown.
void ASTContext::addCopyConstructorForExceptionObject(CXXRecordDecl *RD,
                                                      CXXConstructorDecl *CD) {
  return ABI->addCopyConstructorForExceptionObject(
      cast<CXXRecordDecl>(Val: RD->getFirstDecl()),
      cast<CXXConstructorDecl>(Val: CD->getFirstDecl()));
}

/// Associate the typedef name \p DD with the unnamed tag \p TD (used for
/// linkage/mangling of otherwise-unnamed tags).
void ASTContext::addTypedefNameForUnnamedTagDecl(TagDecl *TD,
                                                 TypedefNameDecl *DD) {
  return ABI->addTypedefNameForUnnamedTagDecl(TD, DD);
}

/// Retrieve the typedef name previously associated with \p TD, if any.
TypedefNameDecl *
ASTContext::getTypedefNameForUnnamedTagDecl(const TagDecl *TD) {
  return ABI->getTypedefNameForUnnamedTagDecl(TD);
}

/// Associate the declarator \p DD with the unnamed tag \p TD.
void ASTContext::addDeclaratorForUnnamedTagDecl(TagDecl *TD,
                                                DeclaratorDecl *DD) {
  return ABI->addDeclaratorForUnnamedTagDecl(TD, DD);
}

/// Retrieve the declarator previously associated with \p TD, if any.
DeclaratorDecl *ASTContext::getDeclaratorForUnnamedTagDecl(const TagDecl *TD) {
  return ABI->getDeclaratorForUnnamedTagDecl(TD);
}
13503
13504void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) {
13505 ParamIndices[D] = index;
13506}
13507
13508unsigned ASTContext::getParameterIndex(const ParmVarDecl *D) const {
13509 ParameterIndexTable::const_iterator I = ParamIndices.find(Val: D);
13510 assert(I != ParamIndices.end() &&
13511 "ParmIndices lacks entry set by ParmVarDecl");
13512 return I->second;
13513}
13514
/// Build the constant array type for a string literal of \p Length
/// characters with element type \p EltTy, const-qualifying the element in
/// C++ (or with -fconst-strings) and reserving space for the terminator.
QualType ASTContext::getStringLiteralArrayType(QualType EltTy,
                                               unsigned Length) const {
  // A C++ string literal has a const-qualified element type (C++ 2.13.4p1).
  if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings)
    EltTy = EltTy.withConst();

  EltTy = adjustStringLiteralBaseType(Ty: EltTy);

  // Get an array type for the string, according to C99 6.4.5. This includes
  // the null terminator character.
  return getConstantArrayType(EltTy, ArySizeIn: llvm::APInt(32, Length + 1), SizeExpr: nullptr,
                              ASM: ArraySizeModifier::Normal, /*IndexTypeQuals*/ 0);
}

/// Return a cached ordinary StringLiteral for \p Key, creating (and
/// caching) it with an unknown source location on first use.
StringLiteral *
ASTContext::getPredefinedStringLiteralFromCache(StringRef Key) const {
  StringLiteral *&Result = StringLiteralCache[Key];
  if (!Result)
    Result = StringLiteral::Create(
        Ctx: *this, Str: Key, Kind: StringLiteralKind::Ordinary,
        /*Pascal*/ false, Ty: getStringLiteralArrayType(EltTy: CharTy, Length: Key.size()),
        Locs: SourceLocation());
  return Result;
}
13539
/// Return the unique MSGuidDecl for the GUID value \p Parts, uniqued
/// through a FoldingSet so identical GUIDs share one declaration.
MSGuidDecl *
ASTContext::getMSGuidDecl(MSGuidDecl::Parts Parts) const {
  assert(MSGuidTagDecl && "building MS GUID without MS extensions?");

  llvm::FoldingSetNodeID ID;
  MSGuidDecl::Profile(ID, P: Parts);

  void *InsertPos;
  if (MSGuidDecl *Existing = MSGuidDecls.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;

  // The declared type is 'const GUID'.
  QualType GUIDType = getMSGuidType().withConst();
  MSGuidDecl *New = MSGuidDecl::Create(C: *this, T: GUIDType, P: Parts);
  MSGuidDecls.InsertNode(N: New, InsertPos);
  return New;
}

/// Return the unique UnnamedGlobalConstantDecl for the (type, value) pair,
/// uniqued through a FoldingSet.
UnnamedGlobalConstantDecl *
ASTContext::getUnnamedGlobalConstantDecl(QualType Ty,
                                         const APValue &APVal) const {
  llvm::FoldingSetNodeID ID;
  UnnamedGlobalConstantDecl::Profile(ID, Ty, APVal);

  void *InsertPos;
  if (UnnamedGlobalConstantDecl *Existing =
          UnnamedGlobalConstantDecls.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;

  UnnamedGlobalConstantDecl *New =
      UnnamedGlobalConstantDecl::Create(C: *this, T: Ty, APVal);
  UnnamedGlobalConstantDecls.InsertNode(N: New, InsertPos);
  return New;
}

/// Return the unique TemplateParamObjectDecl for a class-type non-type
/// template argument with value \p V, uniqued through a FoldingSet.
TemplateParamObjectDecl *
ASTContext::getTemplateParamObjectDecl(QualType T, const APValue &V) const {
  assert(T->isRecordType() && "template param object of unexpected type");

  // C++ [temp.param]p8:
  //   [...] a static storage duration object of type 'const T' [...]
  T.addConst();

  llvm::FoldingSetNodeID ID;
  TemplateParamObjectDecl::Profile(ID, T, V);

  void *InsertPos;
  if (TemplateParamObjectDecl *Existing =
          TemplateParamObjectDecls.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;

  TemplateParamObjectDecl *New = TemplateParamObjectDecl::Create(C: *this, T, V);
  TemplateParamObjectDecls.InsertNode(N: New, InsertPos);
  return New;
}
13594
/// Report whether lowering \p E would need an atomics library call that the
/// deployment target cannot provide. Only relevant on old Darwin targets
/// (iOS < 7, macOS < 10.9); there, any atomic whose size differs from its
/// alignment or exceeds the maximum inline atomic width is unsupported.
bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const {
  const llvm::Triple &T = getTargetInfo().getTriple();
  if (!T.isOSDarwin())
    return false;

  // Newer OS versions are fine; only the listed legacy versions apply.
  if (!(T.isiOS() && T.isOSVersionLT(Major: 7)) &&
      !(T.isMacOSX() && T.isOSVersionLT(Major: 10, Minor: 9)))
    return false;

  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  CharUnits sizeChars = getTypeSizeInChars(T: AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getTypeAlignInChars(T: AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits = getTargetInfo().getMaxAtomicInlineWidth();
  return (Size != Align || toBits(CharSize: sizeChars) > MaxInlineWidthInBits);
}
13612
/// Determine whether an Objective-C method declaration and implementation
/// match: same ObjC decl qualifiers, same return type, pairwise-identical
/// parameter qualifiers and types, and matching variadic-ness. Methods
/// marked unavailable or deprecated never match.
bool
ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl,
                                const ObjCMethodDecl *MethodImpl) {
  // No point trying to match an unavailable/deprecated method.
  if (MethodDecl->hasAttr<UnavailableAttr>()
      || MethodDecl->hasAttr<DeprecatedAttr>())
    return false;
  if (MethodDecl->getObjCDeclQualifier() !=
      MethodImpl->getObjCDeclQualifier())
    return false;
  if (!hasSameType(T1: MethodDecl->getReturnType(), T2: MethodImpl->getReturnType()))
    return false;

  if (MethodDecl->param_size() != MethodImpl->param_size())
    return false;

  // Walk the two parameter lists in lock-step.
  for (ObjCMethodDecl::param_const_iterator IM = MethodImpl->param_begin(),
       IF = MethodDecl->param_begin(), EM = MethodImpl->param_end(),
       EF = MethodDecl->param_end();
       IM != EM && IF != EF; ++IM, ++IF) {
    const ParmVarDecl *DeclVar = (*IF);
    const ParmVarDecl *ImplVar = (*IM);
    if (ImplVar->getObjCDeclQualifier() != DeclVar->getObjCDeclQualifier())
      return false;
    if (!hasSameType(T1: DeclVar->getType(), T2: ImplVar->getType()))
      return false;
  }

  return (MethodDecl->isVariadic() == MethodImpl->isVariadic());
}
13643
/// Return the integer bit pattern the target uses for a null pointer of
/// type \p QT, based on the pointee's address space (or the default address
/// space for nullptr_t).
uint64_t ASTContext::getTargetNullPointerValue(QualType QT) const {
  LangAS AS;
  if (QT->getUnqualifiedDesugaredType()->isNullPtrType())
    AS = LangAS::Default;
  else
    AS = QT->getPointeeType().getAddressSpace();

  return getTargetInfo().getNullPointerValue(AddrSpace: AS);
}

/// Map a language-level address space to the target's numeric one.
unsigned ASTContext::getTargetAddressSpace(LangAS AS) const {
  return getTargetInfo().getTargetAddressSpace(AS);
}
13657
/// Determine whether two expressions are structurally identical by
/// comparing their canonical profiles. Identical pointers compare equal; a
/// null pointer only compares equal to another null pointer.
bool ASTContext::hasSameExpr(const Expr *X, const Expr *Y) const {
  if (X == Y)
    return true;
  if (!X || !Y)
    return false;
  llvm::FoldingSetNodeID IDX, IDY;
  X->Profile(ID&: IDX, Context: *this, /*Canonical=*/true);
  Y->Profile(ID&: IDY, Context: *this, /*Canonical=*/true);
  return IDX == IDY;
}
13668
13669// The getCommon* helpers return, for given 'same' X and Y entities given as
13670// inputs, another entity which is also the 'same' as the inputs, but which
13671// is closer to the canonical form of the inputs, each according to a given
13672// criteria.
13673// The getCommon*Checked variants are 'null inputs not-allowed' equivalents of
13674// the regular ones.
13675
/// Of two declarations of the same entity, return the older one (the one
/// appearing earlier in the redeclaration chain). Returns null when X and Y
/// do not declare the same entity.
static Decl *getCommonDecl(Decl *X, Decl *Y) {
  if (!declaresSameEntity(D1: X, D2: Y))
    return nullptr;
  // Walk backwards from X through the redeclaration chain.
  for (const Decl *DX : X->redecls()) {
    // If we reach Y before reaching the first decl, that means X is older.
    if (DX == Y)
      return X;
    // If we reach the first decl, then Y is older.
    if (DX->isFirstDecl())
      return Y;
  }
  llvm_unreachable("Corrupt redecls chain");
}

/// Type-preserving wrapper around getCommonDecl; null inputs are allowed
/// and propagate to a null result.
template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true>
static T *getCommonDecl(T *X, T *Y) {
  return cast_or_null<T>(
      getCommonDecl(X: const_cast<Decl *>(cast_or_null<Decl>(X)),
                    Y: const_cast<Decl *>(cast_or_null<Decl>(Y))));
}

/// As getCommonDecl, but inputs (and therefore the result) must be
/// non-null.
template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true>
static T *getCommonDeclChecked(T *X, T *Y) {
  return cast<T>(getCommonDecl(X: const_cast<Decl *>(cast<Decl>(X)),
                               Y: const_cast<Decl *>(cast<Decl>(Y))));
}
13702
13703static TemplateName getCommonTemplateName(const ASTContext &Ctx, TemplateName X,
13704 TemplateName Y,
13705 bool IgnoreDeduced = false) {
13706 if (X.getAsVoidPointer() == Y.getAsVoidPointer())
13707 return X;
13708 // FIXME: There are cases here where we could find a common template name
13709 // with more sugar. For example one could be a SubstTemplateTemplate*
13710 // replacing the other.
13711 TemplateName CX = Ctx.getCanonicalTemplateName(Name: X, IgnoreDeduced);
13712 if (CX.getAsVoidPointer() !=
13713 Ctx.getCanonicalTemplateName(Name: Y).getAsVoidPointer())
13714 return TemplateName();
13715 return CX;
13716}
13717
/// As getCommonTemplateName, but for inputs known to be 'same', so a
/// non-null common result must exist.
static TemplateName getCommonTemplateNameChecked(const ASTContext &Ctx,
                                                 TemplateName X, TemplateName Y,
                                                 bool IgnoreDeduced) {
  TemplateName R = getCommonTemplateName(Ctx, X, Y, IgnoreDeduced);
  assert(R.getAsVoidPointer() != nullptr);
  return R;
}

/// Pairwise-combine two equal-length lists of 'same' types into their
/// common sugared forms.
static auto getCommonTypes(const ASTContext &Ctx, ArrayRef<QualType> Xs,
                           ArrayRef<QualType> Ys, bool Unqualified = false) {
  assert(Xs.size() == Ys.size());
  SmallVector<QualType, 8> Rs(Xs.size());
  for (size_t I = 0; I < Rs.size(); ++I)
    Rs[I] = Ctx.getCommonSugaredType(X: Xs[I], Y: Ys[I], Unqualified);
  return Rs;
}

/// Keep an attribute source location only when both sides agree on it.
template <class T>
static SourceLocation getCommonAttrLoc(const T *X, const T *Y) {
  return X->getAttributeLoc() == Y->getAttributeLoc() ? X->getAttributeLoc()
                                                      : SourceLocation();
}
13740
13741static TemplateArgument getCommonTemplateArgument(const ASTContext &Ctx,
13742 const TemplateArgument &X,
13743 const TemplateArgument &Y) {
13744 if (X.getKind() != Y.getKind())
13745 return TemplateArgument();
13746
13747 switch (X.getKind()) {
13748 case TemplateArgument::ArgKind::Type:
13749 if (!Ctx.hasSameType(T1: X.getAsType(), T2: Y.getAsType()))
13750 return TemplateArgument();
13751 return TemplateArgument(
13752 Ctx.getCommonSugaredType(X: X.getAsType(), Y: Y.getAsType()));
13753 case TemplateArgument::ArgKind::NullPtr:
13754 if (!Ctx.hasSameType(T1: X.getNullPtrType(), T2: Y.getNullPtrType()))
13755 return TemplateArgument();
13756 return TemplateArgument(
13757 Ctx.getCommonSugaredType(X: X.getNullPtrType(), Y: Y.getNullPtrType()),
13758 /*Unqualified=*/true);
13759 case TemplateArgument::ArgKind::Expression:
13760 if (!Ctx.hasSameType(T1: X.getAsExpr()->getType(), T2: Y.getAsExpr()->getType()))
13761 return TemplateArgument();
13762 // FIXME: Try to keep the common sugar.
13763 return X;
13764 case TemplateArgument::ArgKind::Template: {
13765 TemplateName TX = X.getAsTemplate(), TY = Y.getAsTemplate();
13766 TemplateName CTN = ::getCommonTemplateName(Ctx, X: TX, Y: TY);
13767 if (!CTN.getAsVoidPointer())
13768 return TemplateArgument();
13769 return TemplateArgument(CTN);
13770 }
13771 case TemplateArgument::ArgKind::TemplateExpansion: {
13772 TemplateName TX = X.getAsTemplateOrTemplatePattern(),
13773 TY = Y.getAsTemplateOrTemplatePattern();
13774 TemplateName CTN = ::getCommonTemplateName(Ctx, X: TX, Y: TY);
13775 if (!CTN.getAsVoidPointer())
13776 return TemplateName();
13777 auto NExpX = X.getNumTemplateExpansions();
13778 assert(NExpX == Y.getNumTemplateExpansions());
13779 return TemplateArgument(CTN, NExpX);
13780 }
13781 default:
13782 // FIXME: Handle the other argument kinds.
13783 return X;
13784 }
13785}
13786
/// Compute common forms for two template-argument lists into \p R.
/// Returns true on failure (length mismatch, or some pair has no common
/// form); on failure R's contents are unspecified.
static bool getCommonTemplateArguments(const ASTContext &Ctx,
                                       SmallVectorImpl<TemplateArgument> &R,
                                       ArrayRef<TemplateArgument> Xs,
                                       ArrayRef<TemplateArgument> Ys) {
  if (Xs.size() != Ys.size())
    return true;
  R.resize(N: Xs.size());
  for (size_t I = 0; I < R.size(); ++I) {
    R[I] = getCommonTemplateArgument(Ctx, X: Xs[I], Y: Ys[I]);
    if (R[I].isNull())
      return true;
  }
  return false;
}

/// Convenience overload for lists known to be 'same'; failure would
/// indicate a bug, so it is only asserted on.
static auto getCommonTemplateArguments(const ASTContext &Ctx,
                                       ArrayRef<TemplateArgument> Xs,
                                       ArrayRef<TemplateArgument> Ys) {
  SmallVector<TemplateArgument, 8> R;
  bool Different = getCommonTemplateArguments(Ctx, R, Xs, Ys);
  assert(!Different);
  (void)Different;
  return R;
}
13811
/// Return a common elaborated-type keyword for two 'same' types: the keyword
/// itself when both agree, otherwise the canonical form of X's keyword
/// (asserting Y canonicalizes identically when IsSame).
template <class T>
static ElaboratedTypeKeyword getCommonTypeKeyword(const T *X, const T *Y,
                                                  bool IsSame) {
  ElaboratedTypeKeyword KX = X->getKeyword(), KY = Y->getKeyword();
  if (KX == KY)
    return KX;
  KX = getCanonicalElaboratedTypeKeyword(Keyword: KX);
  assert(!IsSame || KX == getCanonicalElaboratedTypeKeyword(KY));
  return KX;
}

/// Returns a NestedNameSpecifier which has only the common sugar
/// present in both NNS1 and NNS2.
static NestedNameSpecifier getCommonNNS(const ASTContext &Ctx,
                                        NestedNameSpecifier NNS1,
                                        NestedNameSpecifier NNS2, bool IsSame) {
  // If they are identical, all sugar is common.
  if (NNS1 == NNS2)
    return NNS1;

  // IsSame implies both Qualifiers are equivalent.
  NestedNameSpecifier Canon = NNS1.getCanonical();
  if (Canon != NNS2.getCanonical()) {
    assert(!IsSame && "Should be the same NestedNameSpecifier");
    // If they are not the same, there is nothing to unify.
    return std::nullopt;
  }

  NestedNameSpecifier R = std::nullopt;
  NestedNameSpecifier::Kind Kind = NNS1.getKind();
  assert(Kind == NNS2.getKind());
  switch (Kind) {
  case NestedNameSpecifier::Kind::Namespace: {
    auto [Namespace1, Prefix1] = NNS1.getAsNamespaceAndPrefix();
    auto [Namespace2, Prefix2] = NNS2.getAsNamespaceAndPrefix();
    auto Kind = Namespace1->getKind();
    // If one side is a namespace alias and they don't refer to the same
    // declaration, fall back to the underlying namespaces with no prefix.
    if (Kind != Namespace2->getKind() ||
        (Kind == Decl::NamespaceAlias &&
         !declaresSameEntity(D1: Namespace1, D2: Namespace2))) {
      R = NestedNameSpecifier(
          Ctx,
          ::getCommonDeclChecked(X: Namespace1->getNamespace(),
                                 Y: Namespace2->getNamespace()),
          /*Prefix=*/std::nullopt);
      break;
    }
    // The prefixes for namespaces are not significant, its declaration
    // identifies it uniquely.
    NestedNameSpecifier Prefix = ::getCommonNNS(Ctx, NNS1: Prefix1, NNS2: Prefix2,
                                                /*IsSame=*/false);
    R = NestedNameSpecifier(Ctx, ::getCommonDeclChecked(X: Namespace1, Y: Namespace2),
                            Prefix);
    break;
  }
  case NestedNameSpecifier::Kind::Type: {
    // Unify the two qualifier types into their common sugared form.
    const Type *T1 = NNS1.getAsType(), *T2 = NNS2.getAsType();
    const Type *T = Ctx.getCommonSugaredType(X: QualType(T1, 0), Y: QualType(T2, 0),
                                             /*Unqualified=*/true)
                        .getTypePtr();
    R = NestedNameSpecifier(T);
    break;
  }
  case NestedNameSpecifier::Kind::MicrosoftSuper: {
    // FIXME: Can __super even be used with data members?
    // If it's only usable in functions, we will never see it here,
    // unless we save the qualifiers used in function types.
    // In that case, it might be possible NNS2 is a type,
    // in which case we should degrade the result to
    // a CXXRecordType.
    R = NestedNameSpecifier(getCommonDeclChecked(X: NNS1.getAsMicrosoftSuper(),
                                                 Y: NNS2.getAsMicrosoftSuper()));
    break;
  }
  case NestedNameSpecifier::Kind::Null:
  case NestedNameSpecifier::Kind::Global:
    // These are singletons.
    llvm_unreachable("singletons did not compare equal");
  }
  // The unified result must still canonicalize to the shared canonical NNS.
  assert(R.getCanonical() == Canon);
  return R;
}
13893
/// Unify the nested-name qualifiers of two 'same' types.
template <class T>
static NestedNameSpecifier getCommonQualifier(const ASTContext &Ctx, const T *X,
                                              const T *Y, bool IsSame) {
  return ::getCommonNNS(Ctx, NNS1: X->getQualifier(), NNS2: Y->getQualifier(), IsSame);
}

/// Unify the element types of two 'same' types (vectors, complex, etc.).
template <class T>
static QualType getCommonElementType(const ASTContext &Ctx, const T *X,
                                     const T *Y) {
  return Ctx.getCommonSugaredType(X: X->getElementType(), Y: Y->getElementType());
}

/// Unify the element types of two 'same' array types, unqualified; any
/// per-side qualifiers not common to both elements are pushed up into
/// \p QX / \p QY for the caller to reapply at the top level.
template <class T>
static QualType getCommonArrayElementType(const ASTContext &Ctx, const T *X,
                                          Qualifiers &QX, const T *Y,
                                          Qualifiers &QY) {
  QualType EX = X->getElementType(), EY = Y->getElementType();
  QualType R = Ctx.getCommonSugaredType(X: EX, Y: EY,
                                        /*Unqualified=*/true);
  // Qualifiers common to both element types.
  Qualifiers RQ = R.getQualifiers();
  // For each side, move to the top level any qualifiers which are not common to
  // both element types. The caller must assume top level qualifiers might
  // be different, even if they are the same type, and can be treated as sugar.
  QX += EX.getQualifiers() - RQ;
  QY += EY.getQualifiers() - RQ;
  return R;
}

/// Unify the pointee types of two 'same' pointer-like types.
template <class T>
static QualType getCommonPointeeType(const ASTContext &Ctx, const T *X,
                                     const T *Y) {
  return Ctx.getCommonSugaredType(X: X->getPointeeType(), Y: Y->getPointeeType());
}

/// Pick a size expression from two 'same' types; the expressions must be
/// structurally identical, so X's is returned.
template <class T>
static auto *getCommonSizeExpr(const ASTContext &Ctx, T *X, T *Y) {
  assert(Ctx.hasSameExpr(X->getSizeExpr(), Y->getSizeExpr()));
  return X->getSizeExpr();
}

/// Pick the size modifier of two 'same' array types (must agree).
static auto getCommonSizeModifier(const ArrayType *X, const ArrayType *Y) {
  assert(X->getSizeModifier() == Y->getSizeModifier());
  return X->getSizeModifier();
}

/// Pick the index-type CVR qualifiers of two 'same' array types (must
/// agree).
static auto getCommonIndexTypeCVRQualifiers(const ArrayType *X,
                                            const ArrayType *Y) {
  assert(X->getIndexTypeCVRQualifiers() == Y->getIndexTypeCVRQualifiers());
  return X->getIndexTypeCVRQualifiers();
}
13945
// Merges two type lists such that the resulting vector will contain
// each type (in a canonical sense) only once, in the order they appear
// from X to Y. If they occur in both X and Y, the result will contain
// the common sugared type between them.
static void mergeTypeLists(const ASTContext &Ctx,
                           SmallVectorImpl<QualType> &Out, ArrayRef<QualType> X,
                           ArrayRef<QualType> Y) {
  // Maps each canonical type to its position in Out.
  llvm::DenseMap<QualType, unsigned> Found;
  for (auto Ts : {X, Y}) {
    for (QualType T : Ts) {
      auto Res = Found.try_emplace(Key: Ctx.getCanonicalType(T), Args: Out.size());
      if (!Res.second) {
        // Already present: merge sugar with the existing occurrence.
        QualType &U = Out[Res.first->second];
        U = Ctx.getCommonSugaredType(X: U, Y: T);
      } else {
        Out.emplace_back(Args&: T);
      }
    }
  }
}
13966
/// Form the composite exception specification of \p ESI1 and \p ESI2: the
/// least restrictive specification that permits everything either of the two
/// permits. If the result is a dynamic exception specification, its type list
/// is stored in \p ExceptionTypeStorage, which must outlive the returned
/// value. \p AcceptDependent controls whether value-dependent computed
/// noexcept specifications may be encountered (asserted below).
FunctionProtoType::ExceptionSpecInfo
ASTContext::mergeExceptionSpecs(FunctionProtoType::ExceptionSpecInfo ESI1,
                                FunctionProtoType::ExceptionSpecInfo ESI2,
                                SmallVectorImpl<QualType> &ExceptionTypeStorage,
                                bool AcceptDependent) const {
  ExceptionSpecificationType EST1 = ESI1.Type, EST2 = ESI2.Type;

  // If either of them can throw anything, that is the result.
  for (auto I : {EST_None, EST_MSAny, EST_NoexceptFalse}) {
    if (EST1 == I)
      return ESI1;
    if (EST2 == I)
      return ESI2;
  }

  // If either of them is non-throwing, the result is the other.
  for (auto I :
       {EST_NoThrow, EST_DynamicNone, EST_BasicNoexcept, EST_NoexceptTrue}) {
    if (EST1 == I)
      return ESI2;
    if (EST2 == I)
      return ESI1;
  }

  // If we're left with value-dependent computed noexcept expressions, we're
  // stuck. Before C++17, we can just drop the exception specification entirely,
  // since it's not actually part of the canonical type. And this should never
  // happen in C++17, because it would mean we were computing the composite
  // pointer type of dependent types, which should never happen.
  if (EST1 == EST_DependentNoexcept || EST2 == EST_DependentNoexcept) {
    assert(AcceptDependent &&
           "computing composite pointer type of dependent types");
    // Default-constructed info has EST_None, i.e. no exception specification.
    return FunctionProtoType::ExceptionSpecInfo();
  }

  // Switch over the possibilities so that people adding new values know to
  // update this function.
  switch (EST1) {
  case EST_None:
  case EST_DynamicNone:
  case EST_MSAny:
  case EST_BasicNoexcept:
  case EST_DependentNoexcept:
  case EST_NoexceptFalse:
  case EST_NoexceptTrue:
  case EST_NoThrow:
    llvm_unreachable("These ESTs should be handled above");

  case EST_Dynamic: {
    // This is the fun case: both exception specifications are dynamic. Form
    // the union of the two lists.
    assert(EST2 == EST_Dynamic && "other cases should already be handled");
    mergeTypeLists(Ctx: *this, Out&: ExceptionTypeStorage, X: ESI1.Exceptions,
                   Y: ESI2.Exceptions);
    FunctionProtoType::ExceptionSpecInfo Result(EST_Dynamic);
    // Result references ExceptionTypeStorage; the caller owns the storage.
    Result.Exceptions = ExceptionTypeStorage;
    return Result;
  }

  case EST_Unevaluated:
  case EST_Uninstantiated:
  case EST_Unparsed:
    llvm_unreachable("shouldn't see unresolved exception specifications here");
  }

  llvm_unreachable("invalid ExceptionSpecificationType");
}
14034
// Unify the "canonical nodes" of X and Y: two non-sugar type nodes of the
// same type class, whose properties (pointee types, element types, template
// arguments, ...) may nevertheless carry different sugar on each side.
// Produces a single node of that class whose properties are the common
// sugared form of the corresponding properties of X and Y. Qualifiers hoisted
// out of array element types are accumulated into QX and QY by
// getCommonArrayElementType.
static QualType getCommonNonSugarTypeNode(const ASTContext &Ctx, const Type *X,
                                          Qualifiers &QX, const Type *Y,
                                          Qualifiers &QY) {
  Type::TypeClass TC = X->getTypeClass();
  assert(TC == Y->getTypeClass());
  switch (TC) {
  // Type classes which must not reach this function are grouped below, so
  // that adding a new type node forces a conscious decision here.
#define UNEXPECTED_TYPE(Class, Kind)                                           \
  case Type::Class:                                                            \
    llvm_unreachable("Unexpected " Kind ": " #Class);

#define NON_CANONICAL_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "non-canonical")
#define TYPE(Class, Base)
#include "clang/AST/TypeNodes.inc"

#define SUGAR_FREE_TYPE(Class) UNEXPECTED_TYPE(Class, "sugar-free")
  SUGAR_FREE_TYPE(Builtin)
  SUGAR_FREE_TYPE(DeducedTemplateSpecialization)
  SUGAR_FREE_TYPE(DependentBitInt)
  SUGAR_FREE_TYPE(BitInt)
  SUGAR_FREE_TYPE(ObjCInterface)
  SUGAR_FREE_TYPE(SubstTemplateTypeParmPack)
  SUGAR_FREE_TYPE(SubstBuiltinTemplatePack)
  SUGAR_FREE_TYPE(UnresolvedUsing)
  SUGAR_FREE_TYPE(HLSLAttributedResource)
  SUGAR_FREE_TYPE(HLSLInlineSpirv)
#undef SUGAR_FREE_TYPE
#define NON_UNIQUE_TYPE(Class) UNEXPECTED_TYPE(Class, "non-unique")
  NON_UNIQUE_TYPE(TypeOfExpr)
  NON_UNIQUE_TYPE(VariableArray)
#undef NON_UNIQUE_TYPE

  UNEXPECTED_TYPE(TypeOf, "sugar")

#undef UNEXPECTED_TYPE

  case Type::Auto: {
    const auto *AX = cast<AutoType>(Val: X), *AY = cast<AutoType>(Val: Y);
    // Only undeduced autos can appear here; a deduced auto is sugar.
    assert(AX->getDeducedType().isNull());
    assert(AY->getDeducedType().isNull());
    assert(AX->getKeyword() == AY->getKeyword());
    assert(AX->isInstantiationDependentType() ==
           AY->isInstantiationDependentType());
    auto As = getCommonTemplateArguments(Ctx, Xs: AX->getTypeConstraintArguments(),
                                         Ys: AY->getTypeConstraintArguments());
    return Ctx.getAutoType(DeducedType: QualType(), Keyword: AX->getKeyword(),
                           IsDependent: AX->isInstantiationDependentType(),
                           IsPack: AX->containsUnexpandedParameterPack(),
                           TypeConstraintConcept: getCommonDeclChecked(X: AX->getTypeConstraintConcept(),
                                               Y: AY->getTypeConstraintConcept()),
                           TypeConstraintArgs: As);
  }
  case Type::IncompleteArray: {
    const auto *AX = cast<IncompleteArrayType>(Val: X),
               *AY = cast<IncompleteArrayType>(Val: Y);
    return Ctx.getIncompleteArrayType(
        elementType: getCommonArrayElementType(Ctx, X: AX, QX, Y: AY, QY),
        ASM: getCommonSizeModifier(X: AX, Y: AY), elementTypeQuals: getCommonIndexTypeCVRQualifiers(X: AX, Y: AY));
  }
  case Type::DependentSizedArray: {
    const auto *AX = cast<DependentSizedArrayType>(Val: X),
               *AY = cast<DependentSizedArrayType>(Val: Y);
    return Ctx.getDependentSizedArrayType(
        elementType: getCommonArrayElementType(Ctx, X: AX, QX, Y: AY, QY),
        numElements: getCommonSizeExpr(Ctx, X: AX, Y: AY), ASM: getCommonSizeModifier(X: AX, Y: AY),
        elementTypeQuals: getCommonIndexTypeCVRQualifiers(X: AX, Y: AY));
  }
  case Type::ConstantArray: {
    const auto *AX = cast<ConstantArrayType>(Val: X),
               *AY = cast<ConstantArrayType>(Val: Y);
    assert(AX->getSize() == AY->getSize());
    // Keep the as-written size expression only if both sides agree on it.
    const Expr *SizeExpr = Ctx.hasSameExpr(X: AX->getSizeExpr(), Y: AY->getSizeExpr())
                               ? AX->getSizeExpr()
                               : nullptr;
    return Ctx.getConstantArrayType(
        EltTy: getCommonArrayElementType(Ctx, X: AX, QX, Y: AY, QY), ArySizeIn: AX->getSize(), SizeExpr,
        ASM: getCommonSizeModifier(X: AX, Y: AY), IndexTypeQuals: getCommonIndexTypeCVRQualifiers(X: AX, Y: AY));
  }
  case Type::ArrayParameter: {
    const auto *AX = cast<ArrayParameterType>(Val: X),
               *AY = cast<ArrayParameterType>(Val: Y);
    assert(AX->getSize() == AY->getSize());
    const Expr *SizeExpr = Ctx.hasSameExpr(X: AX->getSizeExpr(), Y: AY->getSizeExpr())
                               ? AX->getSizeExpr()
                               : nullptr;
    // Unify as a constant array first, then re-wrap as an array parameter.
    auto ArrayTy = Ctx.getConstantArrayType(
        EltTy: getCommonArrayElementType(Ctx, X: AX, QX, Y: AY, QY), ArySizeIn: AX->getSize(), SizeExpr,
        ASM: getCommonSizeModifier(X: AX, Y: AY), IndexTypeQuals: getCommonIndexTypeCVRQualifiers(X: AX, Y: AY));
    return Ctx.getArrayParameterType(Ty: ArrayTy);
  }
  case Type::Atomic: {
    const auto *AX = cast<AtomicType>(Val: X), *AY = cast<AtomicType>(Val: Y);
    return Ctx.getAtomicType(
        T: Ctx.getCommonSugaredType(X: AX->getValueType(), Y: AY->getValueType()));
  }
  case Type::Complex: {
    const auto *CX = cast<ComplexType>(Val: X), *CY = cast<ComplexType>(Val: Y);
    return Ctx.getComplexType(T: getCommonArrayElementType(Ctx, X: CX, QX, Y: CY, QY));
  }
  case Type::Pointer: {
    const auto *PX = cast<PointerType>(Val: X), *PY = cast<PointerType>(Val: Y);
    return Ctx.getPointerType(T: getCommonPointeeType(Ctx, X: PX, Y: PY));
  }
  case Type::BlockPointer: {
    const auto *PX = cast<BlockPointerType>(Val: X), *PY = cast<BlockPointerType>(Val: Y);
    return Ctx.getBlockPointerType(T: getCommonPointeeType(Ctx, X: PX, Y: PY));
  }
  case Type::ObjCObjectPointer: {
    const auto *PX = cast<ObjCObjectPointerType>(Val: X),
               *PY = cast<ObjCObjectPointerType>(Val: Y);
    return Ctx.getObjCObjectPointerType(ObjectT: getCommonPointeeType(Ctx, X: PX, Y: PY));
  }
  case Type::MemberPointer: {
    const auto *PX = cast<MemberPointerType>(Val: X),
               *PY = cast<MemberPointerType>(Val: Y);
    assert(declaresSameEntity(PX->getMostRecentCXXRecordDecl(),
                              PY->getMostRecentCXXRecordDecl()));
    return Ctx.getMemberPointerType(
        T: getCommonPointeeType(Ctx, X: PX, Y: PY),
        Qualifier: getCommonQualifier(Ctx, X: PX, Y: PY, /*IsSame=*/true),
        Cls: PX->getMostRecentCXXRecordDecl());
  }
  case Type::LValueReference: {
    const auto *PX = cast<LValueReferenceType>(Val: X),
               *PY = cast<LValueReferenceType>(Val: Y);
    // FIXME: Preserve PointeeTypeAsWritten.
    return Ctx.getLValueReferenceType(T: getCommonPointeeType(Ctx, X: PX, Y: PY),
                                      SpelledAsLValue: PX->isSpelledAsLValue() ||
                                          PY->isSpelledAsLValue());
  }
  case Type::RValueReference: {
    const auto *PX = cast<RValueReferenceType>(Val: X),
               *PY = cast<RValueReferenceType>(Val: Y);
    // FIXME: Preserve PointeeTypeAsWritten.
    return Ctx.getRValueReferenceType(T: getCommonPointeeType(Ctx, X: PX, Y: PY));
  }
  case Type::DependentAddressSpace: {
    const auto *PX = cast<DependentAddressSpaceType>(Val: X),
               *PY = cast<DependentAddressSpaceType>(Val: Y);
    assert(Ctx.hasSameExpr(PX->getAddrSpaceExpr(), PY->getAddrSpaceExpr()));
    return Ctx.getDependentAddressSpaceType(PointeeType: getCommonPointeeType(Ctx, X: PX, Y: PY),
                                            AddrSpaceExpr: PX->getAddrSpaceExpr(),
                                            AttrLoc: getCommonAttrLoc(X: PX, Y: PY));
  }
  case Type::FunctionNoProto: {
    const auto *FX = cast<FunctionNoProtoType>(Val: X),
               *FY = cast<FunctionNoProtoType>(Val: Y);
    assert(FX->getExtInfo() == FY->getExtInfo());
    return Ctx.getFunctionNoProtoType(
        ResultTy: Ctx.getCommonSugaredType(X: FX->getReturnType(), Y: FY->getReturnType()),
        Info: FX->getExtInfo());
  }
  case Type::FunctionProto: {
    const auto *FX = cast<FunctionProtoType>(Val: X),
               *FY = cast<FunctionProtoType>(Val: Y);
    // The canonically-relevant parts of the ExtProtoInfo must already agree;
    // only the sugared parts (return/parameter types, exception specs) are
    // unified below.
    FunctionProtoType::ExtProtoInfo EPIX = FX->getExtProtoInfo(),
                                    EPIY = FY->getExtProtoInfo();
    assert(EPIX.ExtInfo == EPIY.ExtInfo);
    assert(!EPIX.ExtParameterInfos == !EPIY.ExtParameterInfos);
    assert(!EPIX.ExtParameterInfos ||
           llvm::equal(
               llvm::ArrayRef(EPIX.ExtParameterInfos, FX->getNumParams()),
               llvm::ArrayRef(EPIY.ExtParameterInfos, FY->getNumParams())));
    assert(EPIX.RefQualifier == EPIY.RefQualifier);
    assert(EPIX.TypeQuals == EPIY.TypeQuals);
    assert(EPIX.Variadic == EPIY.Variadic);

    // FIXME: Can we handle an empty EllipsisLoc?
    // Use empty EllipsisLoc if X and Y differ.

    EPIX.HasTrailingReturn = EPIX.HasTrailingReturn && EPIY.HasTrailingReturn;

    QualType R =
        Ctx.getCommonSugaredType(X: FX->getReturnType(), Y: FY->getReturnType());
    auto P = getCommonTypes(Ctx, Xs: FX->param_types(), Ys: FY->param_types(),
                            /*Unqualified=*/true);

    SmallVector<QualType, 8> Exceptions;
    EPIX.ExceptionSpec = Ctx.mergeExceptionSpecs(
        ESI1: EPIX.ExceptionSpec, ESI2: EPIY.ExceptionSpec, ExceptionTypeStorage&: Exceptions, AcceptDependent: true);
    return Ctx.getFunctionType(ResultTy: R, Args: P, EPI: EPIX);
  }
  case Type::ObjCObject: {
    const auto *OX = cast<ObjCObjectType>(Val: X), *OY = cast<ObjCObjectType>(Val: Y);
    assert(
        std::equal(OX->getProtocols().begin(), OX->getProtocols().end(),
                   OY->getProtocols().begin(), OY->getProtocols().end(),
                   [](const ObjCProtocolDecl *P0, const ObjCProtocolDecl *P1) {
                     return P0->getCanonicalDecl() == P1->getCanonicalDecl();
                   }) &&
        "protocol lists must be the same");
    auto TAs = getCommonTypes(Ctx, Xs: OX->getTypeArgsAsWritten(),
                              Ys: OY->getTypeArgsAsWritten());
    return Ctx.getObjCObjectType(
        baseType: Ctx.getCommonSugaredType(X: OX->getBaseType(), Y: OY->getBaseType()), typeArgs: TAs,
        protocols: OX->getProtocols(),
        isKindOf: OX->isKindOfTypeAsWritten() && OY->isKindOfTypeAsWritten());
  }
  case Type::ConstantMatrix: {
    const auto *MX = cast<ConstantMatrixType>(Val: X),
               *MY = cast<ConstantMatrixType>(Val: Y);
    assert(MX->getNumRows() == MY->getNumRows());
    assert(MX->getNumColumns() == MY->getNumColumns());
    return Ctx.getConstantMatrixType(ElementTy: getCommonElementType(Ctx, X: MX, Y: MY),
                                     NumRows: MX->getNumRows(), NumColumns: MX->getNumColumns());
  }
  case Type::DependentSizedMatrix: {
    const auto *MX = cast<DependentSizedMatrixType>(Val: X),
               *MY = cast<DependentSizedMatrixType>(Val: Y);
    assert(Ctx.hasSameExpr(MX->getRowExpr(), MY->getRowExpr()));
    assert(Ctx.hasSameExpr(MX->getColumnExpr(), MY->getColumnExpr()));
    return Ctx.getDependentSizedMatrixType(
        ElementTy: getCommonElementType(Ctx, X: MX, Y: MY), RowExpr: MX->getRowExpr(),
        ColumnExpr: MX->getColumnExpr(), AttrLoc: getCommonAttrLoc(X: MX, Y: MY));
  }
  case Type::Vector: {
    const auto *VX = cast<VectorType>(Val: X), *VY = cast<VectorType>(Val: Y);
    assert(VX->getNumElements() == VY->getNumElements());
    assert(VX->getVectorKind() == VY->getVectorKind());
    return Ctx.getVectorType(vecType: getCommonElementType(Ctx, X: VX, Y: VY),
                             NumElts: VX->getNumElements(), VecKind: VX->getVectorKind());
  }
  case Type::ExtVector: {
    const auto *VX = cast<ExtVectorType>(Val: X), *VY = cast<ExtVectorType>(Val: Y);
    assert(VX->getNumElements() == VY->getNumElements());
    return Ctx.getExtVectorType(vecType: getCommonElementType(Ctx, X: VX, Y: VY),
                                NumElts: VX->getNumElements());
  }
  case Type::DependentSizedExtVector: {
    const auto *VX = cast<DependentSizedExtVectorType>(Val: X),
               *VY = cast<DependentSizedExtVectorType>(Val: Y);
    return Ctx.getDependentSizedExtVectorType(vecType: getCommonElementType(Ctx, X: VX, Y: VY),
                                              SizeExpr: getCommonSizeExpr(Ctx, X: VX, Y: VY),
                                              AttrLoc: getCommonAttrLoc(X: VX, Y: VY));
  }
  case Type::DependentVector: {
    const auto *VX = cast<DependentVectorType>(Val: X),
               *VY = cast<DependentVectorType>(Val: Y);
    assert(VX->getVectorKind() == VY->getVectorKind());
    return Ctx.getDependentVectorType(
        VecType: getCommonElementType(Ctx, X: VX, Y: VY), SizeExpr: getCommonSizeExpr(Ctx, X: VX, Y: VY),
        AttrLoc: getCommonAttrLoc(X: VX, Y: VY), VecKind: VX->getVectorKind());
  }
  case Type::Enum:
  case Type::Record:
  case Type::InjectedClassName: {
    const auto *TX = cast<TagType>(Val: X), *TY = cast<TagType>(Val: Y);
    return Ctx.getTagType(Keyword: ::getCommonTypeKeyword(X: TX, Y: TY, /*IsSame=*/false),
                          Qualifier: ::getCommonQualifier(Ctx, X: TX, Y: TY, /*IsSame=*/false),
                          TD: ::getCommonDeclChecked(X: TX->getDecl(), Y: TY->getDecl()),
                          /*OwnedTag=*/OwnsTag: false);
  }
  case Type::TemplateSpecialization: {
    const auto *TX = cast<TemplateSpecializationType>(Val: X),
               *TY = cast<TemplateSpecializationType>(Val: Y);
    auto As = getCommonTemplateArguments(Ctx, Xs: TX->template_arguments(),
                                         Ys: TY->template_arguments());
    return Ctx.getTemplateSpecializationType(
        Keyword: getCommonTypeKeyword(X: TX, Y: TY, /*IsSame=*/false),
        Template: ::getCommonTemplateNameChecked(Ctx, X: TX->getTemplateName(),
                                        Y: TY->getTemplateName(),
                                        /*IgnoreDeduced=*/true),
        SpecifiedArgs: As, /*CanonicalArgs=*/{}, Underlying: X->getCanonicalTypeInternal());
  }
  case Type::Decltype: {
    const auto *DX = cast<DecltypeType>(Val: X);
    [[maybe_unused]] const auto *DY = cast<DecltypeType>(Val: Y);
    assert(DX->isDependentType());
    assert(DY->isDependentType());
    assert(Ctx.hasSameExpr(DX->getUnderlyingExpr(), DY->getUnderlyingExpr()));
    // As Decltype is not uniqued, building a common type would be wasteful.
    return QualType(DX, 0);
  }
  case Type::PackIndexing: {
    const auto *DX = cast<PackIndexingType>(Val: X);
    [[maybe_unused]] const auto *DY = cast<PackIndexingType>(Val: Y);
    assert(DX->isDependentType());
    assert(DY->isDependentType());
    assert(Ctx.hasSameExpr(DX->getIndexExpr(), DY->getIndexExpr()));
    // Like Decltype above, PackIndexing is not uniqued; keep X's node.
    return QualType(DX, 0);
  }
  case Type::DependentName: {
    const auto *NX = cast<DependentNameType>(Val: X),
               *NY = cast<DependentNameType>(Val: Y);
    assert(NX->getIdentifier() == NY->getIdentifier());
    return Ctx.getDependentNameType(
        Keyword: getCommonTypeKeyword(X: NX, Y: NY, /*IsSame=*/true),
        NNS: getCommonQualifier(Ctx, X: NX, Y: NY, /*IsSame=*/true), Name: NX->getIdentifier());
  }
  case Type::UnaryTransform: {
    const auto *TX = cast<UnaryTransformType>(Val: X),
               *TY = cast<UnaryTransformType>(Val: Y);
    assert(TX->getUTTKind() == TY->getUTTKind());
    return Ctx.getUnaryTransformType(
        BaseType: Ctx.getCommonSugaredType(X: TX->getBaseType(), Y: TY->getBaseType()),
        UnderlyingType: Ctx.getCommonSugaredType(X: TX->getUnderlyingType(),
                                 Y: TY->getUnderlyingType()),
        Kind: TX->getUTTKind());
  }
  case Type::PackExpansion: {
    const auto *PX = cast<PackExpansionType>(Val: X),
               *PY = cast<PackExpansionType>(Val: Y);
    assert(PX->getNumExpansions() == PY->getNumExpansions());
    return Ctx.getPackExpansionType(
        Pattern: Ctx.getCommonSugaredType(X: PX->getPattern(), Y: PY->getPattern()),
        NumExpansions: PX->getNumExpansions(), ExpectPackInType: false);
  }
  case Type::Pipe: {
    const auto *PX = cast<PipeType>(Val: X), *PY = cast<PipeType>(Val: Y);
    assert(PX->isReadOnly() == PY->isReadOnly());
    // Dispatch to the read- or write-pipe factory via member pointer.
    auto MP = PX->isReadOnly() ? &ASTContext::getReadPipeType
                               : &ASTContext::getWritePipeType;
    return (Ctx.*MP)(getCommonElementType(Ctx, X: PX, Y: PY));
  }
  case Type::TemplateTypeParm: {
    const auto *TX = cast<TemplateTypeParmType>(Val: X),
               *TY = cast<TemplateTypeParmType>(Val: Y);
    assert(TX->getDepth() == TY->getDepth());
    assert(TX->getIndex() == TY->getIndex());
    assert(TX->isParameterPack() == TY->isParameterPack());
    return Ctx.getTemplateTypeParmType(
        Depth: TX->getDepth(), Index: TX->getIndex(), ParameterPack: TX->isParameterPack(),
        TTPDecl: getCommonDecl(X: TX->getDecl(), Y: TY->getDecl()));
  }
  }
  llvm_unreachable("Unknown Type Class");
}
14361
// Unify two sugar nodes X and Y of the same type class on top of the already
// unified Underlying type. Returns a null QualType when the two sugar nodes
// are unrelated and cannot be merged, in which case the caller stops
// restoring sugar at this depth.
static QualType getCommonSugarTypeNode(const ASTContext &Ctx, const Type *X,
                                       const Type *Y,
                                       SplitQualType Underlying) {
  Type::TypeClass TC = X->getTypeClass();
  if (TC != Y->getTypeClass())
    return QualType();
  switch (TC) {
  // Type classes which must not reach this function are grouped below.
#define UNEXPECTED_TYPE(Class, Kind)                                           \
  case Type::Class:                                                           \
    llvm_unreachable("Unexpected " Kind ": " #Class);
#define TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "dependent")
#include "clang/AST/TypeNodes.inc"

#define CANONICAL_TYPE(Class) UNEXPECTED_TYPE(Class, "canonical")
  CANONICAL_TYPE(Atomic)
  CANONICAL_TYPE(BitInt)
  CANONICAL_TYPE(BlockPointer)
  CANONICAL_TYPE(Builtin)
  CANONICAL_TYPE(Complex)
  CANONICAL_TYPE(ConstantArray)
  CANONICAL_TYPE(ArrayParameter)
  CANONICAL_TYPE(ConstantMatrix)
  CANONICAL_TYPE(Enum)
  CANONICAL_TYPE(ExtVector)
  CANONICAL_TYPE(FunctionNoProto)
  CANONICAL_TYPE(FunctionProto)
  CANONICAL_TYPE(IncompleteArray)
  CANONICAL_TYPE(HLSLAttributedResource)
  CANONICAL_TYPE(HLSLInlineSpirv)
  CANONICAL_TYPE(LValueReference)
  CANONICAL_TYPE(ObjCInterface)
  CANONICAL_TYPE(ObjCObject)
  CANONICAL_TYPE(ObjCObjectPointer)
  CANONICAL_TYPE(Pipe)
  CANONICAL_TYPE(Pointer)
  CANONICAL_TYPE(Record)
  CANONICAL_TYPE(RValueReference)
  CANONICAL_TYPE(VariableArray)
  CANONICAL_TYPE(Vector)
#undef CANONICAL_TYPE

#undef UNEXPECTED_TYPE

  case Type::Adjusted: {
    const auto *AX = cast<AdjustedType>(Val: X), *AY = cast<AdjustedType>(Val: Y);
    QualType OX = AX->getOriginalType(), OY = AY->getOriginalType();
    if (!Ctx.hasSameType(T1: OX, T2: OY))
      return QualType();
    // FIXME: It's inefficient to have to unify the original types.
    return Ctx.getAdjustedType(Orig: Ctx.getCommonSugaredType(X: OX, Y: OY),
                               New: Ctx.getQualifiedType(split: Underlying));
  }
  case Type::Decayed: {
    const auto *DX = cast<DecayedType>(Val: X), *DY = cast<DecayedType>(Val: Y);
    QualType OX = DX->getOriginalType(), OY = DY->getOriginalType();
    if (!Ctx.hasSameType(T1: OX, T2: OY))
      return QualType();
    // FIXME: It's inefficient to have to unify the original types.
    return Ctx.getDecayedType(Orig: Ctx.getCommonSugaredType(X: OX, Y: OY),
                              Decayed: Ctx.getQualifiedType(split: Underlying));
  }
  case Type::Attributed: {
    const auto *AX = cast<AttributedType>(Val: X), *AY = cast<AttributedType>(Val: Y);
    AttributedType::Kind Kind = AX->getAttrKind();
    if (Kind != AY->getAttrKind())
      return QualType();
    QualType MX = AX->getModifiedType(), MY = AY->getModifiedType();
    if (!Ctx.hasSameType(T1: MX, T2: MY))
      return QualType();
    // FIXME: It's inefficient to have to unify the modified types.
    return Ctx.getAttributedType(attrKind: Kind, modifiedType: Ctx.getCommonSugaredType(X: MX, Y: MY),
                                 equivalentType: Ctx.getQualifiedType(split: Underlying),
                                 attr: AX->getAttr());
  }
  case Type::BTFTagAttributed: {
    const auto *BX = cast<BTFTagAttributedType>(Val: X);
    const BTFTypeTagAttr *AX = BX->getAttr();
    // The attribute is not uniqued, so just compare the tag.
    if (AX->getBTFTypeTag() !=
        cast<BTFTagAttributedType>(Val: Y)->getAttr()->getBTFTypeTag())
      return QualType();
    return Ctx.getBTFTagAttributedType(BTFAttr: AX, Wrapped: Ctx.getQualifiedType(split: Underlying));
  }
  case Type::Auto: {
    const auto *AX = cast<AutoType>(Val: X), *AY = cast<AutoType>(Val: Y);

    AutoTypeKeyword KW = AX->getKeyword();
    if (KW != AY->getKeyword())
      return QualType();

    TemplateDecl *CD = ::getCommonDecl(X: AX->getTypeConstraintConcept(),
                                       Y: AY->getTypeConstraintConcept());
    SmallVector<TemplateArgument, 8> As;
    if (CD &&
        getCommonTemplateArguments(Ctx, R&: As, Xs: AX->getTypeConstraintArguments(),
                                   Ys: AY->getTypeConstraintArguments())) {
      CD = nullptr; // The arguments differ, so make it unconstrained.
      As.clear();
    }

    // Both auto types can't be dependent, otherwise they wouldn't have been
    // sugar. This implies they can't contain unexpanded packs either.
    return Ctx.getAutoType(DeducedType: Ctx.getQualifiedType(split: Underlying), Keyword: AX->getKeyword(),
                           /*IsDependent=*/false, /*IsPack=*/false, TypeConstraintConcept: CD, TypeConstraintArgs: As);
  }
  case Type::PackIndexing:
  case Type::Decltype:
    // These nodes are not uniqued; there is no common sugar to build.
    return QualType();
  case Type::DeducedTemplateSpecialization:
    // FIXME: Try to merge these.
    return QualType();
  case Type::MacroQualified: {
    const auto *MX = cast<MacroQualifiedType>(Val: X),
               *MY = cast<MacroQualifiedType>(Val: Y);
    const IdentifierInfo *IX = MX->getMacroIdentifier();
    if (IX != MY->getMacroIdentifier())
      return QualType();
    return Ctx.getMacroQualifiedType(UnderlyingTy: Ctx.getQualifiedType(split: Underlying), MacroII: IX);
  }
  case Type::SubstTemplateTypeParm: {
    const auto *SX = cast<SubstTemplateTypeParmType>(Val: X),
               *SY = cast<SubstTemplateTypeParmType>(Val: Y);
    // All of associated decl, index, and pack index must match for the
    // substitution records to be mergeable.
    Decl *CD =
        ::getCommonDecl(X: SX->getAssociatedDecl(), Y: SY->getAssociatedDecl());
    if (!CD)
      return QualType();
    unsigned Index = SX->getIndex();
    if (Index != SY->getIndex())
      return QualType();
    auto PackIndex = SX->getPackIndex();
    if (PackIndex != SY->getPackIndex())
      return QualType();
    return Ctx.getSubstTemplateTypeParmType(Replacement: Ctx.getQualifiedType(split: Underlying),
                                            AssociatedDecl: CD, Index, PackIndex,
                                            Final: SX->getFinal() && SY->getFinal());
  }
  case Type::ObjCTypeParam:
    // FIXME: Try to merge these.
    return QualType();
  case Type::Paren:
    return Ctx.getParenType(InnerType: Ctx.getQualifiedType(split: Underlying));

  case Type::TemplateSpecialization: {
    const auto *TX = cast<TemplateSpecializationType>(Val: X),
               *TY = cast<TemplateSpecializationType>(Val: Y);
    TemplateName CTN =
        ::getCommonTemplateName(Ctx, X: TX->getTemplateName(),
                                Y: TY->getTemplateName(), /*IgnoreDeduced=*/true);
    if (!CTN.getAsVoidPointer())
      return QualType();
    SmallVector<TemplateArgument, 8> As;
    if (getCommonTemplateArguments(Ctx, R&: As, Xs: TX->template_arguments(),
                                   Ys: TY->template_arguments()))
      return QualType();
    return Ctx.getTemplateSpecializationType(
        Keyword: getCommonTypeKeyword(X: TX, Y: TY, /*IsSame=*/false), Template: CTN, SpecifiedArgs: As,
        /*CanonicalArgs=*/{}, Underlying: Ctx.getQualifiedType(split: Underlying));
  }
  case Type::Typedef: {
    const auto *TX = cast<TypedefType>(Val: X), *TY = cast<TypedefType>(Val: Y);
    const TypedefNameDecl *CD = ::getCommonDecl(X: TX->getDecl(), Y: TY->getDecl());
    if (!CD)
      return QualType();
    return Ctx.getTypedefType(
        Keyword: ::getCommonTypeKeyword(X: TX, Y: TY, /*IsSame=*/false),
        Qualifier: ::getCommonQualifier(Ctx, X: TX, Y: TY, /*IsSame=*/false), Decl: CD,
        UnderlyingType: Ctx.getQualifiedType(split: Underlying));
  }
  case Type::TypeOf: {
    // The common sugar between two typeof expressions, where one is
    // potentially a typeof_unqual and the other is not, we unify to the
    // qualified type as that retains the most information along with the type.
    // We only return a typeof_unqual type when both types are unqual types.
    TypeOfKind Kind = TypeOfKind::Qualified;
    if (cast<TypeOfType>(Val: X)->getKind() == cast<TypeOfType>(Val: Y)->getKind() &&
        cast<TypeOfType>(Val: X)->getKind() == TypeOfKind::Unqualified)
      Kind = TypeOfKind::Unqualified;
    return Ctx.getTypeOfType(tofType: Ctx.getQualifiedType(split: Underlying), Kind);
  }
  case Type::TypeOfExpr:
    return QualType();

  case Type::UnaryTransform: {
    const auto *UX = cast<UnaryTransformType>(Val: X),
               *UY = cast<UnaryTransformType>(Val: Y);
    UnaryTransformType::UTTKind KX = UX->getUTTKind();
    if (KX != UY->getUTTKind())
      return QualType();
    QualType BX = UX->getBaseType(), BY = UY->getBaseType();
    if (!Ctx.hasSameType(T1: BX, T2: BY))
      return QualType();
    // FIXME: It's inefficient to have to unify the base types.
    return Ctx.getUnaryTransformType(BaseType: Ctx.getCommonSugaredType(X: BX, Y: BY),
                                     UnderlyingType: Ctx.getQualifiedType(split: Underlying), Kind: KX);
  }
  case Type::Using: {
    const auto *UX = cast<UsingType>(Val: X), *UY = cast<UsingType>(Val: Y);
    const UsingShadowDecl *CD = ::getCommonDecl(X: UX->getDecl(), Y: UY->getDecl());
    if (!CD)
      return QualType();
    return Ctx.getUsingType(Keyword: ::getCommonTypeKeyword(X: UX, Y: UY, /*IsSame=*/false),
                            Qualifier: ::getCommonQualifier(Ctx, X: UX, Y: UY, /*IsSame=*/false),
                            D: CD, UnderlyingType: Ctx.getQualifiedType(split: Underlying));
  }
  case Type::MemberPointer: {
    const auto *PX = cast<MemberPointerType>(Val: X),
               *PY = cast<MemberPointerType>(Val: Y);
    CXXRecordDecl *Cls = PX->getMostRecentCXXRecordDecl();
    assert(Cls == PY->getMostRecentCXXRecordDecl());
    return Ctx.getMemberPointerType(
        T: ::getCommonPointeeType(Ctx, X: PX, Y: PY),
        Qualifier: ::getCommonQualifier(Ctx, X: PX, Y: PY, /*IsSame=*/false), Cls);
  }
  case Type::CountAttributed: {
    const auto *DX = cast<CountAttributedType>(Val: X),
               *DY = cast<CountAttributedType>(Val: Y);
    if (DX->isCountInBytes() != DY->isCountInBytes())
      return QualType();
    if (DX->isOrNull() != DY->isOrNull())
      return QualType();
    Expr *CEX = DX->getCountExpr();
    Expr *CEY = DY->getCountExpr();
    ArrayRef<clang::TypeCoupledDeclRefInfo> CDX = DX->getCoupledDecls();
    if (Ctx.hasSameExpr(X: CEX, Y: CEY))
      return Ctx.getCountAttributedType(WrappedTy: Ctx.getQualifiedType(split: Underlying), CountExpr: CEX,
                                        CountInBytes: DX->isCountInBytes(), OrNull: DX->isOrNull(),
                                        DependentDecls: CDX);
    if (!CEX->isIntegerConstantExpr(Ctx) || !CEY->isIntegerConstantExpr(Ctx))
      return QualType();
    // Two declarations with the same integer constant may still differ in their
    // expression pointers, so we need to evaluate them.
    llvm::APSInt VX = *CEX->getIntegerConstantExpr(Ctx);
    llvm::APSInt VY = *CEY->getIntegerConstantExpr(Ctx);
    if (VX != VY)
      return QualType();
    return Ctx.getCountAttributedType(WrappedTy: Ctx.getQualifiedType(split: Underlying), CountExpr: CEX,
                                      CountInBytes: DX->isCountInBytes(), OrNull: DX->isOrNull(),
                                      DependentDecls: CDX);
  }
  case Type::PredefinedSugar:
    // If the kinds were the same, the nodes would already have compared
    // identical (asserted below), so there is no common sugar here.
    assert(cast<PredefinedSugarType>(X)->getKind() !=
           cast<PredefinedSugarType>(Y)->getKind());
    return QualType();
  }
  llvm_unreachable("Unhandled Type Class");
}
14609
14610static auto unwrapSugar(SplitQualType &T, Qualifiers &QTotal) {
14611 SmallVector<SplitQualType, 8> R;
14612 while (true) {
14613 QTotal.addConsistentQualifiers(qs: T.Quals);
14614 QualType NT = T.Ty->getLocallyUnqualifiedSingleStepDesugaredType();
14615 if (NT == QualType(T.Ty, 0))
14616 break;
14617 R.push_back(Elt: T);
14618 T = NT.split();
14619 }
14620 return R;
14621}
14622
14623QualType ASTContext::getCommonSugaredType(QualType X, QualType Y,
14624 bool Unqualified) const {
14625 assert(Unqualified ? hasSameUnqualifiedType(X, Y) : hasSameType(X, Y));
14626 if (X == Y)
14627 return X;
14628 if (!Unqualified) {
14629 if (X.isCanonical())
14630 return X;
14631 if (Y.isCanonical())
14632 return Y;
14633 }
14634
14635 SplitQualType SX = X.split(), SY = Y.split();
14636 Qualifiers QX, QY;
14637 // Desugar SX and SY, setting the sugar and qualifiers aside into Xs and Ys,
14638 // until we reach their underlying "canonical nodes". Note these are not
14639 // necessarily canonical types, as they may still have sugared properties.
14640 // QX and QY will store the sum of all qualifiers in Xs and Ys respectively.
14641 auto Xs = ::unwrapSugar(T&: SX, QTotal&: QX), Ys = ::unwrapSugar(T&: SY, QTotal&: QY);
14642
14643 // If this is an ArrayType, the element qualifiers are interchangeable with
14644 // the top level qualifiers.
14645 // * In case the canonical nodes are the same, the elements types are already
14646 // the same.
14647 // * Otherwise, the element types will be made the same, and any different
14648 // element qualifiers will be moved up to the top level qualifiers, per
14649 // 'getCommonArrayElementType'.
14650 // In both cases, this means there may be top level qualifiers which differ
14651 // between X and Y. If so, these differing qualifiers are redundant with the
14652 // element qualifiers, and can be removed without changing the canonical type.
14653 // The desired behaviour is the same as for the 'Unqualified' case here:
14654 // treat the redundant qualifiers as sugar, remove the ones which are not
14655 // common to both sides.
14656 bool KeepCommonQualifiers = Unqualified || isa<ArrayType>(Val: SX.Ty);
14657
14658 if (SX.Ty != SY.Ty) {
14659 // The canonical nodes differ. Build a common canonical node out of the two,
14660 // unifying their sugar. This may recurse back here.
14661 SX.Ty =
14662 ::getCommonNonSugarTypeNode(Ctx: *this, X: SX.Ty, QX, Y: SY.Ty, QY).getTypePtr();
14663 } else {
14664 // The canonical nodes were identical: We may have desugared too much.
14665 // Add any common sugar back in.
14666 while (!Xs.empty() && !Ys.empty() && Xs.back().Ty == Ys.back().Ty) {
14667 QX -= SX.Quals;
14668 QY -= SY.Quals;
14669 SX = Xs.pop_back_val();
14670 SY = Ys.pop_back_val();
14671 }
14672 }
14673 if (KeepCommonQualifiers)
14674 QX = Qualifiers::removeCommonQualifiers(L&: QX, R&: QY);
14675 else
14676 assert(QX == QY);
14677
14678 // Even though the remaining sugar nodes in Xs and Ys differ, some may be
14679 // related. Walk up these nodes, unifying them and adding the result.
14680 while (!Xs.empty() && !Ys.empty()) {
14681 auto Underlying = SplitQualType(
14682 SX.Ty, Qualifiers::removeCommonQualifiers(L&: SX.Quals, R&: SY.Quals));
14683 SX = Xs.pop_back_val();
14684 SY = Ys.pop_back_val();
14685 SX.Ty = ::getCommonSugarTypeNode(Ctx: *this, X: SX.Ty, Y: SY.Ty, Underlying)
14686 .getTypePtrOrNull();
14687 // Stop at the first pair which is unrelated.
14688 if (!SX.Ty) {
14689 SX.Ty = Underlying.Ty;
14690 break;
14691 }
14692 QX -= Underlying.Quals;
14693 };
14694
14695 // Add back the missing accumulated qualifiers, which were stripped off
14696 // with the sugar nodes we could not unify.
14697 QualType R = getQualifiedType(T: SX.Ty, Qs: QX);
14698 assert(Unqualified ? hasSameUnqualifiedType(R, X) : hasSameType(R, X));
14699 return R;
14700}
14701
14702QualType ASTContext::getCorrespondingUnsaturatedType(QualType Ty) const {
14703 assert(Ty->isFixedPointType());
14704
14705 if (Ty->isUnsaturatedFixedPointType())
14706 return Ty;
14707
14708 switch (Ty->castAs<BuiltinType>()->getKind()) {
14709 default:
14710 llvm_unreachable("Not a saturated fixed point type!");
14711 case BuiltinType::SatShortAccum:
14712 return ShortAccumTy;
14713 case BuiltinType::SatAccum:
14714 return AccumTy;
14715 case BuiltinType::SatLongAccum:
14716 return LongAccumTy;
14717 case BuiltinType::SatUShortAccum:
14718 return UnsignedShortAccumTy;
14719 case BuiltinType::SatUAccum:
14720 return UnsignedAccumTy;
14721 case BuiltinType::SatULongAccum:
14722 return UnsignedLongAccumTy;
14723 case BuiltinType::SatShortFract:
14724 return ShortFractTy;
14725 case BuiltinType::SatFract:
14726 return FractTy;
14727 case BuiltinType::SatLongFract:
14728 return LongFractTy;
14729 case BuiltinType::SatUShortFract:
14730 return UnsignedShortFractTy;
14731 case BuiltinType::SatUFract:
14732 return UnsignedFractTy;
14733 case BuiltinType::SatULongFract:
14734 return UnsignedLongFractTy;
14735 }
14736}
14737
// Returns the saturated fixed point type corresponding to 'Ty': each
// unsaturated builtin fixed point kind maps to the Sat* type of the same
// signedness and width class; an already-saturated type is returned as-is.
QualType ASTContext::getCorrespondingSaturatedType(QualType Ty) const {
  assert(Ty->isFixedPointType());

  if (Ty->isSaturatedFixedPointType()) return Ty;

  switch (Ty->castAs<BuiltinType>()->getKind()) {
    default:
      llvm_unreachable("Not a fixed point type!");
    case BuiltinType::ShortAccum:
      return SatShortAccumTy;
    case BuiltinType::Accum:
      return SatAccumTy;
    case BuiltinType::LongAccum:
      return SatLongAccumTy;
    case BuiltinType::UShortAccum:
      return SatUnsignedShortAccumTy;
    case BuiltinType::UAccum:
      return SatUnsignedAccumTy;
    case BuiltinType::ULongAccum:
      return SatUnsignedLongAccumTy;
    case BuiltinType::ShortFract:
      return SatShortFractTy;
    case BuiltinType::Fract:
      return SatFractTy;
    case BuiltinType::LongFract:
      return SatLongFractTy;
    case BuiltinType::UShortFract:
      return SatUnsignedShortFractTy;
    case BuiltinType::UFract:
      return SatUnsignedFractTy;
    case BuiltinType::ULongFract:
      return SatUnsignedLongFractTy;
  }
}
14772
14773LangAS ASTContext::getLangASForBuiltinAddressSpace(unsigned AS) const {
14774 if (LangOpts.OpenCL)
14775 return getTargetInfo().getOpenCLBuiltinAddressSpace(AS);
14776
14777 if (LangOpts.CUDA)
14778 return getTargetInfo().getCUDABuiltinAddressSpace(AS);
14779
14780 return getLangASFromTargetAS(TargetAS: AS);
14781}
14782
// Explicitly instantiate this in case a Redeclarable<T> is used from a TU that
// doesn't include ASTContext.h. Redeclarable stores its previous-decl link in
// a LazyGenerationalUpdatePtr keyed on ExternalASTSource::CompleteRedeclChain,
// so the out-of-line makeValue definition must be emitted somewhere.
template
clang::LazyGenerationalUpdatePtr<
    const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::ValueType
clang::LazyGenerationalUpdatePtr<
    const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::makeValue(
        const clang::ASTContext &Ctx, Decl *Value);
14791
// Returns the scale (number of fractional bits) of the given fixed point
// type. The scale is a target ABI property; a saturated type shares the
// scale of its unsaturated counterpart, so both kinds share each case.
unsigned char ASTContext::getFixedPointScale(QualType Ty) const {
  assert(Ty->isFixedPointType());

  const TargetInfo &Target = getTargetInfo();
  switch (Ty->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a fixed point type!");
  case BuiltinType::ShortAccum:
  case BuiltinType::SatShortAccum:
    return Target.getShortAccumScale();
  case BuiltinType::Accum:
  case BuiltinType::SatAccum:
    return Target.getAccumScale();
  case BuiltinType::LongAccum:
  case BuiltinType::SatLongAccum:
    return Target.getLongAccumScale();
  case BuiltinType::UShortAccum:
  case BuiltinType::SatUShortAccum:
    return Target.getUnsignedShortAccumScale();
  case BuiltinType::UAccum:
  case BuiltinType::SatUAccum:
    return Target.getUnsignedAccumScale();
  case BuiltinType::ULongAccum:
  case BuiltinType::SatULongAccum:
    return Target.getUnsignedLongAccumScale();
  case BuiltinType::ShortFract:
  case BuiltinType::SatShortFract:
    return Target.getShortFractScale();
  case BuiltinType::Fract:
  case BuiltinType::SatFract:
    return Target.getFractScale();
  case BuiltinType::LongFract:
  case BuiltinType::SatLongFract:
    return Target.getLongFractScale();
  case BuiltinType::UShortFract:
  case BuiltinType::SatUShortFract:
    return Target.getUnsignedShortFractScale();
  case BuiltinType::UFract:
  case BuiltinType::SatUFract:
    return Target.getUnsignedFractScale();
  case BuiltinType::ULongFract:
  case BuiltinType::SatULongFract:
    return Target.getUnsignedLongFractScale();
  }
}
14837
// Returns the number of integral bits of the given fixed point type, as
// defined by the target ABI. All _Fract variants carry no integral bits
// by construction, so they collapse into a single 0-returning group.
unsigned char ASTContext::getFixedPointIBits(QualType Ty) const {
  assert(Ty->isFixedPointType());

  const TargetInfo &Target = getTargetInfo();
  switch (Ty->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a fixed point type!");
  case BuiltinType::ShortAccum:
  case BuiltinType::SatShortAccum:
    return Target.getShortAccumIBits();
  case BuiltinType::Accum:
  case BuiltinType::SatAccum:
    return Target.getAccumIBits();
  case BuiltinType::LongAccum:
  case BuiltinType::SatLongAccum:
    return Target.getLongAccumIBits();
  case BuiltinType::UShortAccum:
  case BuiltinType::SatUShortAccum:
    return Target.getUnsignedShortAccumIBits();
  case BuiltinType::UAccum:
  case BuiltinType::SatUAccum:
    return Target.getUnsignedAccumIBits();
  case BuiltinType::ULongAccum:
  case BuiltinType::SatULongAccum:
    return Target.getUnsignedLongAccumIBits();
  // Fract types have no integral bits.
  case BuiltinType::ShortFract:
  case BuiltinType::SatShortFract:
  case BuiltinType::Fract:
  case BuiltinType::SatFract:
  case BuiltinType::LongFract:
  case BuiltinType::SatLongFract:
  case BuiltinType::UShortFract:
  case BuiltinType::SatUShortFract:
  case BuiltinType::UFract:
  case BuiltinType::SatUFract:
  case BuiltinType::ULongFract:
  case BuiltinType::SatULongFract:
    return 0;
  }
}
14878
14879llvm::FixedPointSemantics
14880ASTContext::getFixedPointSemantics(QualType Ty) const {
14881 assert((Ty->isFixedPointType() || Ty->isIntegerType()) &&
14882 "Can only get the fixed point semantics for a "
14883 "fixed point or integer type.");
14884 if (Ty->isIntegerType())
14885 return llvm::FixedPointSemantics::GetIntegerSemantics(
14886 Width: getIntWidth(T: Ty), IsSigned: Ty->isSignedIntegerType());
14887
14888 bool isSigned = Ty->isSignedFixedPointType();
14889 return llvm::FixedPointSemantics(
14890 static_cast<unsigned>(getTypeSize(T: Ty)), getFixedPointScale(Ty), isSigned,
14891 Ty->isSaturatedFixedPointType(),
14892 !isSigned && getTargetInfo().doUnsignedFixedPointTypesHavePadding());
14893}
14894
14895llvm::APFixedPoint ASTContext::getFixedPointMax(QualType Ty) const {
14896 assert(Ty->isFixedPointType());
14897 return llvm::APFixedPoint::getMax(Sema: getFixedPointSemantics(Ty));
14898}
14899
14900llvm::APFixedPoint ASTContext::getFixedPointMin(QualType Ty) const {
14901 assert(Ty->isFixedPointType());
14902 return llvm::APFixedPoint::getMin(Sema: getFixedPointSemantics(Ty));
14903}
14904
14905QualType ASTContext::getCorrespondingSignedFixedPointType(QualType Ty) const {
14906 assert(Ty->isUnsignedFixedPointType() &&
14907 "Expected unsigned fixed point type");
14908
14909 switch (Ty->castAs<BuiltinType>()->getKind()) {
14910 case BuiltinType::UShortAccum:
14911 return ShortAccumTy;
14912 case BuiltinType::UAccum:
14913 return AccumTy;
14914 case BuiltinType::ULongAccum:
14915 return LongAccumTy;
14916 case BuiltinType::SatUShortAccum:
14917 return SatShortAccumTy;
14918 case BuiltinType::SatUAccum:
14919 return SatAccumTy;
14920 case BuiltinType::SatULongAccum:
14921 return SatLongAccumTy;
14922 case BuiltinType::UShortFract:
14923 return ShortFractTy;
14924 case BuiltinType::UFract:
14925 return FractTy;
14926 case BuiltinType::ULongFract:
14927 return LongFractTy;
14928 case BuiltinType::SatUShortFract:
14929 return SatShortFractTy;
14930 case BuiltinType::SatUFract:
14931 return SatFractTy;
14932 case BuiltinType::SatULongFract:
14933 return SatLongFractTy;
14934 default:
14935 llvm_unreachable("Unexpected unsigned fixed point type");
14936 }
14937}
14938
14939// Given a list of FMV features, return a concatenated list of the
14940// corresponding backend features (which may contain duplicates).
14941static std::vector<std::string> getFMVBackendFeaturesFor(
14942 const llvm::SmallVectorImpl<StringRef> &FMVFeatStrings) {
14943 std::vector<std::string> BackendFeats;
14944 llvm::AArch64::ExtensionSet FeatureBits;
14945 for (StringRef F : FMVFeatStrings)
14946 if (auto FMVExt = llvm::AArch64::parseFMVExtension(Extension: F))
14947 if (FMVExt->ID)
14948 FeatureBits.enable(E: *FMVExt->ID);
14949 FeatureBits.toLLVMFeatureList(Features&: BackendFeats);
14950 return BackendFeats;
14951}
14952
14953ParsedTargetAttr
14954ASTContext::filterFunctionTargetAttrs(const TargetAttr *TD) const {
14955 assert(TD != nullptr);
14956 ParsedTargetAttr ParsedAttr = Target->parseTargetAttr(Str: TD->getFeaturesStr());
14957
14958 llvm::erase_if(C&: ParsedAttr.Features, P: [&](const std::string &Feat) {
14959 return !Target->isValidFeatureName(Feature: StringRef{Feat}.substr(Start: 1));
14960 });
14961 return ParsedAttr;
14962}
14963
14964void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
14965 const FunctionDecl *FD) const {
14966 if (FD)
14967 getFunctionFeatureMap(FeatureMap, GD: GlobalDecl().getWithDecl(D: FD));
14968 else
14969 Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(),
14970 CPU: Target->getTargetOpts().CPU,
14971 FeatureVec: Target->getTargetOpts().Features);
14972}
14973
// Fills in the supplied string map with the set of target features for the
// passed in function. The feature set is derived, in priority order, from a
// target attribute, a cpu_specific attribute, a target_clones attribute, or
// a target_version attribute; otherwise the global feature map is copied.
void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
                                       GlobalDecl GD) const {
  StringRef TargetCPU = Target->getTargetOpts().CPU;
  const FunctionDecl *FD = GD.getDecl()->getAsFunction();
  if (const auto *TD = FD->getAttr<TargetAttr>()) {
    // __attribute__((target(...))): parse and keep only features the target
    // recognizes.
    ParsedTargetAttr ParsedAttr = filterFunctionTargetAttrs(TD);

    // Make a copy of the features as passed on the command line into the
    // beginning of the additional features from the function to override.
    // AArch64 handles command line option features in parseTargetAttr().
    if (!Target->getTriple().isAArch64())
      ParsedAttr.Features.insert(
          position: ParsedAttr.Features.begin(),
          first: Target->getTargetOpts().FeaturesAsWritten.begin(),
          last: Target->getTargetOpts().FeaturesAsWritten.end());

    // The attribute may also override the CPU, when it names a valid one.
    if (ParsedAttr.CPU != "" && Target->isValidCPUName(Name: ParsedAttr.CPU))
      TargetCPU = ParsedAttr.CPU;

    // Now populate the feature map, first with the TargetCPU which is either
    // the default or a new one from the target attribute string. Then we'll use
    // the passed in features (FeaturesAsWritten) along with the new ones from
    // the attribute.
    Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU,
                           FeatureVec: ParsedAttr.Features);
  } else if (const auto *SD = FD->getAttr<CPUSpecificAttr>()) {
    // __attribute__((cpu_specific(...))): use the dispatch features for the
    // CPU named by this multiversion variant, with command line features
    // prepended so the variant's features take precedence.
    llvm::SmallVector<StringRef, 32> FeaturesTmp;
    Target->getCPUSpecificCPUDispatchFeatures(
        Name: SD->getCPUName(Index: GD.getMultiVersionIndex())->getName(), Features&: FeaturesTmp);
    std::vector<std::string> Features(FeaturesTmp.begin(), FeaturesTmp.end());
    Features.insert(position: Features.begin(),
                    first: Target->getTargetOpts().FeaturesAsWritten.begin(),
                    last: Target->getTargetOpts().FeaturesAsWritten.end());
    Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU, FeatureVec: Features);
  } else if (const auto *TC = FD->getAttr<TargetClonesAttr>()) {
    // __attribute__((target_clones(...))): the feature string format is
    // target specific, so each supported triple is handled separately.
    if (Target->getTriple().isAArch64()) {
      // AArch64 clones name FMV features, which are translated to backend
      // features before initializing the map.
      llvm::SmallVector<StringRef, 8> Feats;
      TC->getFeatures(Out&: Feats, Index: GD.getMultiVersionIndex());
      std::vector<std::string> Features = getFMVBackendFeaturesFor(FMVFeatStrings: Feats);
      Features.insert(position: Features.begin(),
                      first: Target->getTargetOpts().FeaturesAsWritten.begin(),
                      last: Target->getTargetOpts().FeaturesAsWritten.end());
      Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU, FeatureVec: Features);
    } else if (Target->getTriple().isRISCV()) {
      // RISC-V clones use a target-attribute-like version string; "default"
      // contributes no extra features.
      StringRef VersionStr = TC->getFeatureStr(Index: GD.getMultiVersionIndex());
      std::vector<std::string> Features;
      if (VersionStr != "default") {
        ParsedTargetAttr ParsedAttr = Target->parseTargetAttr(Str: VersionStr);
        Features.insert(position: Features.begin(), first: ParsedAttr.Features.begin(),
                        last: ParsedAttr.Features.end());
      }
      Features.insert(position: Features.begin(),
                      first: Target->getTargetOpts().FeaturesAsWritten.begin(),
                      last: Target->getTargetOpts().FeaturesAsWritten.end());
      Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU, FeatureVec: Features);
    } else {
      // Other targets (e.g. X86): "arch=<cpu>" selects a CPU, any other
      // non-default string is treated as a single '+feature'.
      std::vector<std::string> Features;
      StringRef VersionStr = TC->getFeatureStr(Index: GD.getMultiVersionIndex());
      if (VersionStr.starts_with(Prefix: "arch="))
        TargetCPU = VersionStr.drop_front(N: sizeof("arch=") - 1);
      else if (VersionStr != "default")
        Features.push_back(x: (StringRef{"+"} + VersionStr).str());
      Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU, FeatureVec: Features);
    }
  } else if (const auto *TV = FD->getAttr<TargetVersionAttr>()) {
    // __attribute__((target_version(...))): RISC-V parses the name as a
    // target attribute; AArch64 names FMV features.
    std::vector<std::string> Features;
    if (Target->getTriple().isRISCV()) {
      ParsedTargetAttr ParsedAttr = Target->parseTargetAttr(Str: TV->getName());
      Features.insert(position: Features.begin(), first: ParsedAttr.Features.begin(),
                      last: ParsedAttr.Features.end());
    } else {
      assert(Target->getTriple().isAArch64());
      llvm::SmallVector<StringRef, 8> Feats;
      TV->getFeatures(Out&: Feats);
      Features = getFMVBackendFeaturesFor(FMVFeatStrings: Feats);
    }
    Features.insert(position: Features.begin(),
                    first: Target->getTargetOpts().FeaturesAsWritten.begin(),
                    last: Target->getTargetOpts().FeaturesAsWritten.end());
    Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU, FeatureVec: Features);
  } else {
    // No relevant attribute: the function uses the global feature map.
    FeatureMap = Target->getTargetOpts().FeatureMap;
  }
}
15060
15061static SYCLKernelInfo BuildSYCLKernelInfo(ASTContext &Context,
15062 CanQualType KernelNameType,
15063 const FunctionDecl *FD) {
15064 // Host and device compilation may use different ABIs and different ABIs
15065 // may allocate name mangling discriminators differently. A discriminator
15066 // override is used to ensure consistent discriminator allocation across
15067 // host and device compilation.
15068 auto DeviceDiscriminatorOverrider =
15069 [](ASTContext &Ctx, const NamedDecl *ND) -> UnsignedOrNone {
15070 if (const auto *RD = dyn_cast<CXXRecordDecl>(Val: ND))
15071 if (RD->isLambda())
15072 return RD->getDeviceLambdaManglingNumber();
15073 return std::nullopt;
15074 };
15075 std::unique_ptr<MangleContext> MC{ItaniumMangleContext::create(
15076 Context, Diags&: Context.getDiagnostics(), Discriminator: DeviceDiscriminatorOverrider)};
15077
15078 // Construct a mangled name for the SYCL kernel caller offload entry point.
15079 // FIXME: The Itanium typeinfo mangling (_ZTS<type>) is currently used to
15080 // name the SYCL kernel caller offload entry point function. This mangling
15081 // does not suffice to clearly identify symbols that correspond to SYCL
15082 // kernel caller functions, nor is this mangling natural for targets that
15083 // use a non-Itanium ABI.
15084 std::string Buffer;
15085 Buffer.reserve(res_arg: 128);
15086 llvm::raw_string_ostream Out(Buffer);
15087 MC->mangleCanonicalTypeName(T: KernelNameType, Out);
15088 std::string KernelName = Out.str();
15089
15090 return {KernelNameType, FD, KernelName};
15091}
15092
15093void ASTContext::registerSYCLEntryPointFunction(FunctionDecl *FD) {
15094 // If the function declaration to register is invalid or dependent, the
15095 // registration attempt is ignored.
15096 if (FD->isInvalidDecl() || FD->isTemplated())
15097 return;
15098
15099 const auto *SKEPAttr = FD->getAttr<SYCLKernelEntryPointAttr>();
15100 assert(SKEPAttr && "Missing sycl_kernel_entry_point attribute");
15101
15102 // Be tolerant of multiple registration attempts so long as each attempt
15103 // is for the same entity. Callers are obligated to detect and diagnose
15104 // conflicting kernel names prior to calling this function.
15105 CanQualType KernelNameType = getCanonicalType(T: SKEPAttr->getKernelName());
15106 auto IT = SYCLKernels.find(Val: KernelNameType);
15107 assert((IT == SYCLKernels.end() ||
15108 declaresSameEntity(FD, IT->second.getKernelEntryPointDecl())) &&
15109 "SYCL kernel name conflict");
15110 (void)IT;
15111 SYCLKernels.insert(KV: std::make_pair(
15112 x&: KernelNameType, y: BuildSYCLKernelInfo(Context&: *this, KernelNameType, FD)));
15113}
15114
15115const SYCLKernelInfo &ASTContext::getSYCLKernelInfo(QualType T) const {
15116 CanQualType KernelNameType = getCanonicalType(T);
15117 return SYCLKernels.at(Val: KernelNameType);
15118}
15119
15120const SYCLKernelInfo *ASTContext::findSYCLKernelInfo(QualType T) const {
15121 CanQualType KernelNameType = getCanonicalType(T);
15122 auto IT = SYCLKernels.find(Val: KernelNameType);
15123 if (IT != SYCLKernels.end())
15124 return &IT->second;
15125 return nullptr;
15126}
15127
15128OMPTraitInfo &ASTContext::getNewOMPTraitInfo() {
15129 OMPTraitInfoVector.emplace_back(Args: new OMPTraitInfo());
15130 return *OMPTraitInfoVector.back();
15131}
15132
15133const StreamingDiagnostic &clang::
15134operator<<(const StreamingDiagnostic &DB,
15135 const ASTContext::SectionInfo &Section) {
15136 if (Section.Decl)
15137 return DB << Section.Decl;
15138 return DB << "a prior #pragma section";
15139}
15140
15141bool ASTContext::mayExternalize(const Decl *D) const {
15142 bool IsInternalVar =
15143 isa<VarDecl>(Val: D) &&
15144 basicGVALinkageForVariable(Context: *this, VD: cast<VarDecl>(Val: D)) == GVA_Internal;
15145 bool IsExplicitDeviceVar = (D->hasAttr<CUDADeviceAttr>() &&
15146 !D->getAttr<CUDADeviceAttr>()->isImplicit()) ||
15147 (D->hasAttr<CUDAConstantAttr>() &&
15148 !D->getAttr<CUDAConstantAttr>()->isImplicit());
15149 // CUDA/HIP: managed variables need to be externalized since it is
15150 // a declaration in IR, therefore cannot have internal linkage. Kernels in
15151 // anonymous name space needs to be externalized to avoid duplicate symbols.
15152 return (IsInternalVar &&
15153 (D->hasAttr<HIPManagedAttr>() || IsExplicitDeviceVar)) ||
15154 (D->hasAttr<CUDAGlobalAttr>() &&
15155 basicGVALinkageForFunction(Context: *this, FD: cast<FunctionDecl>(Val: D)) ==
15156 GVA_Internal);
15157}
15158
15159bool ASTContext::shouldExternalize(const Decl *D) const {
15160 return mayExternalize(D) &&
15161 (D->hasAttr<HIPManagedAttr>() || D->hasAttr<CUDAGlobalAttr>() ||
15162 CUDADeviceVarODRUsedByHost.count(V: cast<VarDecl>(Val: D)));
15163}
15164
15165StringRef ASTContext::getCUIDHash() const {
15166 if (!CUIDHash.empty())
15167 return CUIDHash;
15168 if (LangOpts.CUID.empty())
15169 return StringRef();
15170 CUIDHash = llvm::utohexstr(X: llvm::MD5Hash(Str: LangOpts.CUID), /*LowerCase=*/true);
15171 return CUIDHash;
15172}
15173
15174const CXXRecordDecl *
15175ASTContext::baseForVTableAuthentication(const CXXRecordDecl *ThisClass) const {
15176 assert(ThisClass);
15177 assert(ThisClass->isPolymorphic());
15178 const CXXRecordDecl *PrimaryBase = ThisClass;
15179 while (1) {
15180 assert(PrimaryBase);
15181 assert(PrimaryBase->isPolymorphic());
15182 auto &Layout = getASTRecordLayout(D: PrimaryBase);
15183 auto Base = Layout.getPrimaryBase();
15184 if (!Base || Base == PrimaryBase || !Base->isPolymorphic())
15185 break;
15186 PrimaryBase = Base;
15187 }
15188 return PrimaryBase;
15189}
15190
// Determines whether the thunk with the given mangled name should use the
// abbreviated (override-info-elided) mangling. For each elided name, only
// the lexicographically first of the full manglings that collapse onto it
// is abbreviated; results are cached per virtual method.
bool ASTContext::useAbbreviatedThunkName(GlobalDecl VirtualMethodDecl,
                                         StringRef MangledName) {
  auto *Method = cast<CXXMethodDecl>(Val: VirtualMethodDecl.getDecl());
  assert(Method->isVirtual());
  bool DefaultIncludesPointerAuth =
      LangOpts.PointerAuthCalls || LangOpts.PointerAuthIntrinsics;

  // Without pointer auth, every thunk uses the abbreviated mangling.
  if (!DefaultIncludesPointerAuth)
    return true;

  // Answer from the cache if this method's thunks were already analyzed.
  auto Existing = ThunksToBeAbbreviated.find(Val: VirtualMethodDecl);
  if (Existing != ThunksToBeAbbreviated.end())
    return Existing->second.contains(key: MangledName.str());

  // Mangle every thunk of this method twice (with and without override
  // info elided), grouping full names by the elided name they collapse to.
  std::unique_ptr<MangleContext> Mangler(createMangleContext());
  llvm::StringMap<llvm::SmallVector<std::string, 2>> Thunks;
  auto VtableContext = getVTableContext();
  if (const auto *ThunkInfos = VtableContext->getThunkInfo(GD: VirtualMethodDecl)) {
    // Destructor thunks use a dedicated mangling entry point.
    auto *Destructor = dyn_cast<CXXDestructorDecl>(Val: Method);
    for (const auto &Thunk : *ThunkInfos) {
      SmallString<256> ElidedName;
      llvm::raw_svector_ostream ElidedNameStream(ElidedName);
      if (Destructor)
        Mangler->mangleCXXDtorThunk(DD: Destructor, Type: VirtualMethodDecl.getDtorType(),
                                    Thunk, /* elideOverrideInfo */ ElideOverrideInfo: true,
                                    ElidedNameStream);
      else
        Mangler->mangleThunk(MD: Method, Thunk, /* elideOverrideInfo */ ElideOverrideInfo: true,
                             ElidedNameStream);
      SmallString<256> MangledName;
      llvm::raw_svector_ostream mangledNameStream(MangledName);
      if (Destructor)
        Mangler->mangleCXXDtorThunk(DD: Destructor, Type: VirtualMethodDecl.getDtorType(),
                                    Thunk, /* elideOverrideInfo */ ElideOverrideInfo: false,
                                    mangledNameStream);
      else
        Mangler->mangleThunk(MD: Method, Thunk, /* elideOverrideInfo */ ElideOverrideInfo: false,
                             mangledNameStream);

      Thunks[ElidedName].push_back(Elt: std::string(MangledName));
    }
  }
  // Within each group, only the smallest full mangled name gets abbreviated;
  // the sort makes that choice deterministic.
  llvm::StringSet<> SimplifiedThunkNames;
  for (auto &ThunkList : Thunks) {
    llvm::sort(C&: ThunkList.second);
    SimplifiedThunkNames.insert(key: ThunkList.second[0]);
  }
  bool Result = SimplifiedThunkNames.contains(key: MangledName);
  ThunksToBeAbbreviated[VirtualMethodDecl] = std::move(SimplifiedThunkNames);
  return Result;
}
15242