//===- ASTContext.cpp - Context to hold long-lived AST nodes --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the ASTContext interface.
//
//===----------------------------------------------------------------------===//
12
13#include "clang/AST/ASTContext.h"
14#include "ByteCode/Context.h"
15#include "CXXABI.h"
16#include "clang/AST/APValue.h"
17#include "clang/AST/ASTConcept.h"
18#include "clang/AST/ASTMutationListener.h"
19#include "clang/AST/ASTStructuralEquivalence.h"
20#include "clang/AST/ASTTypeTraits.h"
21#include "clang/AST/Attr.h"
22#include "clang/AST/AttrIterator.h"
23#include "clang/AST/CharUnits.h"
24#include "clang/AST/Comment.h"
25#include "clang/AST/Decl.h"
26#include "clang/AST/DeclBase.h"
27#include "clang/AST/DeclCXX.h"
28#include "clang/AST/DeclContextInternals.h"
29#include "clang/AST/DeclObjC.h"
30#include "clang/AST/DeclOpenMP.h"
31#include "clang/AST/DeclTemplate.h"
32#include "clang/AST/DeclarationName.h"
33#include "clang/AST/DependenceFlags.h"
34#include "clang/AST/Expr.h"
35#include "clang/AST/ExprCXX.h"
36#include "clang/AST/ExternalASTSource.h"
37#include "clang/AST/Mangle.h"
38#include "clang/AST/MangleNumberingContext.h"
39#include "clang/AST/NestedNameSpecifier.h"
40#include "clang/AST/ParentMapContext.h"
41#include "clang/AST/RawCommentList.h"
42#include "clang/AST/RecordLayout.h"
43#include "clang/AST/Stmt.h"
44#include "clang/AST/TemplateBase.h"
45#include "clang/AST/TemplateName.h"
46#include "clang/AST/Type.h"
47#include "clang/AST/TypeLoc.h"
48#include "clang/AST/UnresolvedSet.h"
49#include "clang/AST/VTableBuilder.h"
50#include "clang/Basic/AddressSpaces.h"
51#include "clang/Basic/Builtins.h"
52#include "clang/Basic/CommentOptions.h"
53#include "clang/Basic/ExceptionSpecificationType.h"
54#include "clang/Basic/IdentifierTable.h"
55#include "clang/Basic/LLVM.h"
56#include "clang/Basic/LangOptions.h"
57#include "clang/Basic/Linkage.h"
58#include "clang/Basic/Module.h"
59#include "clang/Basic/NoSanitizeList.h"
60#include "clang/Basic/ObjCRuntime.h"
61#include "clang/Basic/ProfileList.h"
62#include "clang/Basic/SourceLocation.h"
63#include "clang/Basic/SourceManager.h"
64#include "clang/Basic/Specifiers.h"
65#include "clang/Basic/TargetCXXABI.h"
66#include "clang/Basic/TargetInfo.h"
67#include "clang/Basic/XRayLists.h"
68#include "llvm/ADT/APFixedPoint.h"
69#include "llvm/ADT/APInt.h"
70#include "llvm/ADT/APSInt.h"
71#include "llvm/ADT/ArrayRef.h"
72#include "llvm/ADT/DenseMap.h"
73#include "llvm/ADT/DenseSet.h"
74#include "llvm/ADT/FoldingSet.h"
75#include "llvm/ADT/PointerUnion.h"
76#include "llvm/ADT/STLExtras.h"
77#include "llvm/ADT/SmallPtrSet.h"
78#include "llvm/ADT/SmallVector.h"
79#include "llvm/ADT/StringExtras.h"
80#include "llvm/ADT/StringRef.h"
81#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
82#include "llvm/Support/Capacity.h"
83#include "llvm/Support/Casting.h"
84#include "llvm/Support/Compiler.h"
85#include "llvm/Support/ErrorHandling.h"
86#include "llvm/Support/MD5.h"
87#include "llvm/Support/MathExtras.h"
88#include "llvm/Support/SipHash.h"
89#include "llvm/Support/raw_ostream.h"
90#include "llvm/TargetParser/AArch64TargetParser.h"
91#include "llvm/TargetParser/Triple.h"
92#include <algorithm>
93#include <cassert>
94#include <cstddef>
95#include <cstdint>
96#include <cstdlib>
97#include <map>
98#include <memory>
99#include <optional>
100#include <string>
101#include <tuple>
102#include <utility>
103
104using namespace clang;
105
// Orders the floating-point types by increasing conversion rank; the
// enumerator values are compared numerically, so the declaration order here
// is significant and must not be changed.
enum FloatingRank {
  BFloat16Rank,
  Float16Rank,
  HalfRank,
  FloatRank,
  DoubleRank,
  LongDoubleRank,
  Float128Rank,
  Ibm128Rank
};
116
117/// \returns The locations that are relevant when searching for Doc comments
118/// related to \p D.
119static SmallVector<SourceLocation, 2>
120getDeclLocsForCommentSearch(const Decl *D, SourceManager &SourceMgr) {
121 assert(D);
122
123 // User can not attach documentation to implicit declarations.
124 if (D->isImplicit())
125 return {};
126
127 // User can not attach documentation to implicit instantiations.
128 if (const auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
129 if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
130 return {};
131 }
132
133 if (const auto *VD = dyn_cast<VarDecl>(Val: D)) {
134 if (VD->isStaticDataMember() &&
135 VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
136 return {};
137 }
138
139 if (const auto *CRD = dyn_cast<CXXRecordDecl>(Val: D)) {
140 if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
141 return {};
142 }
143
144 if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(Val: D)) {
145 TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
146 if (TSK == TSK_ImplicitInstantiation ||
147 TSK == TSK_Undeclared)
148 return {};
149 }
150
151 if (const auto *ED = dyn_cast<EnumDecl>(Val: D)) {
152 if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
153 return {};
154 }
155 if (const auto *TD = dyn_cast<TagDecl>(Val: D)) {
156 // When tag declaration (but not definition!) is part of the
157 // decl-specifier-seq of some other declaration, it doesn't get comment
158 if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
159 return {};
160 }
161 // TODO: handle comments for function parameters properly.
162 if (isa<ParmVarDecl>(Val: D))
163 return {};
164
165 // TODO: we could look up template parameter documentation in the template
166 // documentation.
167 if (isa<TemplateTypeParmDecl>(Val: D) ||
168 isa<NonTypeTemplateParmDecl>(Val: D) ||
169 isa<TemplateTemplateParmDecl>(Val: D))
170 return {};
171
172 SmallVector<SourceLocation, 2> Locations;
173 // Find declaration location.
174 // For Objective-C declarations we generally don't expect to have multiple
175 // declarators, thus use declaration starting location as the "declaration
176 // location".
177 // For all other declarations multiple declarators are used quite frequently,
178 // so we use the location of the identifier as the "declaration location".
179 SourceLocation BaseLocation;
180 if (isa<ObjCMethodDecl>(Val: D) || isa<ObjCContainerDecl>(Val: D) ||
181 isa<ObjCPropertyDecl>(Val: D) || isa<RedeclarableTemplateDecl>(Val: D) ||
182 isa<ClassTemplateSpecializationDecl>(Val: D) ||
183 // Allow association with Y across {} in `typedef struct X {} Y`.
184 isa<TypedefDecl>(Val: D))
185 BaseLocation = D->getBeginLoc();
186 else
187 BaseLocation = D->getLocation();
188
189 if (!D->getLocation().isMacroID()) {
190 Locations.emplace_back(Args&: BaseLocation);
191 } else {
192 const auto *DeclCtx = D->getDeclContext();
193
194 // When encountering definitions generated from a macro (that are not
195 // contained by another declaration in the macro) we need to try and find
196 // the comment at the location of the expansion but if there is no comment
197 // there we should retry to see if there is a comment inside the macro as
198 // well. To this end we return first BaseLocation to first look at the
199 // expansion site, the second value is the spelling location of the
200 // beginning of the declaration defined inside the macro.
201 if (!(DeclCtx &&
202 Decl::castFromDeclContext(DeclCtx)->getLocation().isMacroID())) {
203 Locations.emplace_back(Args: SourceMgr.getExpansionLoc(Loc: BaseLocation));
204 }
205
206 // We use Decl::getBeginLoc() and not just BaseLocation here to ensure that
207 // we don't refer to the macro argument location at the expansion site (this
208 // can happen if the name's spelling is provided via macro argument), and
209 // always to the declaration itself.
210 Locations.emplace_back(Args: SourceMgr.getSpellingLoc(Loc: D->getBeginLoc()));
211 }
212
213 return Locations;
214}
215
216RawComment *ASTContext::getRawCommentForDeclNoCacheImpl(
217 const Decl *D, const SourceLocation RepresentativeLocForDecl,
218 const std::map<unsigned, RawComment *> &CommentsInTheFile) const {
219 // If the declaration doesn't map directly to a location in a file, we
220 // can't find the comment.
221 if (RepresentativeLocForDecl.isInvalid() ||
222 !RepresentativeLocForDecl.isFileID())
223 return nullptr;
224
225 // If there are no comments anywhere, we won't find anything.
226 if (CommentsInTheFile.empty())
227 return nullptr;
228
229 // Decompose the location for the declaration and find the beginning of the
230 // file buffer.
231 const FileIDAndOffset DeclLocDecomp =
232 SourceMgr.getDecomposedLoc(Loc: RepresentativeLocForDecl);
233
234 // Slow path.
235 auto OffsetCommentBehindDecl =
236 CommentsInTheFile.lower_bound(x: DeclLocDecomp.second);
237
238 // First check whether we have a trailing comment.
239 if (OffsetCommentBehindDecl != CommentsInTheFile.end()) {
240 RawComment *CommentBehindDecl = OffsetCommentBehindDecl->second;
241 if ((CommentBehindDecl->isDocumentation() ||
242 LangOpts.CommentOpts.ParseAllComments) &&
243 CommentBehindDecl->isTrailingComment() &&
244 (isa<FieldDecl>(Val: D) || isa<EnumConstantDecl>(Val: D) || isa<VarDecl>(Val: D) ||
245 isa<ObjCMethodDecl>(Val: D) || isa<ObjCPropertyDecl>(Val: D))) {
246
247 // Check that Doxygen trailing comment comes after the declaration, starts
248 // on the same line and in the same file as the declaration.
249 if (SourceMgr.getLineNumber(FID: DeclLocDecomp.first, FilePos: DeclLocDecomp.second) ==
250 Comments.getCommentBeginLine(C: CommentBehindDecl, File: DeclLocDecomp.first,
251 Offset: OffsetCommentBehindDecl->first)) {
252 return CommentBehindDecl;
253 }
254 }
255 }
256
257 // The comment just after the declaration was not a trailing comment.
258 // Let's look at the previous comment.
259 if (OffsetCommentBehindDecl == CommentsInTheFile.begin())
260 return nullptr;
261
262 auto OffsetCommentBeforeDecl = --OffsetCommentBehindDecl;
263 RawComment *CommentBeforeDecl = OffsetCommentBeforeDecl->second;
264
265 // Check that we actually have a non-member Doxygen comment.
266 if (!(CommentBeforeDecl->isDocumentation() ||
267 LangOpts.CommentOpts.ParseAllComments) ||
268 CommentBeforeDecl->isTrailingComment())
269 return nullptr;
270
271 // Decompose the end of the comment.
272 const unsigned CommentEndOffset =
273 Comments.getCommentEndOffset(C: CommentBeforeDecl);
274
275 // Get the corresponding buffer.
276 bool Invalid = false;
277 const char *Buffer = SourceMgr.getBufferData(FID: DeclLocDecomp.first,
278 Invalid: &Invalid).data();
279 if (Invalid)
280 return nullptr;
281
282 // Extract text between the comment and declaration.
283 StringRef Text(Buffer + CommentEndOffset,
284 DeclLocDecomp.second - CommentEndOffset);
285
286 // There should be no other declarations or preprocessor directives between
287 // comment and declaration.
288 if (Text.find_last_of(Chars: ";{}#@") != StringRef::npos)
289 return nullptr;
290
291 return CommentBeforeDecl;
292}
293
294RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
295 const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr);
296
297 for (const auto DeclLoc : DeclLocs) {
298 // If the declaration doesn't map directly to a location in a file, we
299 // can't find the comment.
300 if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
301 continue;
302
303 if (ExternalSource && !CommentsLoaded) {
304 ExternalSource->ReadComments();
305 CommentsLoaded = true;
306 }
307
308 if (Comments.empty())
309 continue;
310
311 const FileID File = SourceMgr.getDecomposedLoc(Loc: DeclLoc).first;
312 if (!File.isValid())
313 continue;
314
315 const auto CommentsInThisFile = Comments.getCommentsInFile(File);
316 if (!CommentsInThisFile || CommentsInThisFile->empty())
317 continue;
318
319 if (RawComment *Comment =
320 getRawCommentForDeclNoCacheImpl(D, RepresentativeLocForDecl: DeclLoc, CommentsInTheFile: *CommentsInThisFile))
321 return Comment;
322 }
323
324 return nullptr;
325}
326
327void ASTContext::addComment(const RawComment &RC) {
328 assert(LangOpts.RetainCommentsFromSystemHeaders ||
329 !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin()));
330 Comments.addComment(RC, CommentOpts: LangOpts.CommentOpts, Allocator&: BumpAlloc);
331}
332
333const RawComment *ASTContext::getRawCommentForAnyRedecl(
334 const Decl *D,
335 const Decl **OriginalDecl) const {
336 if (!D) {
337 if (OriginalDecl)
338 OriginalDecl = nullptr;
339 return nullptr;
340 }
341
342 D = &adjustDeclToTemplate(D: *D);
343
344 // Any comment directly attached to D?
345 {
346 auto DeclComment = DeclRawComments.find(Val: D);
347 if (DeclComment != DeclRawComments.end()) {
348 if (OriginalDecl)
349 *OriginalDecl = D;
350 return DeclComment->second;
351 }
352 }
353
354 // Any comment attached to any redeclaration of D?
355 const Decl *CanonicalD = D->getCanonicalDecl();
356 if (!CanonicalD)
357 return nullptr;
358
359 {
360 auto RedeclComment = RedeclChainComments.find(Val: CanonicalD);
361 if (RedeclComment != RedeclChainComments.end()) {
362 if (OriginalDecl)
363 *OriginalDecl = RedeclComment->second;
364 auto CommentAtRedecl = DeclRawComments.find(Val: RedeclComment->second);
365 assert(CommentAtRedecl != DeclRawComments.end() &&
366 "This decl is supposed to have comment attached.");
367 return CommentAtRedecl->second;
368 }
369 }
370
371 // Any redeclarations of D that we haven't checked for comments yet?
372 const Decl *LastCheckedRedecl = [&]() {
373 const Decl *LastChecked = CommentlessRedeclChains.lookup(Val: CanonicalD);
374 bool CanUseCommentlessCache = false;
375 if (LastChecked) {
376 for (auto *Redecl : CanonicalD->redecls()) {
377 if (Redecl == D) {
378 CanUseCommentlessCache = true;
379 break;
380 }
381 if (Redecl == LastChecked)
382 break;
383 }
384 }
385 // FIXME: This could be improved so that even if CanUseCommentlessCache
386 // is false, once we've traversed past CanonicalD we still skip ahead
387 // LastChecked.
388 return CanUseCommentlessCache ? LastChecked : nullptr;
389 }();
390
391 for (const Decl *Redecl : D->redecls()) {
392 assert(Redecl);
393 // Skip all redeclarations that have been checked previously.
394 if (LastCheckedRedecl) {
395 if (LastCheckedRedecl == Redecl) {
396 LastCheckedRedecl = nullptr;
397 }
398 continue;
399 }
400 const RawComment *RedeclComment = getRawCommentForDeclNoCache(D: Redecl);
401 if (RedeclComment) {
402 cacheRawCommentForDecl(OriginalD: *Redecl, Comment: *RedeclComment);
403 if (OriginalDecl)
404 *OriginalDecl = Redecl;
405 return RedeclComment;
406 }
407 CommentlessRedeclChains[CanonicalD] = Redecl;
408 }
409
410 if (OriginalDecl)
411 *OriginalDecl = nullptr;
412 return nullptr;
413}
414
415void ASTContext::cacheRawCommentForDecl(const Decl &OriginalD,
416 const RawComment &Comment) const {
417 assert(Comment.isDocumentation() || LangOpts.CommentOpts.ParseAllComments);
418 DeclRawComments.try_emplace(Key: &OriginalD, Args: &Comment);
419 const Decl *const CanonicalDecl = OriginalD.getCanonicalDecl();
420 RedeclChainComments.try_emplace(Key: CanonicalDecl, Args: &OriginalD);
421 CommentlessRedeclChains.erase(Val: CanonicalDecl);
422}
423
424static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
425 SmallVectorImpl<const NamedDecl *> &Redeclared) {
426 const DeclContext *DC = ObjCMethod->getDeclContext();
427 if (const auto *IMD = dyn_cast<ObjCImplDecl>(Val: DC)) {
428 const ObjCInterfaceDecl *ID = IMD->getClassInterface();
429 if (!ID)
430 return;
431 // Add redeclared method here.
432 for (const auto *Ext : ID->known_extensions()) {
433 if (ObjCMethodDecl *RedeclaredMethod =
434 Ext->getMethod(Sel: ObjCMethod->getSelector(),
435 isInstance: ObjCMethod->isInstanceMethod()))
436 Redeclared.push_back(Elt: RedeclaredMethod);
437 }
438 }
439}
440
441void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls,
442 const Preprocessor *PP) {
443 if (Comments.empty() || Decls.empty())
444 return;
445
446 FileID File;
447 for (const Decl *D : Decls) {
448 if (D->isInvalidDecl())
449 continue;
450
451 D = &adjustDeclToTemplate(D: *D);
452 SourceLocation Loc = D->getLocation();
453 if (Loc.isValid()) {
454 // See if there are any new comments that are not attached to a decl.
455 // The location doesn't have to be precise - we care only about the file.
456 File = SourceMgr.getDecomposedLoc(Loc).first;
457 break;
458 }
459 }
460
461 if (File.isInvalid())
462 return;
463
464 auto CommentsInThisFile = Comments.getCommentsInFile(File);
465 if (!CommentsInThisFile || CommentsInThisFile->empty() ||
466 CommentsInThisFile->rbegin()->second->isAttached())
467 return;
468
469 // There is at least one comment not attached to a decl.
470 // Maybe it should be attached to one of Decls?
471 //
472 // Note that this way we pick up not only comments that precede the
473 // declaration, but also comments that *follow* the declaration -- thanks to
474 // the lookahead in the lexer: we've consumed the semicolon and looked
475 // ahead through comments.
476 for (const Decl *D : Decls) {
477 assert(D);
478 if (D->isInvalidDecl())
479 continue;
480
481 D = &adjustDeclToTemplate(D: *D);
482
483 if (DeclRawComments.count(Val: D) > 0)
484 continue;
485
486 const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr);
487
488 for (const auto DeclLoc : DeclLocs) {
489 if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
490 continue;
491
492 if (RawComment *const DocComment = getRawCommentForDeclNoCacheImpl(
493 D, RepresentativeLocForDecl: DeclLoc, CommentsInTheFile: *CommentsInThisFile)) {
494 cacheRawCommentForDecl(OriginalD: *D, Comment: *DocComment);
495 comments::FullComment *FC = DocComment->parse(Context: *this, PP, D);
496 ParsedComments[D->getCanonicalDecl()] = FC;
497 break;
498 }
499 }
500 }
501}
502
503comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
504 const Decl *D) const {
505 auto *ThisDeclInfo = new (*this) comments::DeclInfo;
506 ThisDeclInfo->CommentDecl = D;
507 ThisDeclInfo->IsFilled = false;
508 ThisDeclInfo->fill();
509 ThisDeclInfo->CommentDecl = FC->getDecl();
510 if (!ThisDeclInfo->TemplateParameters)
511 ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
512 comments::FullComment *CFC =
513 new (*this) comments::FullComment(FC->getBlocks(),
514 ThisDeclInfo);
515 return CFC;
516}
517
518comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const {
519 const RawComment *RC = getRawCommentForDeclNoCache(D);
520 return RC ? RC->parse(Context: *this, PP: nullptr, D) : nullptr;
521}
522
523comments::FullComment *ASTContext::getCommentForDecl(
524 const Decl *D,
525 const Preprocessor *PP) const {
526 if (!D || D->isInvalidDecl())
527 return nullptr;
528 D = &adjustDeclToTemplate(D: *D);
529
530 const Decl *Canonical = D->getCanonicalDecl();
531 llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
532 ParsedComments.find(Val: Canonical);
533
534 if (Pos != ParsedComments.end()) {
535 if (Canonical != D) {
536 comments::FullComment *FC = Pos->second;
537 comments::FullComment *CFC = cloneFullComment(FC, D);
538 return CFC;
539 }
540 return Pos->second;
541 }
542
543 const Decl *OriginalDecl = nullptr;
544
545 const RawComment *RC = getRawCommentForAnyRedecl(D, OriginalDecl: &OriginalDecl);
546 if (!RC) {
547 if (isa<ObjCMethodDecl>(Val: D) || isa<FunctionDecl>(Val: D)) {
548 SmallVector<const NamedDecl*, 8> Overridden;
549 const auto *OMD = dyn_cast<ObjCMethodDecl>(Val: D);
550 if (OMD && OMD->isPropertyAccessor())
551 if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
552 if (comments::FullComment *FC = getCommentForDecl(D: PDecl, PP))
553 return cloneFullComment(FC, D);
554 if (OMD)
555 addRedeclaredMethods(ObjCMethod: OMD, Redeclared&: Overridden);
556 getOverriddenMethods(Method: dyn_cast<NamedDecl>(Val: D), Overridden);
557 for (unsigned i = 0, e = Overridden.size(); i < e; i++)
558 if (comments::FullComment *FC = getCommentForDecl(D: Overridden[i], PP))
559 return cloneFullComment(FC, D);
560 }
561 else if (const auto *TD = dyn_cast<TypedefNameDecl>(Val: D)) {
562 // Attach any tag type's documentation to its typedef if latter
563 // does not have one of its own.
564 QualType QT = TD->getUnderlyingType();
565 if (const auto *TT = QT->getAs<TagType>())
566 if (comments::FullComment *FC = getCommentForDecl(D: TT->getDecl(), PP))
567 return cloneFullComment(FC, D);
568 }
569 else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(Val: D)) {
570 while (IC->getSuperClass()) {
571 IC = IC->getSuperClass();
572 if (comments::FullComment *FC = getCommentForDecl(D: IC, PP))
573 return cloneFullComment(FC, D);
574 }
575 }
576 else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(Val: D)) {
577 if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
578 if (comments::FullComment *FC = getCommentForDecl(D: IC, PP))
579 return cloneFullComment(FC, D);
580 }
581 else if (const auto *RD = dyn_cast<CXXRecordDecl>(Val: D)) {
582 if (!(RD = RD->getDefinition()))
583 return nullptr;
584 // Check non-virtual bases.
585 for (const auto &I : RD->bases()) {
586 if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
587 continue;
588 QualType Ty = I.getType();
589 if (Ty.isNull())
590 continue;
591 if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
592 if (!(NonVirtualBase= NonVirtualBase->getDefinition()))
593 continue;
594
595 if (comments::FullComment *FC = getCommentForDecl(D: (NonVirtualBase), PP))
596 return cloneFullComment(FC, D);
597 }
598 }
599 // Check virtual bases.
600 for (const auto &I : RD->vbases()) {
601 if (I.getAccessSpecifier() != AS_public)
602 continue;
603 QualType Ty = I.getType();
604 if (Ty.isNull())
605 continue;
606 if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
607 if (!(VirtualBase= VirtualBase->getDefinition()))
608 continue;
609 if (comments::FullComment *FC = getCommentForDecl(D: (VirtualBase), PP))
610 return cloneFullComment(FC, D);
611 }
612 }
613 }
614 return nullptr;
615 }
616
617 // If the RawComment was attached to other redeclaration of this Decl, we
618 // should parse the comment in context of that other Decl. This is important
619 // because comments can contain references to parameter names which can be
620 // different across redeclarations.
621 if (D != OriginalDecl && OriginalDecl)
622 return getCommentForDecl(D: OriginalDecl, PP);
623
624 comments::FullComment *FC = RC->parse(Context: *this, PP, D);
625 ParsedComments[Canonical] = FC;
626 return FC;
627}
628
629void ASTContext::CanonicalTemplateTemplateParm::Profile(
630 llvm::FoldingSetNodeID &ID, const ASTContext &C,
631 TemplateTemplateParmDecl *Parm) {
632 ID.AddInteger(I: Parm->getDepth());
633 ID.AddInteger(I: Parm->getPosition());
634 ID.AddBoolean(B: Parm->isParameterPack());
635 ID.AddInteger(I: Parm->templateParameterKind());
636
637 TemplateParameterList *Params = Parm->getTemplateParameters();
638 ID.AddInteger(I: Params->size());
639 for (TemplateParameterList::const_iterator P = Params->begin(),
640 PEnd = Params->end();
641 P != PEnd; ++P) {
642 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Val: *P)) {
643 ID.AddInteger(I: 0);
644 ID.AddBoolean(B: TTP->isParameterPack());
645 ID.AddInteger(
646 I: TTP->getNumExpansionParameters().toInternalRepresentation());
647 continue;
648 }
649
650 if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Val: *P)) {
651 ID.AddInteger(I: 1);
652 ID.AddBoolean(B: NTTP->isParameterPack());
653 ID.AddPointer(Ptr: C.getUnconstrainedType(T: C.getCanonicalType(T: NTTP->getType()))
654 .getAsOpaquePtr());
655 if (NTTP->isExpandedParameterPack()) {
656 ID.AddBoolean(B: true);
657 ID.AddInteger(I: NTTP->getNumExpansionTypes());
658 for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
659 QualType T = NTTP->getExpansionType(I);
660 ID.AddPointer(Ptr: T.getCanonicalType().getAsOpaquePtr());
661 }
662 } else
663 ID.AddBoolean(B: false);
664 continue;
665 }
666
667 auto *TTP = cast<TemplateTemplateParmDecl>(Val: *P);
668 ID.AddInteger(I: 2);
669 Profile(ID, C, Parm: TTP);
670 }
671}
672
673TemplateTemplateParmDecl *
674ASTContext::getCanonicalTemplateTemplateParmDecl(
675 TemplateTemplateParmDecl *TTP) const {
676 // Check if we already have a canonical template template parameter.
677 llvm::FoldingSetNodeID ID;
678 CanonicalTemplateTemplateParm::Profile(ID, C: *this, Parm: TTP);
679 void *InsertPos = nullptr;
680 CanonicalTemplateTemplateParm *Canonical
681 = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
682 if (Canonical)
683 return Canonical->getParam();
684
685 // Build a canonical template parameter list.
686 TemplateParameterList *Params = TTP->getTemplateParameters();
687 SmallVector<NamedDecl *, 4> CanonParams;
688 CanonParams.reserve(N: Params->size());
689 for (TemplateParameterList::const_iterator P = Params->begin(),
690 PEnd = Params->end();
691 P != PEnd; ++P) {
692 // Note that, per C++20 [temp.over.link]/6, when determining whether
693 // template-parameters are equivalent, constraints are ignored.
694 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Val: *P)) {
695 TemplateTypeParmDecl *NewTTP = TemplateTypeParmDecl::Create(
696 C: *this, DC: getTranslationUnitDecl(), KeyLoc: SourceLocation(), NameLoc: SourceLocation(),
697 D: TTP->getDepth(), P: TTP->getIndex(), Id: nullptr, Typename: false,
698 ParameterPack: TTP->isParameterPack(), /*HasTypeConstraint=*/false,
699 NumExpanded: TTP->getNumExpansionParameters());
700 CanonParams.push_back(Elt: NewTTP);
701 } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Val: *P)) {
702 QualType T = getUnconstrainedType(T: getCanonicalType(T: NTTP->getType()));
703 TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
704 NonTypeTemplateParmDecl *Param;
705 if (NTTP->isExpandedParameterPack()) {
706 SmallVector<QualType, 2> ExpandedTypes;
707 SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
708 for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
709 ExpandedTypes.push_back(Elt: getCanonicalType(T: NTTP->getExpansionType(I)));
710 ExpandedTInfos.push_back(
711 Elt: getTrivialTypeSourceInfo(T: ExpandedTypes.back()));
712 }
713
714 Param = NonTypeTemplateParmDecl::Create(C: *this, DC: getTranslationUnitDecl(),
715 StartLoc: SourceLocation(),
716 IdLoc: SourceLocation(),
717 D: NTTP->getDepth(),
718 P: NTTP->getPosition(), Id: nullptr,
719 T,
720 TInfo,
721 ExpandedTypes,
722 ExpandedTInfos);
723 } else {
724 Param = NonTypeTemplateParmDecl::Create(C: *this, DC: getTranslationUnitDecl(),
725 StartLoc: SourceLocation(),
726 IdLoc: SourceLocation(),
727 D: NTTP->getDepth(),
728 P: NTTP->getPosition(), Id: nullptr,
729 T,
730 ParameterPack: NTTP->isParameterPack(),
731 TInfo);
732 }
733 CanonParams.push_back(Elt: Param);
734 } else
735 CanonParams.push_back(Elt: getCanonicalTemplateTemplateParmDecl(
736 TTP: cast<TemplateTemplateParmDecl>(Val: *P)));
737 }
738
739 TemplateTemplateParmDecl *CanonTTP = TemplateTemplateParmDecl::Create(
740 C: *this, DC: getTranslationUnitDecl(), L: SourceLocation(), D: TTP->getDepth(),
741 P: TTP->getPosition(), ParameterPack: TTP->isParameterPack(), Id: nullptr,
742 ParameterKind: TTP->templateParameterKind(),
743 /*Typename=*/false,
744 Params: TemplateParameterList::Create(C: *this, TemplateLoc: SourceLocation(), LAngleLoc: SourceLocation(),
745 Params: CanonParams, RAngleLoc: SourceLocation(),
746 /*RequiresClause=*/nullptr));
747
748 // Get the new insert position for the node we care about.
749 Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
750 assert(!Canonical && "Shouldn't be in the map!");
751 (void)Canonical;
752
753 // Create the canonical template template parameter entry.
754 Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
755 CanonTemplateTemplateParms.InsertNode(N: Canonical, InsertPos);
756 return CanonTTP;
757}
758
759TemplateTemplateParmDecl *
760ASTContext::findCanonicalTemplateTemplateParmDeclInternal(
761 TemplateTemplateParmDecl *TTP) const {
762 llvm::FoldingSetNodeID ID;
763 CanonicalTemplateTemplateParm::Profile(ID, C: *this, Parm: TTP);
764 void *InsertPos = nullptr;
765 CanonicalTemplateTemplateParm *Canonical =
766 CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
767 return Canonical ? Canonical->getParam() : nullptr;
768}
769
770TemplateTemplateParmDecl *
771ASTContext::insertCanonicalTemplateTemplateParmDeclInternal(
772 TemplateTemplateParmDecl *CanonTTP) const {
773 llvm::FoldingSetNodeID ID;
774 CanonicalTemplateTemplateParm::Profile(ID, C: *this, Parm: CanonTTP);
775 void *InsertPos = nullptr;
776 if (auto *Existing =
777 CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos))
778 return Existing->getParam();
779 CanonTemplateTemplateParms.InsertNode(
780 N: new (*this) CanonicalTemplateTemplateParm(CanonTTP), InsertPos);
781 return CanonTTP;
782}
783
784/// For the purposes of overflow pattern exclusion, does this match the
785/// while(i--) pattern?
786static bool matchesPostDecrInWhile(const UnaryOperator *UO, ASTContext &Ctx) {
787 if (UO->getOpcode() != UO_PostDec)
788 return false;
789
790 if (!UO->getType()->isUnsignedIntegerType())
791 return false;
792
793 // -fsanitize-undefined-ignore-overflow-pattern=unsigned-post-decr-while
794 if (!Ctx.getLangOpts().isOverflowPatternExcluded(
795 Kind: LangOptions::OverflowPatternExclusionKind::PostDecrInWhile))
796 return false;
797
798 // all Parents (usually just one) must be a WhileStmt
799 return llvm::all_of(
800 Range: Ctx.getParentMapContext().getParents(Node: *UO),
801 P: [](const DynTypedNode &P) { return P.get<WhileStmt>() != nullptr; });
802}
803
804bool ASTContext::isUnaryOverflowPatternExcluded(const UnaryOperator *UO) {
805 // -fsanitize-undefined-ignore-overflow-pattern=negated-unsigned-const
806 // ... like -1UL;
807 if (UO->getOpcode() == UO_Minus &&
808 getLangOpts().isOverflowPatternExcluded(
809 Kind: LangOptions::OverflowPatternExclusionKind::NegUnsignedConst) &&
810 UO->isIntegerConstantExpr(Ctx: *this)) {
811 return true;
812 }
813
814 if (matchesPostDecrInWhile(UO, Ctx&: *this))
815 return true;
816
817 return false;
818}
819
820/// Check if a type can have its sanitizer instrumentation elided based on its
821/// presence within an ignorelist.
822bool ASTContext::isTypeIgnoredBySanitizer(const SanitizerMask &Mask,
823 const QualType &Ty) const {
824 std::string TyName = Ty.getUnqualifiedType().getAsString(Policy: getPrintingPolicy());
825 return NoSanitizeL->containsType(Mask, MangledTypeName: TyName);
826}
827
828TargetCXXABI::Kind ASTContext::getCXXABIKind() const {
829 auto Kind = getTargetInfo().getCXXABI().getKind();
830 return getLangOpts().CXXABI.value_or(u&: Kind);
831}
832
833CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
834 if (!LangOpts.CPlusPlus) return nullptr;
835
836 switch (getCXXABIKind()) {
837 case TargetCXXABI::AppleARM64:
838 case TargetCXXABI::Fuchsia:
839 case TargetCXXABI::GenericARM: // Same as Itanium at this level
840 case TargetCXXABI::iOS:
841 case TargetCXXABI::WatchOS:
842 case TargetCXXABI::GenericAArch64:
843 case TargetCXXABI::GenericMIPS:
844 case TargetCXXABI::GenericItanium:
845 case TargetCXXABI::WebAssembly:
846 case TargetCXXABI::XL:
847 return CreateItaniumCXXABI(Ctx&: *this);
848 case TargetCXXABI::Microsoft:
849 return CreateMicrosoftCXXABI(Ctx&: *this);
850 }
851 llvm_unreachable("Invalid CXXABI type!");
852}
853
854interp::Context &ASTContext::getInterpContext() const {
855 if (!InterpContext) {
856 InterpContext.reset(p: new interp::Context(const_cast<ASTContext &>(*this)));
857 }
858 return *InterpContext;
859}
860
861ParentMapContext &ASTContext::getParentMapContext() {
862 if (!ParentMapCtx)
863 ParentMapCtx.reset(p: new ParentMapContext(*this));
864 return *ParentMapCtx;
865}
866
867static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
868 const LangOptions &LangOpts) {
869 switch (LangOpts.getAddressSpaceMapMangling()) {
870 case LangOptions::ASMM_Target:
871 return TI.useAddressSpaceMapMangling();
872 case LangOptions::ASMM_On:
873 return true;
874 case LangOptions::ASMM_Off:
875 return false;
876 }
877 llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
878}
879
/// Construct an ASTContext. Note that builtin types are NOT initialized here;
/// callers must invoke InitBuiltinTypes() once the target is known.
/// this_() is used so folding-set maps can hold a back-pointer to the
/// still-under-construction context.
ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
                       IdentifierTable &idents, SelectorTable &sels,
                       Builtin::Context &builtins, TranslationUnitKind TUKind)
    : ConstantArrayTypes(this_(), ConstantArrayTypesLog2InitSize),
      DependentSizedArrayTypes(this_()), DependentSizedExtVectorTypes(this_()),
      DependentAddressSpaceTypes(this_()), DependentVectorTypes(this_()),
      DependentSizedMatrixTypes(this_()),
      FunctionProtoTypes(this_(), FunctionProtoTypesLog2InitSize),
      DependentTypeOfExprTypes(this_()), DependentDecltypeTypes(this_()),
      DependentPackIndexingTypes(this_()), TemplateSpecializationTypes(this_()),
      DependentBitIntTypes(this_()), SubstTemplateTemplateParmPacks(this_()),
      DeducedTemplates(this_()), ArrayParameterTypes(this_()),
      CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts),
      NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)),
      XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
                                        LangOpts.XRayNeverInstrumentFiles,
                                        LangOpts.XRayAttrListFiles, SM)),
      ProfList(new ProfileList(LangOpts.ProfileListFiles, SM)),
      PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
      BuiltinInfo(builtins), TUKind(TUKind), DeclarationNames(*this),
      Comments(SM), CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
      CompCategories(this_()), LastSDM(nullptr, 0) {
  // Every context owns exactly one TranslationUnitDecl, created eagerly.
  addTranslationUnitDecl();
}
904
/// Tear down all side tables that require non-trivial destruction. Most AST
/// nodes live in BumpAlloc and are never destroyed individually, but objects
/// holding DenseMaps (record layouts, attribute vectors, module initializer
/// lists) must have their destructors run explicitly.
void ASTContext::cleanup() {
  // Release the DenseMaps associated with DeclContext objects.
  // FIXME: Is this the ideal solution?
  ReleaseDeclContextMaps();

  // Call all of the deallocation functions on all of their targets.
  for (auto &Pair : Deallocations)
    (Pair.first)(Pair.second);
  Deallocations.clear();

  // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
  // because they can contain DenseMaps.
  for (llvm::DenseMap<const ObjCInterfaceDecl *,
                      const ASTRecordLayout *>::iterator
           I = ObjCLayouts.begin(),
           E = ObjCLayouts.end();
       I != E;)
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(Ctx&: *this);
  ObjCLayouts.clear();

  for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
       I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(Ctx&: *this);
  }
  ASTRecordLayouts.clear();

  // AttrVecs are placement-constructed in BumpAlloc memory (see
  // getDeclAttrs), so only the destructor runs here; the memory itself is
  // reclaimed with the allocator.
  for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
                                                       AEnd = DeclAttrs.end();
       A != AEnd; ++A)
    A->second->~AttrVec();
  DeclAttrs.clear();

  for (const auto &Value : ModuleInitializers)
    Value.second->~PerModuleInitializers();
  ModuleInitializers.clear();

  XRayFilter.reset();
  NoSanitizeL.reset();
}
948
949ASTContext::~ASTContext() { cleanup(); }
950
/// Restrict AST traversal to the given top-level declarations. The parent
/// map caches parent links for the old scope, so it must be invalidated.
void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
  TraversalScope = TopLevelDecls;
  getParentMapContext().clear();
}
955
/// Register a callback to be invoked with \p Data when the context is
/// cleaned up (see cleanup()).
void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const {
  Deallocations.push_back(Elt: {Callback, Data});
}
959
/// Attach the external AST source (e.g. an AST/PCH reader) that lazily
/// supplies declarations not yet deserialized.
void
ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) {
  ExternalSource = std::move(Source);
}
964
/// Dump statistics about the AST to stderr: per-type-class node counts and
/// sizes, implicit special-member counts, external-source stats, and
/// allocator usage.
void ASTContext::PrintStats() const {
  llvm::errs() << "\n*** AST Context Stats:\n";
  llvm::errs() << "  " << Types.size() << " types total.\n";

  // One counter slot per concrete type class, generated from TypeNodes.inc.
  unsigned counts[] = {
#define TYPE(Name, Parent) 0,
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"
    0 // Extra
  };

  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
    Type *T = Types[i];
    counts[(unsigned)T->getTypeClass()]++;
  }

  // Walk the counters in the same TypeNodes.inc order to print and total.
  unsigned Idx = 0;
  unsigned TotalBytes = 0;
#define TYPE(Name, Parent)                                              \
  if (counts[Idx])                                                      \
    llvm::errs() << "    " << counts[Idx] << " " << #Name               \
                 << " types, " << sizeof(Name##Type) << " each "        \
                 << "(" << counts[Idx] * sizeof(Name##Type)             \
                 << " bytes)\n";                                        \
  TotalBytes += counts[Idx] * sizeof(Name##Type);                       \
  ++Idx;
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"

  llvm::errs() << "Total bytes = " << TotalBytes << "\n";

  // Implicit special member functions.
  llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
               << NumImplicitDefaultConstructors
               << " implicit default constructors created\n";
  llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
               << NumImplicitCopyConstructors
               << " implicit copy constructors created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
                 << NumImplicitMoveConstructors
                 << " implicit move constructors created\n";
  llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
               << NumImplicitCopyAssignmentOperators
               << " implicit copy assignment operators created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
                 << NumImplicitMoveAssignmentOperators
                 << " implicit move assignment operators created\n";
  llvm::errs() << NumImplicitDestructorsDeclared << "/"
               << NumImplicitDestructors
               << " implicit destructors created\n";

  if (ExternalSource) {
    llvm::errs() << "\n";
    ExternalSource->PrintStats();
  }

  BumpAlloc.PrintStats();
}
1025
/// Record that a hidden definition of \p ND was merged into module \p M,
/// optionally notifying the AST mutation listener so serialization can
/// track the redefinition.
void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M,
                                           bool NotifyListeners) {
  if (NotifyListeners)
    if (auto *Listener = getASTMutationListener();
        Listener && !ND->isUnconditionallyVisible())
      Listener->RedefinedHiddenDefinition(D: ND, M);

  // Key on the canonical decl so all redeclarations share one entry.
  MergedDefModules[cast<NamedDecl>(Val: ND->getCanonicalDecl())].push_back(NewVal: M);
}
1035
/// Remove duplicate module entries from \p ND's merged-definition list,
/// preserving first-occurrence order.
void ASTContext::deduplicateMergedDefinitionsFor(NamedDecl *ND) {
  auto It = MergedDefModules.find(Val: cast<NamedDecl>(Val: ND->getCanonicalDecl()));
  if (It == MergedDefModules.end())
    return;

  // Null out repeats in place, then compact with a single erase pass.
  auto &Merged = It->second;
  llvm::DenseSet<Module*> Found;
  for (Module *&M : Merged)
    if (!Found.insert(V: M).second)
      M = nullptr;
  llvm::erase(C&: Merged, V: nullptr);
}
1048
1049ArrayRef<Module *>
1050ASTContext::getModulesWithMergedDefinition(const NamedDecl *Def) {
1051 auto MergedIt =
1052 MergedDefModules.find(Val: cast<NamedDecl>(Val: Def->getCanonicalDecl()));
1053 if (MergedIt == MergedDefModules.end())
1054 return {};
1055 return MergedIt->second;
1056}
1057
/// Deserialize any lazily-recorded initializer decl IDs into the concrete
/// Initializers list, using the external AST source.
void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
  if (LazyInitializers.empty())
    return;

  auto *Source = Ctx.getExternalSource();
  assert(Source && "lazy initializers but no external source");

  // Move the IDs aside first: GetExternalDecl may re-enter and we must not
  // iterate a list that could be mutated underneath us.
  auto LazyInits = std::move(LazyInitializers);
  LazyInitializers.clear();

  for (auto ID : LazyInits)
    Initializers.push_back(Elt: Source->GetExternalDecl(ID));

  assert(LazyInitializers.empty() &&
         "GetExternalDecl for lazy module initializer added more inits");
}
1074
/// Record \p D as an initializer for module \p M.
void ASTContext::addModuleInitializer(Module *M, Decl *D) {
  // One special case: if we add a module initializer that imports another
  // module, and that module's only initializer is an ImportDecl, simplify.
  if (const auto *ID = dyn_cast<ImportDecl>(Val: D)) {
    auto It = ModuleInitializers.find(Val: ID->getImportedModule());

    // Maybe the ImportDecl does nothing at all. (Common case.)
    if (It == ModuleInitializers.end())
      return;

    // Maybe the ImportDecl only imports another ImportDecl.
    auto &Imported = *It->second;
    if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) {
      // Resolve lazy IDs so the single initializer is materialized before we
      // inspect it.
      Imported.resolve(Ctx&: *this);
      auto *OnlyDecl = Imported.Initializers.front();
      if (isa<ImportDecl>(Val: OnlyDecl))
        D = OnlyDecl;
    }
  }

  // Allocate the per-module list on first use from the context's allocator.
  auto *&Inits = ModuleInitializers[M];
  if (!Inits)
    Inits = new (*this) PerModuleInitializers;
  Inits->Initializers.push_back(Elt: D);
}
1100
1101void ASTContext::addLazyModuleInitializers(Module *M,
1102 ArrayRef<GlobalDeclID> IDs) {
1103 auto *&Inits = ModuleInitializers[M];
1104 if (!Inits)
1105 Inits = new (*this) PerModuleInitializers;
1106 Inits->LazyInitializers.insert(I: Inits->LazyInitializers.end(),
1107 From: IDs.begin(), To: IDs.end());
1108}
1109
1110ArrayRef<Decl *> ASTContext::getModuleInitializers(Module *M) {
1111 auto It = ModuleInitializers.find(Val: M);
1112 if (It == ModuleInitializers.end())
1113 return {};
1114
1115 auto *Inits = It->second;
1116 Inits->resolve(Ctx&: *this);
1117 return Inits->Initializers;
1118}
1119
/// Set the C++20 named module being compiled. May only be called once per
/// context, and only with a named (non-header) module.
void ASTContext::setCurrentNamedModule(Module *M) {
  assert(M->isNamedModule());
  assert(!CurrentCXXNamedModule &&
         "We should set named module for ASTContext for only once");
  CurrentCXXNamedModule = M;
}
1126
/// Return true if the two module units belong to the same C++20 module
/// (i.e. share a primary module interface name).
bool ASTContext::isInSameModule(const Module *M1, const Module *M2) const {
  // If exactly one of the two is null, they cannot be in the same module.
  if (!M1 != !M2)
    return false;

  /// Get the representative module for M. The representative module is the
  /// first module unit for a specific primary module name. So that the module
  /// units have the same representative module belongs to the same module.
  ///
  /// The process is helpful to reduce the expensive string operations.
  auto GetRepresentativeModule = [this](const Module *M) {
    auto Iter = SameModuleLookupSet.find(Val: M);
    if (Iter != SameModuleLookupSet.end())
      return Iter->second;

    // First unit seen for this primary name becomes the representative;
    // cache the mapping so later lookups skip the string comparison.
    const Module *RepresentativeModule =
        PrimaryModuleNameMap.try_emplace(Key: M->getPrimaryModuleInterfaceName(), Args&: M)
            .first->second;
    SameModuleLookupSet[M] = RepresentativeModule;
    return RepresentativeModule;
  };

  assert(M1 && "Shouldn't call `isInSameModule` if both M1 and M2 are none.");
  return GetRepresentativeModule(M1) == GetRepresentativeModule(M2);
}
1151
/// Lazily create and return the implicit extern "C" declaration context.
ExternCContextDecl *ASTContext::getExternCContextDecl() const {
  if (!ExternCContext)
    ExternCContext = ExternCContextDecl::Create(C: *this, TU: getTranslationUnitDecl());

  return ExternCContext;
}
1158
/// Create the implicit declaration for a builtin template (e.g.
/// __make_integer_seq) and register it in the translation unit.
BuiltinTemplateDecl *
ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK,
                                     const IdentifierInfo *II) const {
  auto *BuiltinTemplate =
      BuiltinTemplateDecl::Create(C: *this, DC: getTranslationUnitDecl(), Name: II, BTK);
  BuiltinTemplate->setImplicit();
  getTranslationUnitDecl()->addDecl(D: BuiltinTemplate);

  return BuiltinTemplate;
}
1169
// Stamp out one lazy accessor (ASTContext::get<Name>Decl) per builtin
// template listed in BuiltinTemplates.inc.
#define BuiltinTemplate(BTName)                                                \
  BuiltinTemplateDecl *ASTContext::get##BTName##Decl() const {                 \
    if (!Decl##BTName)                                                         \
      Decl##BTName =                                                           \
          buildBuiltinTemplateDecl(BTK##BTName, get##BTName##Name());          \
    return Decl##BTName;                                                       \
  }
#include "clang/Basic/BuiltinTemplates.inc"
1178
/// Create an implicit record declaration with the given name (a CXXRecordDecl
/// in C++ mode, a plain RecordDecl otherwise). The record gets default type
/// visibility and an invalid source location; it is NOT added to the TU.
RecordDecl *ASTContext::buildImplicitRecord(StringRef Name,
                                            RecordDecl::TagKind TK) const {
  SourceLocation Loc;
  RecordDecl *NewDecl;
  if (getLangOpts().CPlusPlus)
    NewDecl = CXXRecordDecl::Create(C: *this, TK, DC: getTranslationUnitDecl(), StartLoc: Loc,
                                    IdLoc: Loc, Id: &Idents.get(Name));
  else
    NewDecl = RecordDecl::Create(C: *this, TK, DC: getTranslationUnitDecl(), StartLoc: Loc, IdLoc: Loc,
                                 Id: &Idents.get(Name));
  NewDecl->setImplicit();
  NewDecl->addAttr(A: TypeVisibilityAttr::CreateImplicit(
      Ctx&: const_cast<ASTContext &>(*this), Visibility: TypeVisibilityAttr::Default));
  return NewDecl;
}
1194
/// Create an implicit typedef of type \p T with the given name, owned by the
/// translation unit but not added to it.
TypedefDecl *ASTContext::buildImplicitTypedef(QualType T,
                                              StringRef Name) const {
  TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
  TypedefDecl *NewDecl = TypedefDecl::Create(
      C&: const_cast<ASTContext &>(*this), DC: getTranslationUnitDecl(),
      StartLoc: SourceLocation(), IdLoc: SourceLocation(), Id: &Idents.get(Name), TInfo);
  NewDecl->setImplicit();
  return NewDecl;
}
1204
1205TypedefDecl *ASTContext::getInt128Decl() const {
1206 if (!Int128Decl)
1207 Int128Decl = buildImplicitTypedef(T: Int128Ty, Name: "__int128_t");
1208 return Int128Decl;
1209}
1210
1211TypedefDecl *ASTContext::getUInt128Decl() const {
1212 if (!UInt128Decl)
1213 UInt128Decl = buildImplicitTypedef(T: UnsignedInt128Ty, Name: "__uint128_t");
1214 return UInt128Decl;
1215}
1216
/// Allocate the singleton BuiltinType for kind \p K, record it in the type
/// list, and store its canonical form into \p R.
void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
  auto *Ty = new (*this, alignof(BuiltinType)) BuiltinType(K);
  R = CanQualType::CreateUnsafe(Other: QualType(Ty, 0));
  Types.push_back(Elt: Ty);
}
1222
/// Create all of the builtin singleton types for the given target (and
/// optional auxiliary target, e.g. the host in CUDA/SYCL compilations).
/// Must be called exactly once, after target information is available.
void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
                                  const TargetInfo *AuxTarget) {
  assert((!this->Target || this->Target == &Target) &&
         "Incorrect target reinitialization");
  assert(VoidTy.isNull() && "Context reinitialized?");

  this->Target = &Target;
  this->AuxTarget = AuxTarget;

  ABI.reset(p: createCXXABI(T: Target));
  AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(TI: Target, LangOpts);

  // C99 6.2.5p19.
  InitBuiltinType(R&: VoidTy, K: BuiltinType::Void);

  // C99 6.2.5p2.
  InitBuiltinType(R&: BoolTy, K: BuiltinType::Bool);
  // C99 6.2.5p3.
  // Plain 'char' is a distinct type whose signedness is target/option
  // dependent.
  if (LangOpts.CharIsSigned)
    InitBuiltinType(R&: CharTy, K: BuiltinType::Char_S);
  else
    InitBuiltinType(R&: CharTy, K: BuiltinType::Char_U);
  // C99 6.2.5p4.
  InitBuiltinType(R&: SignedCharTy, K: BuiltinType::SChar);
  InitBuiltinType(R&: ShortTy, K: BuiltinType::Short);
  InitBuiltinType(R&: IntTy, K: BuiltinType::Int);
  InitBuiltinType(R&: LongTy, K: BuiltinType::Long);
  InitBuiltinType(R&: LongLongTy, K: BuiltinType::LongLong);

  // C99 6.2.5p6.
  InitBuiltinType(R&: UnsignedCharTy, K: BuiltinType::UChar);
  InitBuiltinType(R&: UnsignedShortTy, K: BuiltinType::UShort);
  InitBuiltinType(R&: UnsignedIntTy, K: BuiltinType::UInt);
  InitBuiltinType(R&: UnsignedLongTy, K: BuiltinType::ULong);
  InitBuiltinType(R&: UnsignedLongLongTy, K: BuiltinType::ULongLong);

  // C99 6.2.5p10.
  InitBuiltinType(R&: FloatTy, K: BuiltinType::Float);
  InitBuiltinType(R&: DoubleTy, K: BuiltinType::Double);
  InitBuiltinType(R&: LongDoubleTy, K: BuiltinType::LongDouble);

  // GNU extension, __float128 for IEEE quadruple precision
  InitBuiltinType(R&: Float128Ty, K: BuiltinType::Float128);

  // __ibm128 for IBM extended precision
  InitBuiltinType(R&: Ibm128Ty, K: BuiltinType::Ibm128);

  // C11 extension ISO/IEC TS 18661-3
  InitBuiltinType(R&: Float16Ty, K: BuiltinType::Float16);

  // ISO/IEC JTC1 SC22 WG14 N1169 Extension
  // Fixed-point types: accum/fract in plain, unsigned, and saturating forms.
  InitBuiltinType(R&: ShortAccumTy, K: BuiltinType::ShortAccum);
  InitBuiltinType(R&: AccumTy, K: BuiltinType::Accum);
  InitBuiltinType(R&: LongAccumTy, K: BuiltinType::LongAccum);
  InitBuiltinType(R&: UnsignedShortAccumTy, K: BuiltinType::UShortAccum);
  InitBuiltinType(R&: UnsignedAccumTy, K: BuiltinType::UAccum);
  InitBuiltinType(R&: UnsignedLongAccumTy, K: BuiltinType::ULongAccum);
  InitBuiltinType(R&: ShortFractTy, K: BuiltinType::ShortFract);
  InitBuiltinType(R&: FractTy, K: BuiltinType::Fract);
  InitBuiltinType(R&: LongFractTy, K: BuiltinType::LongFract);
  InitBuiltinType(R&: UnsignedShortFractTy, K: BuiltinType::UShortFract);
  InitBuiltinType(R&: UnsignedFractTy, K: BuiltinType::UFract);
  InitBuiltinType(R&: UnsignedLongFractTy, K: BuiltinType::ULongFract);
  InitBuiltinType(R&: SatShortAccumTy, K: BuiltinType::SatShortAccum);
  InitBuiltinType(R&: SatAccumTy, K: BuiltinType::SatAccum);
  InitBuiltinType(R&: SatLongAccumTy, K: BuiltinType::SatLongAccum);
  InitBuiltinType(R&: SatUnsignedShortAccumTy, K: BuiltinType::SatUShortAccum);
  InitBuiltinType(R&: SatUnsignedAccumTy, K: BuiltinType::SatUAccum);
  InitBuiltinType(R&: SatUnsignedLongAccumTy, K: BuiltinType::SatULongAccum);
  InitBuiltinType(R&: SatShortFractTy, K: BuiltinType::SatShortFract);
  InitBuiltinType(R&: SatFractTy, K: BuiltinType::SatFract);
  InitBuiltinType(R&: SatLongFractTy, K: BuiltinType::SatLongFract);
  InitBuiltinType(R&: SatUnsignedShortFractTy, K: BuiltinType::SatUShortFract);
  InitBuiltinType(R&: SatUnsignedFractTy, K: BuiltinType::SatUFract);
  InitBuiltinType(R&: SatUnsignedLongFractTy, K: BuiltinType::SatULongFract);

  // GNU extension, 128-bit integers.
  InitBuiltinType(R&: Int128Ty, K: BuiltinType::Int128);
  InitBuiltinType(R&: UnsignedInt128Ty, K: BuiltinType::UInt128);

  // C++ 3.9.1p5
  if (TargetInfo::isTypeSigned(T: Target.getWCharType()))
    InitBuiltinType(R&: WCharTy, K: BuiltinType::WChar_S);
  else // -fshort-wchar makes wchar_t be unsigned.
    InitBuiltinType(R&: WCharTy, K: BuiltinType::WChar_U);
  if (LangOpts.CPlusPlus && LangOpts.WChar)
    WideCharTy = WCharTy;
  else {
    // C99 (or C++ using -fno-wchar).
    WideCharTy = getFromTargetType(Type: Target.getWCharType());
  }

  WIntTy = getFromTargetType(Type: Target.getWIntType());

  // C++20 (proposed)
  InitBuiltinType(R&: Char8Ty, K: BuiltinType::Char8);

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(R&: Char16Ty, K: BuiltinType::Char16);
  else // C99
    Char16Ty = getFromTargetType(Type: Target.getChar16Type());

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(R&: Char32Ty, K: BuiltinType::Char32);
  else // C99
    Char32Ty = getFromTargetType(Type: Target.getChar32Type());

  // Placeholder type for type-dependent expressions whose type is
  // completely unknown. No code should ever check a type against
  // DependentTy and users should never see it; however, it is here to
  // help diagnose failures to properly check for type-dependent
  // expressions.
  InitBuiltinType(R&: DependentTy, K: BuiltinType::Dependent);

  // Placeholder type for functions.
  InitBuiltinType(R&: OverloadTy, K: BuiltinType::Overload);

  // Placeholder type for bound members.
  InitBuiltinType(R&: BoundMemberTy, K: BuiltinType::BoundMember);

  // Placeholder type for unresolved templates.
  InitBuiltinType(R&: UnresolvedTemplateTy, K: BuiltinType::UnresolvedTemplate);

  // Placeholder type for pseudo-objects.
  InitBuiltinType(R&: PseudoObjectTy, K: BuiltinType::PseudoObject);

  // "any" type; useful for debugger-like clients.
  InitBuiltinType(R&: UnknownAnyTy, K: BuiltinType::UnknownAny);

  // Placeholder type for unbridged ARC casts.
  InitBuiltinType(R&: ARCUnbridgedCastTy, K: BuiltinType::ARCUnbridgedCast);

  // Placeholder type for builtin functions.
  InitBuiltinType(R&: BuiltinFnTy, K: BuiltinType::BuiltinFn);

  // Placeholder type for OMP array sections.
  if (LangOpts.OpenMP) {
    InitBuiltinType(R&: ArraySectionTy, K: BuiltinType::ArraySection);
    InitBuiltinType(R&: OMPArrayShapingTy, K: BuiltinType::OMPArrayShaping);
    InitBuiltinType(R&: OMPIteratorTy, K: BuiltinType::OMPIterator);
  }
  // Placeholder type for OpenACC array sections, if we are ALSO in OMP mode,
  // don't bother, as we're just using the same type as OMP.
  if (LangOpts.OpenACC && !LangOpts.OpenMP) {
    InitBuiltinType(R&: ArraySectionTy, K: BuiltinType::ArraySection);
  }
  if (LangOpts.MatrixTypes)
    InitBuiltinType(R&: IncompleteMatrixIdxTy, K: BuiltinType::IncompleteMatrixIdx);

  // Builtin types for 'id', 'Class', and 'SEL'.
  InitBuiltinType(R&: ObjCBuiltinIdTy, K: BuiltinType::ObjCId);
  InitBuiltinType(R&: ObjCBuiltinClassTy, K: BuiltinType::ObjCClass);
  InitBuiltinType(R&: ObjCBuiltinSelTy, K: BuiltinType::ObjCSel);

  // Target/language-conditional builtin families are stamped out from their
  // respective .def/.inc files below.
  if (LangOpts.OpenCL) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/OpenCLImageTypes.def"

    InitBuiltinType(R&: OCLSamplerTy, K: BuiltinType::OCLSampler);
    InitBuiltinType(R&: OCLEventTy, K: BuiltinType::OCLEvent);
    InitBuiltinType(R&: OCLClkEventTy, K: BuiltinType::OCLClkEvent);
    InitBuiltinType(R&: OCLQueueTy, K: BuiltinType::OCLQueue);
    InitBuiltinType(R&: OCLReserveIDTy, K: BuiltinType::OCLReserveID);

#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/OpenCLExtensionTypes.def"
  }

  if (LangOpts.HLSL) {
#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/HLSLIntangibleTypes.def"
  }

  if (Target.hasAArch64ACLETypes() ||
      (AuxTarget && AuxTarget->hasAArch64ACLETypes())) {
#define SVE_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/AArch64ACLETypes.def"
  }

  if (Target.getTriple().isPPC64()) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
      InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
  }

  if (Target.hasRISCVVTypes()) {
#define RVV_TYPE(Name, Id, SingletonId)                                        \
  InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/RISCVVTypes.def"
  }

  if (Target.getTriple().isWasm() && Target.hasFeature(Feature: "reference-types")) {
#define WASM_TYPE(Name, Id, SingletonId)                                       \
  InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/WebAssemblyReferenceTypes.def"
  }

  if (Target.getTriple().isAMDGPU() ||
      (AuxTarget && AuxTarget->getTriple().isAMDGPU())) {
#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align)                       \
  InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/AMDGPUTypes.def"
  }

  // Builtin type for __objc_yes and __objc_no
  ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
                       SignedCharTy : BoolTy);

  ObjCConstantStringType = QualType();

  ObjCSuperType = QualType();

  // void * type
  if (LangOpts.OpenCLGenericAddressSpace) {
    auto Q = VoidTy.getQualifiers();
    Q.setAddressSpace(LangAS::opencl_generic);
    VoidPtrTy = getPointerType(T: getCanonicalType(
        T: getQualifiedType(T: VoidTy.getUnqualifiedType(), Qs: Q)));
  } else {
    VoidPtrTy = getPointerType(T: VoidTy);
  }

  // nullptr type (C++0x 2.14.7)
  InitBuiltinType(R&: NullPtrTy, K: BuiltinType::NullPtr);

  // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
  InitBuiltinType(R&: HalfTy, K: BuiltinType::Half);

  InitBuiltinType(R&: BFloat16Ty, K: BuiltinType::BFloat16);

  // Builtin type used to help define __builtin_va_list.
  VaListTagDecl = nullptr;

  // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls.
  if (LangOpts.MicrosoftExt || LangOpts.Borland) {
    MSGuidTagDecl = buildImplicitRecord(Name: "_GUID");
    getTranslationUnitDecl()->addDecl(D: MSGuidTagDecl);
  }
}
1469
/// The context's diagnostics engine is owned by the source manager.
DiagnosticsEngine &ASTContext::getDiagnostics() const {
  return SourceMgr.getDiagnostics();
}
1473
/// Return (creating on first use) the attribute vector for \p D. The AttrVec
/// is placement-constructed in the context's allocator; cleanup() runs its
/// destructor explicitly.
AttrVec& ASTContext::getDeclAttrs(const Decl *D) {
  AttrVec *&Result = DeclAttrs[D];
  if (!Result) {
    void *Mem = Allocate(Size: sizeof(AttrVec));
    Result = new (Mem) AttrVec;
  }

  return *Result;
}
1483
1484/// Erase the attributes corresponding to the given declaration.
1485void ASTContext::eraseDeclAttrs(const Decl *D) {
1486 llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(Val: D);
1487 if (Pos != DeclAttrs.end()) {
1488 Pos->second->~AttrVec();
1489 DeclAttrs.erase(I: Pos);
1490 }
1491}
1492
// FIXME: Remove ?
/// Return the member-specialization info for a static data member that was
/// instantiated from a class template, or null if it was not.
MemberSpecializationInfo *
ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
  assert(Var->isStaticDataMember() && "Not a static data member");
  return getTemplateOrSpecializationInfo(Var)
      .dyn_cast<MemberSpecializationInfo *>();
}
1500
1501ASTContext::TemplateOrSpecializationInfo
1502ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) {
1503 llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
1504 TemplateOrInstantiation.find(Val: Var);
1505 if (Pos == TemplateOrInstantiation.end())
1506 return {};
1507
1508 return Pos->second;
1509}
1510
/// Record that static data member \p Inst was instantiated from \p Tmpl with
/// the given specialization kind and point of instantiation.
void
ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
                                           TemplateSpecializationKind TSK,
                                           SourceLocation PointOfInstantiation) {
  assert(Inst->isStaticDataMember() && "Not a static data member");
  assert(Tmpl->isStaticDataMember() && "Not a static data member");
  setTemplateOrSpecializationInfo(Inst, TSI: new (*this) MemberSpecializationInfo(
                                            Tmpl, TSK, PointOfInstantiation));
}
1520
/// Record the template/specialization info for \p Inst; may only be set once
/// per variable.
void
ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst,
                                            TemplateOrSpecializationInfo TSI) {
  assert(!TemplateOrInstantiation[Inst] &&
         "Already noted what the variable was instantiated from");
  TemplateOrInstantiation[Inst] = TSI;
}
1528
/// Return the using declaration \p UUD was instantiated from, or null.
NamedDecl *
ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) {
  return InstantiatedFromUsingDecl.lookup(Val: UUD);
}
1533
/// Record that using declaration \p Inst was instantiated from \p Pattern.
/// Both must be using-style declarations; the mapping may only be set once.
void
ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) {
  assert((isa<UsingDecl>(Pattern) ||
          isa<UnresolvedUsingValueDecl>(Pattern) ||
          isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
         "pattern decl is not a using decl");
  assert((isa<UsingDecl>(Inst) ||
          isa<UnresolvedUsingValueDecl>(Inst) ||
          isa<UnresolvedUsingTypenameDecl>(Inst)) &&
         "instantiation did not produce a using decl");
  assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingDecl[Inst] = Pattern;
}
1547
/// Return the using-enum declaration \p UUD was instantiated from, or null.
UsingEnumDecl *
ASTContext::getInstantiatedFromUsingEnumDecl(UsingEnumDecl *UUD) {
  return InstantiatedFromUsingEnumDecl.lookup(Val: UUD);
}
1552
/// Record that using-enum declaration \p Inst was instantiated from
/// \p Pattern; the mapping may only be set once.
void ASTContext::setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst,
                                                  UsingEnumDecl *Pattern) {
  assert(!InstantiatedFromUsingEnumDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingEnumDecl[Inst] = Pattern;
}
1558
/// Return the shadow declaration \p Inst was instantiated from, or null.
UsingShadowDecl *
ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) {
  return InstantiatedFromUsingShadowDecl.lookup(Val: Inst);
}
1563
/// Record that shadow declaration \p Inst was instantiated from \p Pattern;
/// the mapping may only be set once.
void
ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
                                               UsingShadowDecl *Pattern) {
  assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingShadowDecl[Inst] = Pattern;
}
1570
/// Return the unnamed template field \p Field was instantiated from, or null.
FieldDecl *
ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) const {
  return InstantiatedFromUnnamedFieldDecl.lookup(Val: Field);
}
1575
1576void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst,
1577 FieldDecl *Tmpl) {
1578 assert((!Inst->getDeclName() || Inst->isPlaceholderVar(getLangOpts())) &&
1579 "Instantiated field decl is not unnamed");
1580 assert((!Inst->getDeclName() || Inst->isPlaceholderVar(getLangOpts())) &&
1581 "Template field decl is not unnamed");
1582 assert(!InstantiatedFromUnnamedFieldDecl[Inst] &&
1583 "Already noted what unnamed field was instantiated from");
1584
1585 InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl;
1586}
1587
1588ASTContext::overridden_cxx_method_iterator
1589ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const {
1590 return overridden_methods(Method).begin();
1591}
1592
1593ASTContext::overridden_cxx_method_iterator
1594ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const {
1595 return overridden_methods(Method).end();
1596}
1597
1598unsigned
1599ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const {
1600 auto Range = overridden_methods(Method);
1601 return Range.end() - Range.begin();
1602}
1603
/// Return the range of methods directly overridden by \p Method (keyed on
/// its canonical declaration); empty if none were recorded.
ASTContext::overridden_method_range
ASTContext::overridden_methods(const CXXMethodDecl *Method) const {
  llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos =
      OverriddenMethods.find(Val: Method->getCanonicalDecl());
  if (Pos == OverriddenMethods.end())
    return overridden_method_range(nullptr, nullptr);
  return overridden_method_range(Pos->second.begin(), Pos->second.end());
}
1612
/// Record that \p Method overrides \p Overridden. Both must be canonical
/// declarations so lookups via overridden_methods() find them.
void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method,
                                     const CXXMethodDecl *Overridden) {
  assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl());
  OverriddenMethods[Method].push_back(NewVal: Overridden);
}
1618
/// Collect the methods \p D overrides, handling both C++ methods and
/// Objective-C methods; other declaration kinds contribute nothing.
void ASTContext::getOverriddenMethods(
    const NamedDecl *D,
    SmallVectorImpl<const NamedDecl *> &Overridden) const {
  assert(D);

  if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(Val: D)) {
    Overridden.append(in_start: overridden_methods_begin(Method: CXXMethod),
                      in_end: overridden_methods_end(Method: CXXMethod));
    return;
  }

  const auto *Method = dyn_cast<ObjCMethodDecl>(Val: D);
  if (!Method)
    return;

  // ObjC tracks overrides on the method itself, not in this context.
  SmallVector<const ObjCMethodDecl *, 8> OverDecls;
  Method->getOverriddenMethods(Overridden&: OverDecls);
  Overridden.append(in_start: OverDecls.begin(), in_end: OverDecls.end());
}
1638
1639std::optional<ASTContext::CXXRecordDeclRelocationInfo>
1640ASTContext::getRelocationInfoForCXXRecord(const CXXRecordDecl *RD) const {
1641 assert(RD);
1642 CXXRecordDecl *D = RD->getDefinition();
1643 auto it = RelocatableClasses.find(Val: D);
1644 if (it != RelocatableClasses.end())
1645 return it->getSecond();
1646 return std::nullopt;
1647}
1648
/// Record trivial-relocation info for \p RD's definition; may only be set
/// once per class.
void ASTContext::setRelocationInfoForCXXRecord(
    const CXXRecordDecl *RD, CXXRecordDeclRelocationInfo Info) {
  assert(RD);
  CXXRecordDecl *D = RD->getDefinition();
  assert(RelocatableClasses.find(D) == RelocatableClasses.end());
  RelocatableClasses.insert(KV: {D, Info});
}
1656
/// Determine whether \p Class's vtable pointer is signed with address
/// discrimination: an explicit attribute on the authentication base class
/// wins; otherwise fall back to the global pointer-auth language option.
static bool primaryBaseHaseAddressDiscriminatedVTableAuthentication(
    const ASTContext &Context, const CXXRecordDecl *Class) {
  // Only polymorphic classes carry a vtable pointer at all.
  if (!Class->isPolymorphic())
    return false;
  const CXXRecordDecl *BaseType = Context.baseForVTableAuthentication(ThisClass: Class);
  using AuthAttr = VTablePointerAuthenticationAttr;
  const AuthAttr *ExplicitAuth = BaseType->getAttr<AuthAttr>();
  if (!ExplicitAuth)
    return Context.getLangOpts().PointerAuthVTPtrAddressDiscrimination;
  AuthAttr::AddressDiscriminationMode AddressDiscrimination =
      ExplicitAuth->getAddressDiscrimination();
  // An attribute with "default" mode also defers to the language option.
  if (AddressDiscrimination == AuthAttr::DefaultAddressDiscrimination)
    return Context.getLangOpts().PointerAuthVTPtrAddressDiscrimination;
  return AddressDiscrimination == AuthAttr::AddressDiscrimination;
}
1672
/// Classify whether \p T contains address-discriminated pointer
/// authentication, either directly, via a vtable pointer, or transitively
/// through bases and fields. Results for record types are memoized.
ASTContext::PointerAuthContent
ASTContext::findPointerAuthContent(QualType T) const {
  assert(isPointerAuthenticationAvailable());

  T = T.getCanonicalType();
  if (T->isDependentType())
    return PointerAuthContent::None;

  if (T.hasAddressDiscriminatedPointerAuth())
    return PointerAuthContent::AddressDiscriminatedData;
  const RecordDecl *RD = T->getAsRecordDecl();
  if (!RD)
    return PointerAuthContent::None;

  if (RD->isInvalidDecl())
    return PointerAuthContent::None;

  // Memoized result from an earlier walk of this record?
  if (auto Existing = RecordContainsAddressDiscriminatedPointerAuth.find(Val: RD);
      Existing != RecordContainsAddressDiscriminatedPointerAuth.end())
    return Existing->second;

  PointerAuthContent Result = PointerAuthContent::None;

  // Cache the final classification before returning it. The assert relies on
  // the recursion never inserting an entry for RD itself mid-walk.
  auto SaveResultAndReturn = [&]() -> PointerAuthContent {
    auto [ResultIter, DidAdd] =
        RecordContainsAddressDiscriminatedPointerAuth.try_emplace(Key: RD, Args&: Result);
    (void)ResultIter;
    (void)DidAdd;
    assert(DidAdd);
    return Result;
  };
  // Raise Result to NewResult if stronger; stop the walk early once the
  // strongest classification (discriminated data) has been reached.
  auto ShouldContinueAfterUpdate = [&](PointerAuthContent NewResult) {
    static_assert(PointerAuthContent::None <
                  PointerAuthContent::AddressDiscriminatedVTable);
    static_assert(PointerAuthContent::AddressDiscriminatedVTable <
                  PointerAuthContent::AddressDiscriminatedData);
    if (NewResult > Result)
      Result = NewResult;
    return Result != PointerAuthContent::AddressDiscriminatedData;
  };
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD)) {
    if (primaryBaseHaseAddressDiscriminatedVTableAuthentication(Context: *this, Class: CXXRD) &&
        !ShouldContinueAfterUpdate(
            PointerAuthContent::AddressDiscriminatedVTable))
      return SaveResultAndReturn();
    for (auto Base : CXXRD->bases()) {
      if (!ShouldContinueAfterUpdate(findPointerAuthContent(T: Base.getType())))
        return SaveResultAndReturn();
    }
  }
  for (auto *FieldDecl : RD->fields()) {
    if (!ShouldContinueAfterUpdate(
            findPointerAuthContent(T: FieldDecl->getType())))
      return SaveResultAndReturn();
  }
  return SaveResultAndReturn();
}
1730
1731void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
1732 assert(!Import->getNextLocalImport() &&
1733 "Import declaration already in the chain");
1734 assert(!Import->isFromASTFile() && "Non-local import declaration");
1735 if (!FirstLocalImport) {
1736 FirstLocalImport = Import;
1737 LastLocalImport = Import;
1738 return;
1739 }
1740
1741 LastLocalImport->setNextLocalImport(Import);
1742 LastLocalImport = Import;
1743}
1744
1745//===----------------------------------------------------------------------===//
1746// Type Sizing and Analysis
1747//===----------------------------------------------------------------------===//
1748
1749/// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
1750/// scalar floating point type.
1751const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
1752 switch (T->castAs<BuiltinType>()->getKind()) {
1753 default:
1754 llvm_unreachable("Not a floating point type!");
1755 case BuiltinType::BFloat16:
1756 return Target->getBFloat16Format();
1757 case BuiltinType::Float16:
1758 return Target->getHalfFormat();
1759 case BuiltinType::Half:
1760 return Target->getHalfFormat();
1761 case BuiltinType::Float: return Target->getFloatFormat();
1762 case BuiltinType::Double: return Target->getDoubleFormat();
1763 case BuiltinType::Ibm128:
1764 return Target->getIbm128Format();
1765 case BuiltinType::LongDouble:
1766 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
1767 return AuxTarget->getLongDoubleFormat();
1768 return Target->getLongDoubleFormat();
1769 case BuiltinType::Float128:
1770 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
1771 return AuxTarget->getFloat128Format();
1772 return Target->getFloat128Format();
1773 }
1774}
1775
/// Compute the alignment, in characters, of declaration \p D, taking into
/// account alignment attributes, the declared type, packing, field offsets
/// within records, and target minimum/maximum alignment rules.
/// \param ForAlignof true when answering alignof(decl) rather than choosing
///        a storage alignment; several adjustments only apply to storage.
CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
  // Start at the minimum possible alignment: one character.
  unsigned Align = Target->getCharWidth();

  const unsigned AlignFromAttr = D->getMaxAlignment();
  if (AlignFromAttr)
    Align = AlignFromAttr;

  // __attribute__((aligned)) can increase or decrease alignment
  // *except* on a struct or struct member, where it only increases
  // alignment unless 'packed' is also specified.
  //
  // It is an error for alignas to decrease alignment, so we can
  // ignore that possibility; Sema should diagnose it.
  bool UseAlignAttrOnly;
  if (const FieldDecl *FD = dyn_cast<FieldDecl>(Val: D))
    UseAlignAttrOnly =
        FD->hasAttr<PackedAttr>() || FD->getParent()->hasAttr<PackedAttr>();
  else
    UseAlignAttrOnly = AlignFromAttr != 0;
  // If we're using the align attribute only, just ignore everything
  // else about the declaration and its type.
  if (UseAlignAttrOnly) {
    // do nothing
  } else if (const auto *VD = dyn_cast<ValueDecl>(Val: D)) {
    QualType T = VD->getType();
    // References: alignof asks about the referred-to object; storage
    // alignment treats the reference like a pointer.
    if (const auto *RT = T->getAs<ReferenceType>()) {
      if (ForAlignof)
        T = RT->getPointeeType();
      else
        T = getPointerType(T: RT->getPointeeType());
    }
    QualType BaseT = getBaseElementType(QT: T);
    if (T->isFunctionType())
      Align = getTypeInfoImpl(T: T.getTypePtr()).Align;
    else if (!BaseT->isIncompleteType()) {
      // Adjust alignments of declarations with array type by the
      // large-array alignment on the target.
      if (const ArrayType *arrayType = getAsArrayType(T)) {
        unsigned MinWidth = Target->getLargeArrayMinWidth();
        if (!ForAlignof && MinWidth) {
          if (isa<VariableArrayType>(Val: arrayType))
            Align = std::max(a: Align, b: Target->getLargeArrayAlign());
          else if (isa<ConstantArrayType>(Val: arrayType) &&
                   MinWidth <= getTypeSize(T: cast<ConstantArrayType>(Val: arrayType)))
            Align = std::max(a: Align, b: Target->getLargeArrayAlign());
        }
      }
      Align = std::max(a: Align, b: getPreferredTypeAlign(T: T.getTypePtr()));
      // The 'unaligned' qualifier forces character alignment.
      if (BaseT.getQualifiers().hasUnaligned())
        Align = Target->getCharWidth();
    }

    // Ensure minimum alignment for global variables.
    if (const auto *VD = dyn_cast<VarDecl>(Val: D))
      if (VD->hasGlobalStorage() && !ForAlignof) {
        uint64_t TypeSize =
            !BaseT->isIncompleteType() ? getTypeSize(T: T.getTypePtr()) : 0;
        Align = std::max(a: Align, b: getMinGlobalAlignOfVar(Size: TypeSize, VD));
      }

    // Fields can be subject to extra alignment constraints, like if
    // the field is packed, the struct is packed, or the struct has a
    // a max-field-alignment constraint (#pragma pack). So calculate
    // the actual alignment of the field within the struct, and then
    // (as we're expected to) constrain that by the alignment of the type.
    if (const auto *Field = dyn_cast<FieldDecl>(Val: VD)) {
      const RecordDecl *Parent = Field->getParent();
      // We can only produce a sensible answer if the record is valid.
      if (!Parent->isInvalidDecl()) {
        const ASTRecordLayout &Layout = getASTRecordLayout(D: Parent);

        // Start with the record's overall alignment.
        unsigned FieldAlign = toBits(CharSize: Layout.getAlignment());

        // Use the GCD of that and the offset within the record.
        uint64_t Offset = Layout.getFieldOffset(FieldNo: Field->getFieldIndex());
        if (Offset > 0) {
          // Alignment is always a power of 2, so the GCD will be a power of 2,
          // which means we get to do this crazy thing instead of Euclid's.
          uint64_t LowBitOfOffset = Offset & (~Offset + 1);
          if (LowBitOfOffset < FieldAlign)
            FieldAlign = static_cast<unsigned>(LowBitOfOffset);
        }

        Align = std::min(a: Align, b: FieldAlign);
      }
    }
  }

  // Some targets have hard limitation on the maximum requestable alignment in
  // aligned attribute for static variables.
  const unsigned MaxAlignedAttr = getTargetInfo().getMaxAlignedAttribute();
  const auto *VD = dyn_cast<VarDecl>(Val: D);
  if (MaxAlignedAttr && VD && VD->getStorageClass() == SC_Static)
    Align = std::min(a: Align, b: MaxAlignedAttr);

  return toCharUnitsFromBits(BitSize: Align);
}
1874
1875CharUnits ASTContext::getExnObjectAlignment() const {
1876 return toCharUnitsFromBits(BitSize: Target->getExnObjectAlignment());
1877}
1878
1879// getTypeInfoDataSizeInChars - Return the size of a type, in
1880// chars. If the type is a record, its data size is returned. This is
1881// the size of the memcpy that's performed when assigning this type
1882// using a trivial copy/move assignment operator.
1883TypeInfoChars ASTContext::getTypeInfoDataSizeInChars(QualType T) const {
1884 TypeInfoChars Info = getTypeInfoInChars(T);
1885
1886 // In C++, objects can sometimes be allocated into the tail padding
1887 // of a base-class subobject. We decide whether that's possible
1888 // during class layout, so here we can just trust the layout results.
1889 if (getLangOpts().CPlusPlus) {
1890 if (const auto *RD = T->getAsCXXRecordDecl(); RD && !RD->isInvalidDecl()) {
1891 const ASTRecordLayout &layout = getASTRecordLayout(D: RD);
1892 Info.Width = layout.getDataSize();
1893 }
1894 }
1895
1896 return Info;
1897}
1898
1899/// getConstantArrayInfoInChars - Performing the computation in CharUnits
1900/// instead of in bits prevents overflowing the uint64_t for some large arrays.
1901TypeInfoChars
1902static getConstantArrayInfoInChars(const ASTContext &Context,
1903 const ConstantArrayType *CAT) {
1904 TypeInfoChars EltInfo = Context.getTypeInfoInChars(T: CAT->getElementType());
1905 uint64_t Size = CAT->getZExtSize();
1906 assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <=
1907 (uint64_t)(-1)/Size) &&
1908 "Overflow in array type char size evaluation");
1909 uint64_t Width = EltInfo.Width.getQuantity() * Size;
1910 unsigned Align = EltInfo.Align.getQuantity();
1911 if (!Context.getTargetInfo().getCXXABI().isMicrosoft() ||
1912 Context.getTargetInfo().getPointerWidth(AddrSpace: LangAS::Default) == 64)
1913 Width = llvm::alignTo(Value: Width, Align);
1914 return TypeInfoChars(CharUnits::fromQuantity(Quantity: Width),
1915 CharUnits::fromQuantity(Quantity: Align),
1916 EltInfo.AlignRequirement);
1917}
1918
1919TypeInfoChars ASTContext::getTypeInfoInChars(const Type *T) const {
1920 if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: T))
1921 return getConstantArrayInfoInChars(Context: *this, CAT);
1922 TypeInfo Info = getTypeInfo(T);
1923 return TypeInfoChars(toCharUnitsFromBits(BitSize: Info.Width),
1924 toCharUnitsFromBits(BitSize: Info.Align), Info.AlignRequirement);
1925}
1926
1927TypeInfoChars ASTContext::getTypeInfoInChars(QualType T) const {
1928 return getTypeInfoInChars(T: T.getTypePtr());
1929}
1930
1931bool ASTContext::isPromotableIntegerType(QualType T) const {
1932 // HLSL doesn't promote all small integer types to int, it
1933 // just uses the rank-based promotion rules for all types.
1934 if (getLangOpts().HLSL)
1935 return false;
1936
1937 if (const auto *BT = T->getAs<BuiltinType>())
1938 switch (BT->getKind()) {
1939 case BuiltinType::Bool:
1940 case BuiltinType::Char_S:
1941 case BuiltinType::Char_U:
1942 case BuiltinType::SChar:
1943 case BuiltinType::UChar:
1944 case BuiltinType::Short:
1945 case BuiltinType::UShort:
1946 case BuiltinType::WChar_S:
1947 case BuiltinType::WChar_U:
1948 case BuiltinType::Char8:
1949 case BuiltinType::Char16:
1950 case BuiltinType::Char32:
1951 return true;
1952 default:
1953 return false;
1954 }
1955
1956 // Enumerated types are promotable to their compatible integer types
1957 // (C99 6.3.1.1) a.k.a. its underlying type (C++ [conv.prom]p2).
1958 if (const auto *ED = T->getAsEnumDecl()) {
1959 if (T->isDependentType() || ED->getPromotionType().isNull() ||
1960 ED->isScoped())
1961 return false;
1962
1963 return true;
1964 }
1965
1966 // OverflowBehaviorTypes are promotable if their underlying type is promotable
1967 if (const auto *OBT = T->getAs<OverflowBehaviorType>()) {
1968 return isPromotableIntegerType(T: OBT->getUnderlyingType());
1969 }
1970
1971 return false;
1972}
1973
1974bool ASTContext::isAlignmentRequired(const Type *T) const {
1975 return getTypeInfo(T).AlignRequirement != AlignRequirementKind::None;
1976}
1977
1978bool ASTContext::isAlignmentRequired(QualType T) const {
1979 return isAlignmentRequired(T: T.getTypePtr());
1980}
1981
1982unsigned ASTContext::getTypeAlignIfKnown(QualType T,
1983 bool NeedsPreferredAlignment) const {
1984 // An alignment on a typedef overrides anything else.
1985 if (const auto *TT = T->getAs<TypedefType>())
1986 if (unsigned Align = TT->getDecl()->getMaxAlignment())
1987 return Align;
1988
1989 // If we have an (array of) complete type, we're done.
1990 T = getBaseElementType(QT: T);
1991 if (!T->isIncompleteType())
1992 return NeedsPreferredAlignment ? getPreferredTypeAlign(T) : getTypeAlign(T);
1993
1994 // If we had an array type, its element type might be a typedef
1995 // type with an alignment attribute.
1996 if (const auto *TT = T->getAs<TypedefType>())
1997 if (unsigned Align = TT->getDecl()->getMaxAlignment())
1998 return Align;
1999
2000 // Otherwise, see if the declaration of the type had an attribute.
2001 if (const auto *TD = T->getAsTagDecl())
2002 return TD->getMaxAlignment();
2003
2004 return 0;
2005}
2006
2007TypeInfo ASTContext::getTypeInfo(const Type *T) const {
2008 TypeInfoMap::iterator I = MemoizedTypeInfo.find(Val: T);
2009 if (I != MemoizedTypeInfo.end())
2010 return I->second;
2011
2012 // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup.
2013 TypeInfo TI = getTypeInfoImpl(T);
2014 MemoizedTypeInfo[T] = TI;
2015 return TI;
2016}
2017
/// getTypeInfoImpl - Return the size of the specified type, in bits. This
/// method does not work on incomplete types.
///
/// FIXME: Pointers into different addr spaces could have different sizes and
/// alignment requirements: getPointerInfo should take an AddrSpace, this
/// should take a QualType, &c.
TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
  uint64_t Width = 0;
  unsigned Align = 8;
  AlignRequirementKind AlignRequirement = AlignRequirementKind::None;
  LangAS AS = LangAS::Default;
  switch (T->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)                       \
  case Type::Class:                                                            \
    assert(!T->isDependentType() && "should not see dependent types here");    \
    return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr());
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Should not see dependent types");

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // GCC extension: alignof(function) = 32 bits
    Width = 0;
    Align = 32;
    break;

  case Type::IncompleteArray:
  case Type::VariableArray:
  case Type::ConstantArray:
  case Type::ArrayParameter: {
    // Model non-constant sized arrays as size zero, but track the alignment.
    uint64_t Size = 0;
    if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: T))
      Size = CAT->getZExtSize();

    TypeInfo EltInfo = getTypeInfo(T: cast<ArrayType>(Val: T)->getElementType());
    assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) &&
           "Overflow in array type bit size evaluation");
    Width = EltInfo.Width * Size;
    Align = EltInfo.Align;
    AlignRequirement = EltInfo.AlignRequirement;
    // The 32-bit Microsoft ABI does not pad the array width up to alignment.
    if (!getTargetInfo().getCXXABI().isMicrosoft() ||
        getTargetInfo().getPointerWidth(AddrSpace: LangAS::Default) == 64)
      Width = llvm::alignTo(Value: Width, Align);
    break;
  }

  case Type::ExtVector:
  case Type::Vector: {
    const auto *VT = cast<VectorType>(Val: T);
    TypeInfo EltInfo = getTypeInfo(T: VT->getElementType());
    // Packed boolean vectors use one bit per element.
    Width = VT->isPackedVectorBoolType(ctx: *this)
                ? VT->getNumElements()
                : EltInfo.Width * VT->getNumElements();
    // Enforce at least byte size and alignment.
    Width = std::max<unsigned>(a: 8, b: Width);
    Align = std::max<unsigned>(a: 8, b: Width);

    // If the alignment is not a power of 2, round up to the next power of 2.
    // This happens for non-power-of-2 length vectors.
    if (Align & (Align-1)) {
      Align = llvm::bit_ceil(Value: Align);
      Width = llvm::alignTo(Value: Width, Align);
    }
    // Adjust the alignment based on the target max.
    uint64_t TargetVectorAlign = Target->getMaxVectorAlign();
    if (TargetVectorAlign && TargetVectorAlign < Align)
      Align = TargetVectorAlign;
    if (VT->getVectorKind() == VectorKind::SveFixedLengthData)
      // Adjust the alignment for fixed-length SVE vectors. This is important
      // for non-power-of-2 vector lengths.
      Align = 128;
    else if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
      // Adjust the alignment for fixed-length SVE predicates.
      Align = 16;
    else if (VT->getVectorKind() == VectorKind::RVVFixedLengthData ||
             VT->getVectorKind() == VectorKind::RVVFixedLengthMask ||
             VT->getVectorKind() == VectorKind::RVVFixedLengthMask_1 ||
             VT->getVectorKind() == VectorKind::RVVFixedLengthMask_2 ||
             VT->getVectorKind() == VectorKind::RVVFixedLengthMask_4)
      // Adjust the alignment for fixed-length RVV vectors.
      Align = std::min<unsigned>(a: 64, b: Width);
    break;
  }

  case Type::ConstantMatrix: {
    const auto *MT = cast<ConstantMatrixType>(Val: T);
    TypeInfo ElementInfo = getTypeInfo(T: MT->getElementType());
    // The internal layout of a matrix value is implementation defined.
    // Initially be ABI compatible with arrays with respect to alignment and
    // size.
    Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns();
    Align = ElementInfo.Align;
    break;
  }

  case Type::Builtin:
    switch (cast<BuiltinType>(Val: T)->getKind()) {
    default: llvm_unreachable("Unknown builtin type!");
    case BuiltinType::Void:
      // GCC extension: alignof(void) = 8 bits.
      Width = 0;
      Align = 8;
      break;
    case BuiltinType::Bool:
      Width = Target->getBoolWidth();
      Align = Target->getBoolAlign();
      break;
    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::UChar:
    case BuiltinType::SChar:
    case BuiltinType::Char8:
      Width = Target->getCharWidth();
      Align = Target->getCharAlign();
      break;
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
      Width = Target->getWCharWidth();
      Align = Target->getWCharAlign();
      break;
    case BuiltinType::Char16:
      Width = Target->getChar16Width();
      Align = Target->getChar16Align();
      break;
    case BuiltinType::Char32:
      Width = Target->getChar32Width();
      Align = Target->getChar32Align();
      break;
    case BuiltinType::UShort:
    case BuiltinType::Short:
      Width = Target->getShortWidth();
      Align = Target->getShortAlign();
      break;
    case BuiltinType::UInt:
    case BuiltinType::Int:
      Width = Target->getIntWidth();
      Align = Target->getIntAlign();
      break;
    case BuiltinType::ULong:
    case BuiltinType::Long:
      Width = Target->getLongWidth();
      Align = Target->getLongAlign();
      break;
    case BuiltinType::ULongLong:
    case BuiltinType::LongLong:
      Width = Target->getLongLongWidth();
      Align = Target->getLongLongAlign();
      break;
    case BuiltinType::Int128:
    case BuiltinType::UInt128:
      Width = 128;
      Align = Target->getInt128Align();
      break;
    case BuiltinType::ShortAccum:
    case BuiltinType::UShortAccum:
    case BuiltinType::SatShortAccum:
    case BuiltinType::SatUShortAccum:
      Width = Target->getShortAccumWidth();
      Align = Target->getShortAccumAlign();
      break;
    case BuiltinType::Accum:
    case BuiltinType::UAccum:
    case BuiltinType::SatAccum:
    case BuiltinType::SatUAccum:
      Width = Target->getAccumWidth();
      Align = Target->getAccumAlign();
      break;
    case BuiltinType::LongAccum:
    case BuiltinType::ULongAccum:
    case BuiltinType::SatLongAccum:
    case BuiltinType::SatULongAccum:
      Width = Target->getLongAccumWidth();
      Align = Target->getLongAccumAlign();
      break;
    case BuiltinType::ShortFract:
    case BuiltinType::UShortFract:
    case BuiltinType::SatShortFract:
    case BuiltinType::SatUShortFract:
      Width = Target->getShortFractWidth();
      Align = Target->getShortFractAlign();
      break;
    case BuiltinType::Fract:
    case BuiltinType::UFract:
    case BuiltinType::SatFract:
    case BuiltinType::SatUFract:
      Width = Target->getFractWidth();
      Align = Target->getFractAlign();
      break;
    case BuiltinType::LongFract:
    case BuiltinType::ULongFract:
    case BuiltinType::SatLongFract:
    case BuiltinType::SatULongFract:
      Width = Target->getLongFractWidth();
      Align = Target->getLongFractAlign();
      break;
    case BuiltinType::BFloat16:
      // Fall back to the aux target's layout when the device target lacks
      // native bfloat16 in SYCL/OpenMP offloading compilations.
      if (Target->hasBFloat16Type()) {
        Width = Target->getBFloat16Width();
        Align = Target->getBFloat16Align();
      } else if ((getLangOpts().SYCLIsDevice ||
                  (getLangOpts().OpenMP &&
                   getLangOpts().OpenMPIsTargetDevice)) &&
                 AuxTarget->hasBFloat16Type()) {
        Width = AuxTarget->getBFloat16Width();
        Align = AuxTarget->getBFloat16Align();
      }
      break;
    case BuiltinType::Float16:
    case BuiltinType::Half:
      if (Target->hasFloat16Type() || !getLangOpts().OpenMP ||
          !getLangOpts().OpenMPIsTargetDevice) {
        Width = Target->getHalfWidth();
        Align = Target->getHalfAlign();
      } else {
        assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
               "Expected OpenMP device compilation.");
        Width = AuxTarget->getHalfWidth();
        Align = AuxTarget->getHalfAlign();
      }
      break;
    case BuiltinType::Float:
      Width = Target->getFloatWidth();
      Align = Target->getFloatAlign();
      break;
    case BuiltinType::Double:
      Width = Target->getDoubleWidth();
      Align = Target->getDoubleAlign();
      break;
    case BuiltinType::Ibm128:
      Width = Target->getIbm128Width();
      Align = Target->getIbm128Align();
      break;
    case BuiltinType::LongDouble:
      // OpenMP target devices mirror the host's long double layout when the
      // two targets disagree.
      if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
          (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() ||
           Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) {
        Width = AuxTarget->getLongDoubleWidth();
        Align = AuxTarget->getLongDoubleAlign();
      } else {
        Width = Target->getLongDoubleWidth();
        Align = Target->getLongDoubleAlign();
      }
      break;
    case BuiltinType::Float128:
      if (Target->hasFloat128Type() || !getLangOpts().OpenMP ||
          !getLangOpts().OpenMPIsTargetDevice) {
        Width = Target->getFloat128Width();
        Align = Target->getFloat128Align();
      } else {
        assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
               "Expected OpenMP device compilation.");
        Width = AuxTarget->getFloat128Width();
        Align = AuxTarget->getFloat128Align();
      }
      break;
    case BuiltinType::NullPtr:
      // C++ 3.9.1p11: sizeof(nullptr_t) == sizeof(void*)
      Width = Target->getPointerWidth(AddrSpace: LangAS::Default);
      Align = Target->getPointerAlign(AddrSpace: LangAS::Default);
      break;
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      Width = Target->getPointerWidth(AddrSpace: LangAS::Default);
      Align = Target->getPointerAlign(AddrSpace: LangAS::Default);
      break;
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix)                   \
  case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext)                                      \
  case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
      // OpenCL opaque types are laid out as pointers in their designated
      // address space.
      AS = Target->getOpenCLTypeAddrSpace(TK: getOpenCLTypeKind(T));
      Width = Target->getPointerWidth(AddrSpace: AS);
      Align = Target->getPointerAlign(AddrSpace: AS);
      break;
    // The SVE types are effectively target-specific. The length of an
    // SVE_VECTOR_TYPE is only known at runtime, but it is always a multiple
    // of 128 bits. There is one predicate bit for each vector byte, so the
    // length of an SVE_PREDICATE_TYPE is always a multiple of 16 bits.
    //
    // Because the length is only known at runtime, we use a dummy value
    // of 0 for the static length. The alignment values are those defined
    // by the Procedure Call Standard for the Arm Architecture.
#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId)                    \
  case BuiltinType::Id:                                                        \
    Width = 0;                                                                 \
    Align = 128;                                                               \
    break;
#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId)                 \
  case BuiltinType::Id:                                                        \
    Width = 0;                                                                 \
    Align = 16;                                                                \
    break;
#define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingletonId)                    \
  case BuiltinType::Id:                                                        \
    Width = 0;                                                                 \
    Align = 16;                                                                \
    break;
#define SVE_SCALAR_TYPE(Name, MangledName, Id, SingletonId, Bits)              \
  case BuiltinType::Id:                                                        \
    Width = Bits;                                                              \
    Align = Bits;                                                              \
    break;
#include "clang/Basic/AArch64ACLETypes.def"
#define PPC_VECTOR_TYPE(Name, Id, Size)                                        \
  case BuiltinType::Id:                                                        \
    Width = Size;                                                              \
    Align = Size;                                                              \
    break;
#include "clang/Basic/PPCTypes.def"
#define RVV_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, NF, IsSigned,   \
                        IsFP, IsBF)                                            \
  case BuiltinType::Id:                                                        \
    Width = 0;                                                                 \
    Align = ElBits;                                                            \
    break;
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, ElKind)                      \
  case BuiltinType::Id:                                                        \
    Width = 0;                                                                 \
    Align = 8;                                                                 \
    break;
#include "clang/Basic/RISCVVTypes.def"
#define WASM_TYPE(Name, Id, SingletonId)                                       \
  case BuiltinType::Id:                                                        \
    Width = 0;                                                                 \
    Align = 8;                                                                 \
    break;
#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define AMDGPU_TYPE(NAME, ID, SINGLETONID, WIDTH, ALIGN)                       \
  case BuiltinType::ID:                                                        \
    Width = WIDTH;                                                             \
    Align = ALIGN;                                                             \
    break;
#include "clang/Basic/AMDGPUTypes.def"
#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/HLSLIntangibleTypes.def"
      Width = Target->getPointerWidth(AddrSpace: LangAS::Default);
      Align = Target->getPointerAlign(AddrSpace: LangAS::Default);
      break;
    }
    break;
  case Type::ObjCObjectPointer:
    Width = Target->getPointerWidth(AddrSpace: LangAS::Default);
    Align = Target->getPointerAlign(AddrSpace: LangAS::Default);
    break;
  case Type::BlockPointer:
    AS = cast<BlockPointerType>(Val: T)->getPointeeType().getAddressSpace();
    Width = Target->getPointerWidth(AddrSpace: AS);
    Align = Target->getPointerAlign(AddrSpace: AS);
    break;
  case Type::LValueReference:
  case Type::RValueReference:
    // alignof and sizeof should never enter this code path here, so we go
    // the pointer route.
    AS = cast<ReferenceType>(Val: T)->getPointeeType().getAddressSpace();
    Width = Target->getPointerWidth(AddrSpace: AS);
    Align = Target->getPointerAlign(AddrSpace: AS);
    break;
  case Type::Pointer:
    AS = cast<PointerType>(Val: T)->getPointeeType().getAddressSpace();
    Width = Target->getPointerWidth(AddrSpace: AS);
    Align = Target->getPointerAlign(AddrSpace: AS);
    break;
  case Type::MemberPointer: {
    // Member pointer representation is C++-ABI-specific.
    const auto *MPT = cast<MemberPointerType>(Val: T);
    CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT);
    Width = MPI.Width;
    Align = MPI.Align;
    break;
  }
  case Type::Complex: {
    // Complex types have the same alignment as their elements, but twice the
    // size.
    TypeInfo EltInfo = getTypeInfo(T: cast<ComplexType>(Val: T)->getElementType());
    Width = EltInfo.Width * 2;
    Align = EltInfo.Align;
    break;
  }
  case Type::ObjCObject:
    return getTypeInfo(T: cast<ObjCObjectType>(Val: T)->getBaseType().getTypePtr());
  case Type::Adjusted:
  case Type::Decayed:
    return getTypeInfo(T: cast<AdjustedType>(Val: T)->getAdjustedType().getTypePtr());
  case Type::ObjCInterface: {
    const auto *ObjCI = cast<ObjCInterfaceType>(Val: T);
    // Invalid declarations get a safe dummy layout of one byte.
    if (ObjCI->getDecl()->isInvalidDecl()) {
      Width = 8;
      Align = 8;
      break;
    }
    const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(D: ObjCI->getDecl());
    Width = toBits(CharSize: Layout.getSize());
    Align = toBits(CharSize: Layout.getAlignment());
    break;
  }
  case Type::BitInt: {
    // _BitInt(N): the target decides the storage width/alignment for N bits.
    const auto *EIT = cast<BitIntType>(Val: T);
    Align = Target->getBitIntAlign(NumBits: EIT->getNumBits());
    Width = Target->getBitIntWidth(NumBits: EIT->getNumBits());
    break;
  }
  case Type::Record:
  case Type::Enum: {
    const auto *TT = cast<TagType>(Val: T);
    const TagDecl *TD = TT->getDecl()->getDefinitionOrSelf();

    // Invalid declarations get a safe dummy layout of one byte.
    if (TD->isInvalidDecl()) {
      Width = 8;
      Align = 8;
      break;
    }

    if (isa<EnumType>(Val: TT)) {
      // Enums take their layout from the underlying integer type; an aligned
      // attribute on the enum overrides the alignment.
      const EnumDecl *ED = cast<EnumDecl>(Val: TD);
      TypeInfo Info =
          getTypeInfo(T: ED->getIntegerType()->getUnqualifiedDesugaredType());
      if (unsigned AttrAlign = ED->getMaxAlignment()) {
        Info.Align = AttrAlign;
        Info.AlignRequirement = AlignRequirementKind::RequiredByEnum;
      }
      return Info;
    }

    const auto *RD = cast<RecordDecl>(Val: TD);
    const ASTRecordLayout &Layout = getASTRecordLayout(D: RD);
    Width = toBits(CharSize: Layout.getSize());
    Align = toBits(CharSize: Layout.getAlignment());
    AlignRequirement = RD->hasAttr<AlignedAttr>()
                           ? AlignRequirementKind::RequiredByRecord
                           : AlignRequirementKind::None;
    break;
  }

  case Type::SubstTemplateTypeParm:
    return getTypeInfo(T: cast<SubstTemplateTypeParmType>(Val: T)->
                       getReplacementType().getTypePtr());

  case Type::Auto:
  case Type::DeducedTemplateSpecialization: {
    const auto *A = cast<DeducedType>(Val: T);
    assert(!A->getDeducedType().isNull() &&
           "cannot request the size of an undeduced or dependent auto type");
    return getTypeInfo(T: A->getDeducedType().getTypePtr());
  }

  case Type::Paren:
    return getTypeInfo(T: cast<ParenType>(Val: T)->getInnerType().getTypePtr());

  case Type::MacroQualified:
    return getTypeInfo(
        T: cast<MacroQualifiedType>(Val: T)->getUnderlyingType().getTypePtr());

  case Type::ObjCTypeParam:
    return getTypeInfo(T: cast<ObjCTypeParamType>(Val: T)->desugar().getTypePtr());

  case Type::Using:
    return getTypeInfo(T: cast<UsingType>(Val: T)->desugar().getTypePtr());

  case Type::Typedef: {
    const auto *TT = cast<TypedefType>(Val: T);
    TypeInfo Info = getTypeInfo(T: TT->desugar().getTypePtr());
    // If the typedef has an aligned attribute on it, it overrides any computed
    // alignment we have. This violates the GCC documentation (which says that
    // attribute(aligned) can only round up) but matches its implementation.
    if (unsigned AttrAlign = TT->getDecl()->getMaxAlignment()) {
      Align = AttrAlign;
      AlignRequirement = AlignRequirementKind::RequiredByTypedef;
    } else {
      Align = Info.Align;
      AlignRequirement = Info.AlignRequirement;
    }
    Width = Info.Width;
    break;
  }

  case Type::Attributed:
    return getTypeInfo(
        T: cast<AttributedType>(Val: T)->getEquivalentType().getTypePtr());

  case Type::CountAttributed:
    return getTypeInfo(T: cast<CountAttributedType>(Val: T)->desugar().getTypePtr());

  case Type::BTFTagAttributed:
    return getTypeInfo(
        T: cast<BTFTagAttributedType>(Val: T)->getWrappedType().getTypePtr());

  case Type::OverflowBehavior:
    return getTypeInfo(
        T: cast<OverflowBehaviorType>(Val: T)->getUnderlyingType().getTypePtr());

  case Type::HLSLAttributedResource:
    return getTypeInfo(
        T: cast<HLSLAttributedResourceType>(Val: T)->getWrappedType().getTypePtr());

  case Type::HLSLInlineSpirv: {
    const auto *ST = cast<HLSLInlineSpirvType>(Val: T);
    // Size is specified in bytes, convert to bits
    Width = ST->getSize() * 8;
    Align = ST->getAlignment();
    if (Width == 0 && Align == 0) {
      // We are defaulting to laying out opaque SPIR-V types as 32-bit ints.
      Width = 32;
      Align = 32;
    }
    break;
  }

  case Type::Atomic: {
    // Start with the base type information.
    TypeInfo Info = getTypeInfo(T: cast<AtomicType>(Val: T)->getValueType());
    Width = Info.Width;
    Align = Info.Align;

    if (!Width) {
      // An otherwise zero-sized type should still generate an
      // atomic operation.
      Width = Target->getCharWidth();
      assert(Align);
    } else if (Width <= Target->getMaxAtomicPromoteWidth()) {
      // If the size of the type doesn't exceed the platform's max
      // atomic promotion width, make the size and alignment more
      // favorable to atomic operations:

      // Round the size up to a power of 2.
      Width = llvm::bit_ceil(Value: Width);

      // Set the alignment equal to the size.
      Align = static_cast<unsigned>(Width);
    }
  }
  break;

  case Type::PredefinedSugar:
    return getTypeInfo(T: cast<PredefinedSugarType>(Val: T)->desugar().getTypePtr());

  case Type::Pipe:
    // OpenCL pipes are laid out as pointers into the global address space.
    Width = Target->getPointerWidth(AddrSpace: LangAS::opencl_global);
    Align = Target->getPointerAlign(AddrSpace: LangAS::opencl_global);
    break;
  }

  assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
  return TypeInfo(Width, Align, AlignRequirement);
}
2573
2574unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
2575 UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(Val: T);
2576 if (I != MemoizedUnadjustedAlign.end())
2577 return I->second;
2578
2579 unsigned UnadjustedAlign;
2580 if (const auto *RT = T->getAsCanonical<RecordType>()) {
2581 const ASTRecordLayout &Layout = getASTRecordLayout(D: RT->getDecl());
2582 UnadjustedAlign = toBits(CharSize: Layout.getUnadjustedAlignment());
2583 } else if (const auto *ObjCI = T->getAsCanonical<ObjCInterfaceType>()) {
2584 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(D: ObjCI->getDecl());
2585 UnadjustedAlign = toBits(CharSize: Layout.getUnadjustedAlignment());
2586 } else {
2587 UnadjustedAlign = getTypeAlign(T: T->getUnqualifiedDesugaredType());
2588 }
2589
2590 MemoizedUnadjustedAlign[T] = UnadjustedAlign;
2591 return UnadjustedAlign;
2592}
2593
2594unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const {
2595 unsigned SimdAlign = llvm::OpenMPIRBuilder::getOpenMPDefaultSimdAlign(
2596 TargetTriple: getTargetInfo().getTriple(), Features: Target->getTargetOpts().FeatureMap);
2597 return SimdAlign;
2598}
2599
2600/// toCharUnitsFromBits - Convert a size in bits to a size in characters.
2601CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
2602 return CharUnits::fromQuantity(Quantity: BitSize / getCharWidth());
2603}
2604
/// toBits - Convert a size in characters to a size in bits.
int64_t ASTContext::toBits(CharUnits CharSize) const {
  return CharSize.getQuantity() * getCharWidth();
}
2609
2610/// getTypeSizeInChars - Return the size of the specified type, in characters.
2611/// This method does not work on incomplete types.
2612CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
2613 return getTypeInfoInChars(T).Width;
2614}
2615CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
2616 return getTypeInfoInChars(T).Width;
2617}
2618
2619/// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
2620/// characters. This method does not work on incomplete types.
2621CharUnits ASTContext::getTypeAlignInChars(QualType T) const {
2622 return toCharUnitsFromBits(BitSize: getTypeAlign(T));
2623}
2624CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
2625 return toCharUnitsFromBits(BitSize: getTypeAlign(T));
2626}
2627
2628/// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a
2629/// type, in characters, before alignment adjustments. This method does
2630/// not work on incomplete types.
2631CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const {
2632 return toCharUnitsFromBits(BitSize: getTypeUnadjustedAlign(T));
2633}
2634CharUnits ASTContext::getTypeUnadjustedAlignInChars(const Type *T) const {
2635 return toCharUnitsFromBits(BitSize: getTypeUnadjustedAlign(T));
2636}
2637
/// getPreferredTypeAlign - Return the "preferred" alignment of the specified
/// type for the current target in bits. This can be different than the ABI
/// alignment in cases where it is beneficial for performance or backwards
/// compatibility preserving to overalign a data type. (Note: despite the name,
/// the preferred alignment is ABI-impacting, and not an optimization.)
unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
  TypeInfo TI = getTypeInfo(T);
  unsigned ABIAlign = TI.Align;

  // For arrays, the preferred alignment is driven by the element type.
  T = T->getBaseElementTypeUnsafe();

  // The preferred alignment of member pointers is that of a pointer.
  if (T->isMemberPointerType())
    return getPreferredTypeAlign(T: getPointerDiffType().getTypePtr());

  // Some targets never overalign past the ABI alignment.
  if (!Target->allowsLargerPreferedTypeAlignment())
    return ABIAlign;

  if (const auto *RD = T->getAsRecordDecl()) {
    // When used as part of a typedef, or together with a 'packed' attribute,
    // the 'aligned' attribute can be used to decrease alignment. Note that the
    // 'packed' case is already taken into consideration when computing the
    // alignment, we only need to handle the typedef case here.
    if (TI.AlignRequirement == AlignRequirementKind::RequiredByTypedef ||
        RD->isInvalidDecl())
      return ABIAlign;

    unsigned PreferredAlign = static_cast<unsigned>(
        toBits(CharSize: getASTRecordLayout(D: RD).PreferredAlignment));
    assert(PreferredAlign >= ABIAlign &&
           "PreferredAlign should be at least as large as ABIAlign.");
    return PreferredAlign;
  }

  // Double (and, for targets supporting AIX `power` alignment, long double) and
  // long long should be naturally aligned (despite requiring less alignment) if
  // possible.
  if (const auto *CT = T->getAs<ComplexType>())
    T = CT->getElementType().getTypePtr();
  if (const auto *ED = T->getAsEnumDecl())
    T = ED->getIntegerType().getTypePtr();
  if (T->isSpecificBuiltinType(K: BuiltinType::Double) ||
      T->isSpecificBuiltinType(K: BuiltinType::LongLong) ||
      T->isSpecificBuiltinType(K: BuiltinType::ULongLong) ||
      (T->isSpecificBuiltinType(K: BuiltinType::LongDouble) &&
       Target->defaultsToAIXPowerAlignment()))
    // Don't increase the alignment if an alignment attribute was specified on a
    // typedef declaration.
    if (!TI.isAlignRequired())
      return std::max(a: ABIAlign, b: (unsigned)getTypeSize(T));

  return ABIAlign;
}
2691
2692/// getTargetDefaultAlignForAttributeAligned - Return the default alignment
2693/// for __attribute__((aligned)) on this target, to be used if no alignment
2694/// value is specified.
2695unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const {
2696 return getTargetInfo().getDefaultAlignForAttributeAligned();
2697}
2698
2699/// getAlignOfGlobalVar - Return the alignment in bits that should be given
2700/// to a global variable of the specified type.
2701unsigned ASTContext::getAlignOfGlobalVar(QualType T, const VarDecl *VD) const {
2702 uint64_t TypeSize = getTypeSize(T: T.getTypePtr());
2703 return std::max(a: getPreferredTypeAlign(T),
2704 b: getMinGlobalAlignOfVar(Size: TypeSize, VD));
2705}
2706
2707/// getAlignOfGlobalVarInChars - Return the alignment in characters that
2708/// should be given to a global variable of the specified type.
2709CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T,
2710 const VarDecl *VD) const {
2711 return toCharUnitsFromBits(BitSize: getAlignOfGlobalVar(T, VD));
2712}
2713
2714unsigned ASTContext::getMinGlobalAlignOfVar(uint64_t Size,
2715 const VarDecl *VD) const {
2716 // Make the default handling as that of a non-weak definition in the
2717 // current translation unit.
2718 bool HasNonWeakDef = !VD || (VD->hasDefinition() && !VD->isWeak());
2719 return getTargetInfo().getMinGlobalAlign(Size, HasNonWeakDef);
2720}
2721
2722CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const {
2723 CharUnits Offset = CharUnits::Zero();
2724 const ASTRecordLayout *Layout = &getASTRecordLayout(D: RD);
2725 while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) {
2726 Offset += Layout->getBaseClassOffset(Base);
2727 Layout = &getASTRecordLayout(D: Base);
2728 }
2729 return Offset;
2730}
2731
2732CharUnits ASTContext::getMemberPointerPathAdjustment(const APValue &MP) const {
2733 const ValueDecl *MPD = MP.getMemberPointerDecl();
2734 CharUnits ThisAdjustment = CharUnits::Zero();
2735 ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath();
2736 bool DerivedMember = MP.isMemberPointerToDerivedMember();
2737 const CXXRecordDecl *RD = cast<CXXRecordDecl>(Val: MPD->getDeclContext());
2738 for (unsigned I = 0, N = Path.size(); I != N; ++I) {
2739 const CXXRecordDecl *Base = RD;
2740 const CXXRecordDecl *Derived = Path[I];
2741 if (DerivedMember)
2742 std::swap(a&: Base, b&: Derived);
2743 ThisAdjustment += getASTRecordLayout(D: Derived).getBaseClassOffset(Base);
2744 RD = Path[I];
2745 }
2746 if (DerivedMember)
2747 ThisAdjustment = -ThisAdjustment;
2748 return ThisAdjustment;
2749}
2750
2751/// DeepCollectObjCIvars -
2752/// This routine first collects all declared, but not synthesized, ivars in
2753/// super class and then collects all ivars, including those synthesized for
2754/// current class. This routine is used for implementation of current class
2755/// when all ivars, declared and synthesized are known.
2756void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI,
2757 bool leafClass,
2758 SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const {
2759 if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
2760 DeepCollectObjCIvars(OI: SuperClass, leafClass: false, Ivars);
2761 if (!leafClass) {
2762 llvm::append_range(C&: Ivars, R: OI->ivars());
2763 } else {
2764 auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
2765 for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
2766 Iv= Iv->getNextIvar())
2767 Ivars.push_back(Elt: Iv);
2768 }
2769}
2770
2771/// CollectInheritedProtocols - Collect all protocols in current class and
2772/// those inherited by it.
2773void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
2774 llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) {
2775 if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(Val: CDecl)) {
2776 // We can use protocol_iterator here instead of
2777 // all_referenced_protocol_iterator since we are walking all categories.
2778 for (auto *Proto : OI->all_referenced_protocols()) {
2779 CollectInheritedProtocols(CDecl: Proto, Protocols);
2780 }
2781
2782 // Categories of this Interface.
2783 for (const auto *Cat : OI->visible_categories())
2784 CollectInheritedProtocols(CDecl: Cat, Protocols);
2785
2786 if (ObjCInterfaceDecl *SD = OI->getSuperClass())
2787 while (SD) {
2788 CollectInheritedProtocols(CDecl: SD, Protocols);
2789 SD = SD->getSuperClass();
2790 }
2791 } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(Val: CDecl)) {
2792 for (auto *Proto : OC->protocols()) {
2793 CollectInheritedProtocols(CDecl: Proto, Protocols);
2794 }
2795 } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(Val: CDecl)) {
2796 // Insert the protocol.
2797 if (!Protocols.insert(
2798 Ptr: const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second)
2799 return;
2800
2801 for (auto *Proto : OP->protocols())
2802 CollectInheritedProtocols(CDecl: Proto, Protocols);
2803 }
2804}
2805
2806static bool unionHasUniqueObjectRepresentations(const ASTContext &Context,
2807 const RecordDecl *RD,
2808 bool CheckIfTriviallyCopyable) {
2809 assert(RD->isUnion() && "Must be union type");
2810 CharUnits UnionSize =
2811 Context.getTypeSizeInChars(T: Context.getCanonicalTagType(TD: RD));
2812
2813 for (const auto *Field : RD->fields()) {
2814 if (!Context.hasUniqueObjectRepresentations(Ty: Field->getType(),
2815 CheckIfTriviallyCopyable))
2816 return false;
2817 CharUnits FieldSize = Context.getTypeSizeInChars(T: Field->getType());
2818 if (FieldSize != UnionSize)
2819 return false;
2820 }
2821 return !RD->field_empty();
2822}
2823
// Bit offset of a field subobject within its record. The layout parameter
// exists only for overload symmetry with the base-class variant below.
static int64_t getSubobjectOffset(const FieldDecl *Field,
                                  const ASTContext &Context,
                                  const clang::ASTRecordLayout & /*Layout*/) {
  return Context.getFieldOffset(FD: Field);
}
2829
// Bit offset of a base-class subobject, taken from the record layout.
static int64_t getSubobjectOffset(const CXXRecordDecl *RD,
                                  const ASTContext &Context,
                                  const clang::ASTRecordLayout &Layout) {
  return Context.toBits(CharSize: Layout.getBaseClassOffset(Base: RD));
}
2835
2836static std::optional<int64_t>
2837structHasUniqueObjectRepresentations(const ASTContext &Context,
2838 const RecordDecl *RD,
2839 bool CheckIfTriviallyCopyable);
2840
// Size in bits a field contributes to the object representation, or
// std::nullopt if the field makes the representation non-unique. Bitfields
// contribute their declared width; unnamed bitfields are pure padding and
// contribute 0.
static std::optional<int64_t>
getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context,
                       bool CheckIfTriviallyCopyable) {
  // Non-union record fields recurse into the struct-level check.
  if (const auto *RD = Field->getType()->getAsRecordDecl();
      RD && !RD->isUnion())
    return structHasUniqueObjectRepresentations(Context, RD,
                                                CheckIfTriviallyCopyable);

  // A _BitInt type may not be unique if it has padding bits
  // but if it is a bitfield the padding bits are not used.
  bool IsBitIntType = Field->getType()->isBitIntType();
  if (!Field->getType()->isReferenceType() && !IsBitIntType &&
      !Context.hasUniqueObjectRepresentations(Ty: Field->getType(),
                                              CheckIfTriviallyCopyable))
    return std::nullopt;

  int64_t FieldSizeInBits =
      Context.toBits(CharSize: Context.getTypeSizeInChars(T: Field->getType()));
  if (Field->isBitField()) {
    // If we have explicit padding bits, they don't contribute bits
    // to the actual object representation, so return 0.
    if (Field->isUnnamedBitField())
      return 0;

    int64_t BitfieldSize = Field->getBitWidthValue();
    if (IsBitIntType) {
      // An oversized _BitInt bitfield implies padding bits in storage.
      if ((unsigned)BitfieldSize >
          cast<BitIntType>(Val: Field->getType())->getNumBits())
        return std::nullopt;
    } else if (BitfieldSize > FieldSizeInBits) {
      // Width exceeding the type's size implies padding bits.
      return std::nullopt;
    }
    FieldSizeInBits = BitfieldSize;
  } else if (IsBitIntType && !Context.hasUniqueObjectRepresentations(
                                 Ty: Field->getType(), CheckIfTriviallyCopyable)) {
    // Non-bitfield _BitInt: its own padding bits count against uniqueness.
    return std::nullopt;
  }
  return FieldSizeInBits;
}
2880
// Size in bits a base-class subobject contributes, or std::nullopt if the
// base makes the representation non-unique.
static std::optional<int64_t>
getSubobjectSizeInBits(const CXXRecordDecl *RD, const ASTContext &Context,
                       bool CheckIfTriviallyCopyable) {
  return structHasUniqueObjectRepresentations(Context, RD,
                                              CheckIfTriviallyCopyable);
}
2887
2888template <typename RangeT>
2889static std::optional<int64_t> structSubobjectsHaveUniqueObjectRepresentations(
2890 const RangeT &Subobjects, int64_t CurOffsetInBits,
2891 const ASTContext &Context, const clang::ASTRecordLayout &Layout,
2892 bool CheckIfTriviallyCopyable) {
2893 for (const auto *Subobject : Subobjects) {
2894 std::optional<int64_t> SizeInBits =
2895 getSubobjectSizeInBits(Subobject, Context, CheckIfTriviallyCopyable);
2896 if (!SizeInBits)
2897 return std::nullopt;
2898 if (*SizeInBits != 0) {
2899 int64_t Offset = getSubobjectOffset(Subobject, Context, Layout);
2900 if (Offset != CurOffsetInBits)
2901 return std::nullopt;
2902 CurOffsetInBits += *SizeInBits;
2903 }
2904 }
2905 return CurOffsetInBits;
2906}
2907
// Returns the number of bits a struct/class actually occupies with no
// internal padding, or std::nullopt if its representation is not unique.
// The caller compares the result against the full type size to detect
// tail padding.
static std::optional<int64_t>
structHasUniqueObjectRepresentations(const ASTContext &Context,
                                     const RecordDecl *RD,
                                     bool CheckIfTriviallyCopyable) {
  assert(!RD->isUnion() && "Must be struct/class type");
  const auto &Layout = Context.getASTRecordLayout(D: RD);

  int64_t CurOffsetInBits = 0;
  if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(Val: RD)) {
    // A vtable pointer (or virtual base) rules out unique representations.
    if (ClassDecl->isDynamicClass())
      return std::nullopt;

    SmallVector<CXXRecordDecl *, 4> Bases;
    for (const auto &Base : ClassDecl->bases()) {
      // Empty types can be inherited from, and non-empty types can potentially
      // have tail padding, so just make sure there isn't an error.
      Bases.emplace_back(Args: Base.getType()->getAsCXXRecordDecl());
    }

    // Bases must be checked in layout order, which need not match
    // declaration order.
    llvm::sort(C&: Bases, Comp: [&](const CXXRecordDecl *L, const CXXRecordDecl *R) {
      return Layout.getBaseClassOffset(Base: L) < Layout.getBaseClassOffset(Base: R);
    });

    std::optional<int64_t> OffsetAfterBases =
        structSubobjectsHaveUniqueObjectRepresentations(
            Subobjects: Bases, CurOffsetInBits, Context, Layout, CheckIfTriviallyCopyable);
    if (!OffsetAfterBases)
      return std::nullopt;
    CurOffsetInBits = *OffsetAfterBases;
  }

  // Fields are laid out after all bases.
  std::optional<int64_t> OffsetAfterFields =
      structSubobjectsHaveUniqueObjectRepresentations(
          Subobjects: RD->fields(), CurOffsetInBits, Context, Layout,
          CheckIfTriviallyCopyable);
  if (!OffsetAfterFields)
    return std::nullopt;
  CurOffsetInBits = *OffsetAfterFields;

  return CurOffsetInBits;
}
2949
bool ASTContext::hasUniqueObjectRepresentations(
    QualType Ty, bool CheckIfTriviallyCopyable) const {
  // C++17 [meta.unary.prop]:
  //   The predicate condition for a template specialization
  //   has_unique_object_representations<T> shall be satisfied if and only if:
  //     (9.1) - T is trivially copyable, and
  //     (9.2) - any two objects of type T with the same value have the same
  //     object representation, where:
  //     - two objects of array or non-union class type are considered to have
  //       the same value if their respective sequences of direct subobjects
  //       have the same values, and
  //     - two objects of union type are considered to have the same value if
  //       they have the same active member and the corresponding members have
  //       the same value.
  //   The set of scalar types for which this condition holds is
  //   implementation-defined. [ Note: If a type has padding bits, the condition
  //   does not hold; otherwise, the condition holds true for unsigned integral
  //   types. -- end note ]
  assert(!Ty.isNull() && "Null QualType sent to unique object rep check");

  // Arrays are unique only if their element type is unique.
  if (Ty->isArrayType())
    return hasUniqueObjectRepresentations(Ty: getBaseElementType(QT: Ty),
                                          CheckIfTriviallyCopyable);

  assert((Ty->isVoidType() || !Ty->isIncompleteType()) &&
         "hasUniqueObjectRepresentations should not be called with an "
         "incomplete type");

  // (9.1) - T is trivially copyable...
  if (CheckIfTriviallyCopyable && !Ty.isTriviallyCopyableType(Context: *this))
    return false;

  // All integrals and enums are unique.
  if (Ty->isIntegralOrEnumerationType()) {
    // Address discriminated integer types are not unique.
    if (Ty.hasAddressDiscriminatedPointerAuth())
      return false;
    // Except _BitInt types that have padding bits.
    if (const auto *BIT = Ty->getAs<BitIntType>())
      return getTypeSize(T: BIT) == BIT->getNumBits();

    return true;
  }

  // All other pointers are unique.
  if (Ty->isPointerType())
    return !Ty.hasAddressDiscriminatedPointerAuth();

  // Member pointers are unique unless the ABI says they carry padding.
  if (const auto *MPT = Ty->getAs<MemberPointerType>())
    return !ABI->getMemberPointerInfo(MPT).HasPadding;

  if (const auto *Record = Ty->getAsRecordDecl()) {
    if (Record->isInvalidDecl())
      return false;

    if (Record->isUnion())
      return unionHasUniqueObjectRepresentations(Context: *this, RD: Record,
                                                 CheckIfTriviallyCopyable);

    // For structs/classes, the occupied bits must exactly equal the type's
    // size — any difference is padding.
    std::optional<int64_t> StructSize = structHasUniqueObjectRepresentations(
        Context: *this, RD: Record, CheckIfTriviallyCopyable);

    return StructSize && *StructSize == static_cast<int64_t>(getTypeSize(T: Ty));
  }

  // FIXME: More cases to handle here (list by rsmith):
  // vectors (careful about, eg, vector of 3 foo)
  // _Complex int and friends
  // _Atomic T
  // Obj-C block pointers
  // Obj-C object pointers
  // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t,
  // clk_event_t, queue_t, reserve_id_t)
  // There're also Obj-C class types and the Obj-C selector type, but I think it
  // makes sense for those to return false here.

  return false;
}
3029
3030unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const {
3031 unsigned count = 0;
3032 // Count ivars declared in class extension.
3033 for (const auto *Ext : OI->known_extensions())
3034 count += Ext->ivar_size();
3035
3036 // Count ivar defined in this class's implementation. This
3037 // includes synthesized ivars.
3038 if (ObjCImplementationDecl *ImplDecl = OI->getImplementation())
3039 count += ImplDecl->ivar_size();
3040
3041 return count;
3042}
3043
3044bool ASTContext::isSentinelNullExpr(const Expr *E) {
3045 if (!E)
3046 return false;
3047
3048 // nullptr_t is always treated as null.
3049 if (E->getType()->isNullPtrType()) return true;
3050
3051 if (E->getType()->isAnyPointerType() &&
3052 E->IgnoreParenCasts()->isNullPointerConstant(Ctx&: *this,
3053 NPC: Expr::NPC_ValueDependentIsNull))
3054 return true;
3055
3056 // Unfortunately, __null has type 'int'.
3057 if (isa<GNUNullExpr>(Val: E)) return true;
3058
3059 return false;
3060}
3061
3062/// Get the implementation of ObjCInterfaceDecl, or nullptr if none
3063/// exists.
3064ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) {
3065 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
3066 I = ObjCImpls.find(Val: D);
3067 if (I != ObjCImpls.end())
3068 return cast<ObjCImplementationDecl>(Val: I->second);
3069 return nullptr;
3070}
3071
3072/// Get the implementation of ObjCCategoryDecl, or nullptr if none
3073/// exists.
3074ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) {
3075 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
3076 I = ObjCImpls.find(Val: D);
3077 if (I != ObjCImpls.end())
3078 return cast<ObjCCategoryImplDecl>(Val: I->second);
3079 return nullptr;
3080}
3081
/// Set the implementation of ObjCInterfaceDecl.
/// Records ImplD as the @implementation of interface IFaceD; overwrites any
/// previously recorded implementation.
void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD,
                           ObjCImplementationDecl *ImplD) {
  assert(IFaceD && ImplD && "Passed null params");
  ObjCImpls[IFaceD] = ImplD;
}
3088
/// Set the implementation of ObjCCategoryDecl.
/// Records ImplD as the @implementation of category CatD; overwrites any
/// previously recorded implementation.
void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD,
                           ObjCCategoryImplDecl *ImplD) {
  assert(CatD && ImplD && "Passed null params");
  ObjCImpls[CatD] = ImplD;
}
3095
/// Return the recorded redeclaration of \p MD, or null if none was set.
const ObjCMethodDecl *
ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const {
  return ObjCMethodRedecls.lookup(Val: MD);
}
3100
/// Record \p Redecl as the (single) redeclaration of \p MD. A method may
/// have at most one recorded redeclaration, enforced by the assert.
void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD,
                                            const ObjCMethodDecl *Redecl) {
  assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration");
  ObjCMethodRedecls[MD] = Redecl;
}
3106
3107const ObjCInterfaceDecl *ASTContext::getObjContainingInterface(
3108 const NamedDecl *ND) const {
3109 if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(Val: ND->getDeclContext()))
3110 return ID;
3111 if (const auto *CD = dyn_cast<ObjCCategoryDecl>(Val: ND->getDeclContext()))
3112 return CD->getClassInterface();
3113 if (const auto *IMD = dyn_cast<ObjCImplDecl>(Val: ND->getDeclContext()))
3114 return IMD->getClassInterface();
3115
3116 return nullptr;
3117}
3118
3119/// Get the copy initialization expression of VarDecl, or nullptr if
3120/// none exists.
3121BlockVarCopyInit ASTContext::getBlockVarCopyInit(const VarDecl *VD) const {
3122 assert(VD && "Passed null params");
3123 assert(VD->hasAttr<BlocksAttr>() &&
3124 "getBlockVarCopyInits - not __block var");
3125 auto I = BlockVarCopyInits.find(Val: VD);
3126 if (I != BlockVarCopyInits.end())
3127 return I->second;
3128 return {nullptr, false};
3129}
3130
/// Set the copy initialization expression of a block var decl.
/// \param VD       a variable carrying the 'blocks' attribute (asserted).
/// \param CopyExpr the expression used to copy-initialize the __block var.
/// \param CanThrow whether evaluating CopyExpr can throw.
void ASTContext::setBlockVarCopyInit(const VarDecl*VD, Expr *CopyExpr,
                                     bool CanThrow) {
  assert(VD && CopyExpr && "Passed null params");
  assert(VD->hasAttr<BlocksAttr>() &&
         "setBlockVarCopyInits - not __block var");
  BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow);
}
3139
3140TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T,
3141 unsigned DataSize) const {
3142 if (!DataSize)
3143 DataSize = TypeLoc::getFullDataSizeForType(Ty: T);
3144 else
3145 assert(DataSize == TypeLoc::getFullDataSizeForType(T) &&
3146 "incorrect data size provided to CreateTypeSourceInfo!");
3147
3148 auto *TInfo =
3149 (TypeSourceInfo*)BumpAlloc.Allocate(Size: sizeof(TypeSourceInfo) + DataSize, Alignment: 8);
3150 new (TInfo) TypeSourceInfo(T, DataSize);
3151 return TInfo;
3152}
3153
3154TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T,
3155 SourceLocation L) const {
3156 TypeSourceInfo *TSI = CreateTypeSourceInfo(T);
3157 TSI->getTypeLoc().initialize(Context&: const_cast<ASTContext &>(*this), Loc: L);
3158 return TSI;
3159}
3160
/// Return the (cached) layout for the given ObjC interface; thin wrapper
/// over getObjCLayout.
const ASTRecordLayout &
ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const {
  return getObjCLayout(D);
}
3165
3166static auto getCanonicalTemplateArguments(const ASTContext &C,
3167 ArrayRef<TemplateArgument> Args,
3168 bool &AnyNonCanonArgs) {
3169 SmallVector<TemplateArgument, 16> CanonArgs(Args);
3170 AnyNonCanonArgs |= C.canonicalizeTemplateArguments(Args: CanonArgs);
3171 return CanonArgs;
3172}
3173
3174bool ASTContext::canonicalizeTemplateArguments(
3175 MutableArrayRef<TemplateArgument> Args) const {
3176 bool AnyNonCanonArgs = false;
3177 for (auto &Arg : Args) {
3178 TemplateArgument OrigArg = Arg;
3179 Arg = getCanonicalTemplateArgument(Arg);
3180 AnyNonCanonArgs |= !Arg.structurallyEquals(Other: OrigArg);
3181 }
3182 return AnyNonCanonArgs;
3183}
3184
3185//===----------------------------------------------------------------------===//
3186// Type creation/memoization methods
3187//===----------------------------------------------------------------------===//
3188
/// Return the (uniqued) extended-qualifier type wrapping \p baseType with
/// \p quals. Fast qualifiers are split off and carried in the returned
/// QualType's bits rather than in the ExtQuals node.
QualType
ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const {
  unsigned fastQuals = quals.getFastQualifiers();
  quals.removeFastQualifiers();

  // Check if we've already instantiated this type.
  llvm::FoldingSetNodeID ID;
  ExtQuals::Profile(ID, BaseType: baseType, Quals: quals);
  void *insertPos = nullptr;
  if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, InsertPos&: insertPos)) {
    assert(eq->getQualifiers() == quals);
    return QualType(eq, fastQuals);
  }

  // If the base type is not canonical, make the appropriate canonical type.
  QualType canon;
  if (!baseType->isCanonicalUnqualified()) {
    SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split();
    canonSplit.Quals.addConsistentQualifiers(qs: quals);
    canon = getExtQualType(baseType: canonSplit.Ty, quals: canonSplit.Quals);

    // Re-find the insert position: the recursive call above may have
    // inserted into the folding set and invalidated insertPos.
    (void) ExtQualNodes.FindNodeOrInsertPos(ID, InsertPos&: insertPos);
  }

  auto *eq = new (*this, alignof(ExtQuals)) ExtQuals(baseType, canon, quals);
  ExtQualNodes.InsertNode(N: eq, InsertPos: insertPos);
  return QualType(eq, fastQuals);
}
3218
3219QualType ASTContext::getAddrSpaceQualType(QualType T,
3220 LangAS AddressSpace) const {
3221 QualType CanT = getCanonicalType(T);
3222 if (CanT.getAddressSpace() == AddressSpace)
3223 return T;
3224
3225 // If we are composing extended qualifiers together, merge together
3226 // into one ExtQuals node.
3227 QualifierCollector Quals;
3228 const Type *TypeNode = Quals.strip(type: T);
3229
3230 // If this type already has an address space specified, it cannot get
3231 // another one.
3232 assert(!Quals.hasAddressSpace() &&
3233 "Type cannot be in multiple addr spaces!");
3234 Quals.addAddressSpace(space: AddressSpace);
3235
3236 return getExtQualType(baseType: TypeNode, quals: Quals);
3237}
3238
/// Return \p T with its address-space qualifier removed, preserving all
/// other qualifiers. For arrays the qualifier is stripped from the element
/// type; otherwise sugar is peeled layer by layer until the address space
/// is gone.
QualType ASTContext::removeAddrSpaceQualType(QualType T) const {
  // If the type is not qualified with an address space, just return it
  // immediately.
  if (!T.hasAddressSpace())
    return T;

  QualifierCollector Quals;
  const Type *TypeNode;
  // For arrays, strip the qualifier off the element type, then reconstruct the
  // array type
  if (T.getTypePtr()->isArrayType()) {
    T = getUnqualifiedArrayType(T, Quals);
    TypeNode = T.getTypePtr();
  } else {
    // If we are composing extended qualifiers together, merge together
    // into one ExtQuals node.
    while (T.hasAddressSpace()) {
      TypeNode = Quals.strip(type: T);

      // If the type no longer has an address space after stripping qualifiers,
      // jump out.
      if (!QualType(TypeNode, 0).hasAddressSpace())
        break;

      // There might be sugar in the way. Strip it and try again.
      T = T.getSingleStepDesugaredType(Context: *this);
    }
  }

  Quals.removeAddressSpace();

  // Removal of the address space can mean there are no longer any
  // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts)
  // or required.
  if (Quals.hasNonFastQualifiers())
    return getExtQualType(baseType: TypeNode, quals: Quals);
  else
    return QualType(TypeNode, Quals.getFastQualifiers());
}
3278
3279uint16_t
3280ASTContext::getPointerAuthVTablePointerDiscriminator(const CXXRecordDecl *RD) {
3281 assert(RD->isPolymorphic() &&
3282 "Attempted to get vtable pointer discriminator on a monomorphic type");
3283 std::unique_ptr<MangleContext> MC(createMangleContext());
3284 SmallString<256> Str;
3285 llvm::raw_svector_ostream Out(Str);
3286 MC->mangleCXXVTable(RD, Out);
3287 return llvm::getPointerAuthStableSipHash(S: Str);
3288}
3289
/// Encode a function type for use in the discriminator of a function pointer
/// type. We can't use the itanium scheme for this since C has quite permissive
/// rules for type compatibility that we need to be compatible with.
///
/// Formally, this function associates every function pointer type T with an
/// encoded string E(T). Let the equivalence relation T1 ~ T2 be defined as
/// E(T1) == E(T2). E(T) is part of the ABI of values of type T. C type
/// compatibility requires equivalent treatment under the ABI, so
/// CCompatible(T1, T2) must imply E(T1) == E(T2), that is, CCompatible must be
/// a subset of ~. Crucially, however, it must be a proper subset because
/// CCompatible is not an equivalence relation: for example, int[] is compatible
/// with both int[1] and int[2], but the latter are not compatible with each
/// other. Therefore this encoding function must be careful to only distinguish
/// types if there is no third type with which they are both required to be
/// compatible.
static void encodeTypeForFunctionPointerAuth(const ASTContext &Ctx,
                                             raw_ostream &OS, QualType QT) {
  // FIXME: Consider address space qualifiers.
  const Type *T = QT.getCanonicalType().getTypePtr();

  // FIXME: Consider using the C++ type mangling when we encounter a construct
  // that is incompatible with C.

  switch (T->getTypeClass()) {
  case Type::Atomic:
    // An atomic type is encoded exactly like its value type; the _Atomic
    // wrapper contributes nothing to the discriminator.
    return encodeTypeForFunctionPointerAuth(
        Ctx, OS, QT: cast<AtomicType>(Val: T)->getValueType());

  case Type::LValueReference:
    OS << "R";
    encodeTypeForFunctionPointerAuth(Ctx, OS,
                                     QT: cast<ReferenceType>(Val: T)->getPointeeType());
    return;
  case Type::RValueReference:
    OS << "O";
    encodeTypeForFunctionPointerAuth(Ctx, OS,
                                     QT: cast<ReferenceType>(Val: T)->getPointeeType());
    return;

  case Type::Pointer:
    // C11 6.7.6.1p2:
    //   For two pointer types to be compatible, both shall be identically
    //   qualified and both shall be pointers to compatible types.
    // FIXME: we should also consider pointee types.
    OS << "P";
    return;

  case Type::ObjCObjectPointer:
  case Type::BlockPointer:
    // These are encoded identically to ordinary pointers (see above).
    OS << "P";
    return;

  case Type::Complex:
    OS << "C";
    return encodeTypeForFunctionPointerAuth(
        Ctx, OS, QT: cast<ComplexType>(Val: T)->getElementType());

  case Type::VariableArray:
  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::ArrayParameter:
    // C11 6.7.6.2p6:
    //   For two array types to be compatible, both shall have compatible
    //   element types, and if both size specifiers are present, and are integer
    //   constant expressions, then both size specifiers shall have the same
    //   constant value [...]
    //
    // So since ElemType[N] has to be compatible ElemType[], we can't encode the
    // width of the array.
    OS << "A";
    return encodeTypeForFunctionPointerAuth(
        Ctx, OS, QT: cast<ArrayType>(Val: T)->getElementType());

  case Type::ObjCInterface:
  case Type::ObjCObject:
    OS << "<objc_object>";
    return;

  case Type::Enum: {
    // C11 6.7.2.2p4:
    //   Each enumerated type shall be compatible with char, a signed integer
    //   type, or an unsigned integer type.
    //
    // So we have to treat enum types as integers.
    // An enum with no known underlying type (e.g. an incomplete enum in C)
    // falls back to 'int'.
    QualType UnderlyingType = T->castAsEnumDecl()->getIntegerType();
    return encodeTypeForFunctionPointerAuth(
        Ctx, OS, QT: UnderlyingType.isNull() ? Ctx.IntTy : UnderlyingType);
  }

  case Type::FunctionNoProto:
  case Type::FunctionProto: {
    // C11 6.7.6.3p15:
    //   For two function types to be compatible, both shall specify compatible
    //   return types. Moreover, the parameter type lists, if both are present,
    //   shall agree in the number of parameters and in the use of the ellipsis
    //   terminator; corresponding parameters shall have compatible types.
    //
    // That paragraph goes on to describe how unprototyped functions are to be
    // handled, which we ignore here. Unprototyped function pointers are hashed
    // as though they were prototyped nullary functions since that's probably
    // what the user meant. This behavior is non-conforming.
    // FIXME: If we add a "custom discriminator" function type attribute we
    // should encode functions as their discriminators.
    OS << "F";
    const auto *FuncType = cast<FunctionType>(Val: T);
    encodeTypeForFunctionPointerAuth(Ctx, OS, QT: FuncType->getReturnType());
    if (const auto *FPT = dyn_cast<FunctionProtoType>(Val: FuncType)) {
      for (QualType Param : FPT->param_types()) {
        Param = Ctx.getSignatureParameterType(T: Param);
        encodeTypeForFunctionPointerAuth(Ctx, OS, QT: Param);
      }
      if (FPT->isVariadic())
        OS << "z";
    }
    OS << "E";
    return;
  }

  case Type::MemberPointer: {
    // Encode both the class (qualifier) type and the pointee type.
    OS << "M";
    const auto *MPT = T->castAs<MemberPointerType>();
    encodeTypeForFunctionPointerAuth(
        Ctx, OS, QT: QualType(MPT->getQualifier().getAsType(), 0));
    encodeTypeForFunctionPointerAuth(Ctx, OS, QT: MPT->getPointeeType());
    return;
  }
  case Type::ExtVector:
  case Type::Vector:
    // Only the total vector size is encoded, not the element type or count.
    OS << "Dv" << Ctx.getTypeSizeInChars(T).getQuantity();
    break;

  // Don't bother discriminating based on these types.
  case Type::Pipe:
  case Type::BitInt:
  case Type::ConstantMatrix:
    OS << "?";
    return;

  case Type::Builtin: {
    const auto *BTy = T->castAs<BuiltinType>();
    switch (BTy->getKind()) {
// All standard integer types — signed and unsigned alike — share the single
// encoding "i", so differing width or signedness does not change the
// discriminator. NOTE(review): presumably deliberate to maximize C
// compatibility laxness (see the file header comment) — confirm before
// tightening.
#define SIGNED_TYPE(Id, SingletonId)                                           \
  case BuiltinType::Id:                                                        \
    OS << "i";                                                                 \
    return;
#define UNSIGNED_TYPE(Id, SingletonId)                                         \
  case BuiltinType::Id:                                                        \
    OS << "i";                                                                 \
    return;
#define PLACEHOLDER_TYPE(Id, SingletonId) case BuiltinType::Id:
#define BUILTIN_TYPE(Id, SingletonId)
#include "clang/AST/BuiltinTypes.def"
      llvm_unreachable("placeholder types should not appear here.");

    case BuiltinType::Half:
      OS << "Dh";
      return;
    case BuiltinType::Float:
      OS << "f";
      return;
    case BuiltinType::Double:
      OS << "d";
      return;
    case BuiltinType::LongDouble:
      OS << "e";
      return;
    case BuiltinType::Float16:
      OS << "DF16_";
      return;
    case BuiltinType::Float128:
      OS << "g";
      return;

    case BuiltinType::Void:
      OS << "v";
      return;

    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
    case BuiltinType::NullPtr:
      OS << "P";
      return;

    // Don't bother discriminating based on OpenCL types.
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
    case BuiltinType::BFloat16:
    case BuiltinType::VectorQuad:
    case BuiltinType::VectorPair:
    case BuiltinType::DMR1024:
    case BuiltinType::DMR2048:
      OS << "?";
      return;

    // Don't bother discriminating based on these seldom-used types.
    // Note: these emit nothing at all, unlike the "?" cases above.
    case BuiltinType::Ibm128:
      return;
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix)                   \
  case BuiltinType::Id:                                                        \
    return;
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext)                                      \
  case BuiltinType::Id:                                                        \
    return;
#include "clang/Basic/OpenCLExtensionTypes.def"
#define SVE_TYPE(Name, Id, SingletonId)                                        \
  case BuiltinType::Id:                                                        \
    return;
#include "clang/Basic/AArch64ACLETypes.def"
#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId)                            \
  case BuiltinType::Id:                                                        \
    return;
#include "clang/Basic/HLSLIntangibleTypes.def"
    case BuiltinType::Dependent:
      llvm_unreachable("should never get here");
#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) case BuiltinType::Id:
#include "clang/Basic/AMDGPUTypes.def"
    case BuiltinType::WasmExternRef:
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
      llvm_unreachable("not yet implemented");
    }
    llvm_unreachable("should never get here");
  }
  case Type::Record: {
    const RecordDecl *RD = T->castAsCanonical<RecordType>()->getDecl();
    const IdentifierInfo *II = RD->getIdentifier();

    // In C++, an immediate typedef of an anonymous struct or union
    // is considered to name it for ODR purposes, but C's specification
    // of type compatibility does not have a similar rule. Using the typedef
    // name in function type discriminators anyway, as we do here,
    // therefore technically violates the C standard: two function pointer
    // types defined in terms of two typedef'd anonymous structs with
    // different names are formally still compatible, but we are assigning
    // them different discriminators and therefore incompatible ABIs.
    //
    // This is a relatively minor violation that significantly improves
    // discrimination in some cases and has not caused problems in
    // practice. Regardless, it is now part of the ABI in places where
    // function type discrimination is used, and it can no longer be
    // changed except on new platforms.

    if (!II)
      if (const TypedefNameDecl *Typedef = RD->getTypedefNameForAnonDecl())
        II = Typedef->getDeclName().getAsIdentifierInfo();

    if (!II) {
      OS << "<anonymous_record>";
      return;
    }
    // Length-prefixed name, Itanium-style, e.g. "6Widget".
    OS << II->getLength() << II->getName();
    return;
  }
  case Type::HLSLAttributedResource:
  case Type::HLSLInlineSpirv:
    llvm_unreachable("should never get here");
    break;
  case Type::OverflowBehavior:
    llvm_unreachable("should never get here");
    break;
  case Type::DeducedTemplateSpecialization:
  case Type::Auto:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define ABSTRACT_TYPE(Class, Base)
#define TYPE(Class, Base)
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("unexpected non-canonical or dependent type!");
    return;
  }
}
3567
/// Compute the pointer-auth type discriminator for \p T: a stable 16-bit
/// hash of an encoding of the (function or member-function-pointer) type.
/// Function types use the custom C-compatible encoding above; everything
/// else uses the regular canonical-type mangling.
uint16_t ASTContext::getPointerAuthTypeDiscriminator(QualType T) {
  assert(!T->isDependentType() &&
         "cannot compute type discriminator of a dependent type");
  SmallString<256> Str;
  llvm::raw_svector_ostream Out(Str);

  // Discriminate function pointers/references by their pointee function type.
  if (T->isFunctionPointerType() || T->isFunctionReferenceType())
    T = T->getPointeeType();

  if (T->isFunctionType()) {
    encodeTypeForFunctionPointerAuth(Ctx: *this, OS&: Out, QT: T);
  } else {
    T = T.getUnqualifiedType();
    // Calls to member function pointers don't need to worry about
    // language interop or the laxness of the C type compatibility rules.
    // We just mangle the member pointer type directly, which is
    // implicitly much stricter about type matching. However, we do
    // strip any top-level exception specification before this mangling.
    // C++23 requires calls to work when the function type is convertible
    // to the pointer type by a function pointer conversion, which can
    // change the exception specification. This does not technically
    // require the exception specification to not affect representation,
    // because the function pointer conversion is still always a direct
    // value conversion and therefore an opportunity to resign the
    // pointer. (This is in contrast to e.g. qualification conversions,
    // which can be applied in nested pointer positions, effectively
    // requiring qualified and unqualified representations to match.)
    // However, it is pragmatic to ignore exception specifications
    // because it allows a certain amount of `noexcept` mismatching
    // to not become a visible ODR problem. This also leaves some
    // room for the committee to add laxness to function pointer
    // conversions in future standards.
    if (auto *MPT = T->getAs<MemberPointerType>())
      if (MPT->isMemberFunctionPointer()) {
        QualType PointeeType = MPT->getPointeeType();
        if (PointeeType->castAs<FunctionProtoType>()->getExceptionSpecType() !=
            EST_None) {
          // Rebuild the member pointer with the exception spec removed.
          QualType FT = getFunctionTypeWithExceptionSpec(Orig: PointeeType, ESI: EST_None);
          T = getMemberPointerType(T: FT, Qualifier: MPT->getQualifier(),
                                   Cls: MPT->getMostRecentCXXRecordDecl());
        }
      }
    std::unique_ptr<MangleContext> MC(createMangleContext());
    MC->mangleCanonicalTypeName(T, Out);
  }

  return llvm::getPointerAuthStableSipHash(S: Str);
}
3616
3617QualType ASTContext::getObjCGCQualType(QualType T,
3618 Qualifiers::GC GCAttr) const {
3619 QualType CanT = getCanonicalType(T);
3620 if (CanT.getObjCGCAttr() == GCAttr)
3621 return T;
3622
3623 if (const auto *ptr = T->getAs<PointerType>()) {
3624 QualType Pointee = ptr->getPointeeType();
3625 if (Pointee->isAnyPointerType()) {
3626 QualType ResultType = getObjCGCQualType(T: Pointee, GCAttr);
3627 return getPointerType(T: ResultType);
3628 }
3629 }
3630
3631 // If we are composing extended qualifiers together, merge together
3632 // into one ExtQuals node.
3633 QualifierCollector Quals;
3634 const Type *TypeNode = Quals.strip(type: T);
3635
3636 // If this type already has an ObjCGC specified, it cannot get
3637 // another one.
3638 assert(!Quals.hasObjCGCAttr() &&
3639 "Type cannot have multiple ObjCGCs!");
3640 Quals.addObjCGCAttr(type: GCAttr);
3641
3642 return getExtQualType(baseType: TypeNode, quals: Quals);
3643}
3644
3645QualType ASTContext::removePtrSizeAddrSpace(QualType T) const {
3646 if (const PointerType *Ptr = T->getAs<PointerType>()) {
3647 QualType Pointee = Ptr->getPointeeType();
3648 if (isPtrSizeAddressSpace(AS: Pointee.getAddressSpace())) {
3649 return getPointerType(T: removeAddrSpaceQualType(T: Pointee));
3650 }
3651 }
3652 return T;
3653}
3654
3655QualType ASTContext::getCountAttributedType(
3656 QualType WrappedTy, Expr *CountExpr, bool CountInBytes, bool OrNull,
3657 ArrayRef<TypeCoupledDeclRefInfo> DependentDecls) const {
3658 assert(WrappedTy->isPointerType() || WrappedTy->isArrayType());
3659
3660 llvm::FoldingSetNodeID ID;
3661 CountAttributedType::Profile(ID, WrappedTy, CountExpr, CountInBytes, Nullable: OrNull);
3662
3663 void *InsertPos = nullptr;
3664 CountAttributedType *CATy =
3665 CountAttributedTypes.FindNodeOrInsertPos(ID, InsertPos);
3666 if (CATy)
3667 return QualType(CATy, 0);
3668
3669 QualType CanonTy = getCanonicalType(T: WrappedTy);
3670 size_t Size = CountAttributedType::totalSizeToAlloc<TypeCoupledDeclRefInfo>(
3671 Counts: DependentDecls.size());
3672 CATy = (CountAttributedType *)Allocate(Size, Align: TypeAlignment);
3673 new (CATy) CountAttributedType(WrappedTy, CanonTy, CountExpr, CountInBytes,
3674 OrNull, DependentDecls);
3675 Types.push_back(Elt: CATy);
3676 CountAttributedTypes.InsertNode(N: CATy, InsertPos);
3677
3678 return QualType(CATy, 0);
3679}
3680
3681QualType
3682ASTContext::adjustType(QualType Orig,
3683 llvm::function_ref<QualType(QualType)> Adjust) const {
3684 switch (Orig->getTypeClass()) {
3685 case Type::Attributed: {
3686 const auto *AT = cast<AttributedType>(Val&: Orig);
3687 return getAttributedType(attrKind: AT->getAttrKind(),
3688 modifiedType: adjustType(Orig: AT->getModifiedType(), Adjust),
3689 equivalentType: adjustType(Orig: AT->getEquivalentType(), Adjust),
3690 attr: AT->getAttr());
3691 }
3692
3693 case Type::BTFTagAttributed: {
3694 const auto *BTFT = dyn_cast<BTFTagAttributedType>(Val&: Orig);
3695 return getBTFTagAttributedType(BTFAttr: BTFT->getAttr(),
3696 Wrapped: adjustType(Orig: BTFT->getWrappedType(), Adjust));
3697 }
3698
3699 case Type::OverflowBehavior: {
3700 const auto *OB = dyn_cast<OverflowBehaviorType>(Val&: Orig);
3701 return getOverflowBehaviorType(Kind: OB->getBehaviorKind(),
3702 Wrapped: adjustType(Orig: OB->getUnderlyingType(), Adjust));
3703 }
3704
3705 case Type::Paren:
3706 return getParenType(
3707 NamedType: adjustType(Orig: cast<ParenType>(Val&: Orig)->getInnerType(), Adjust));
3708
3709 case Type::Adjusted: {
3710 const auto *AT = cast<AdjustedType>(Val&: Orig);
3711 return getAdjustedType(Orig: AT->getOriginalType(),
3712 New: adjustType(Orig: AT->getAdjustedType(), Adjust));
3713 }
3714
3715 case Type::MacroQualified: {
3716 const auto *MQT = cast<MacroQualifiedType>(Val&: Orig);
3717 return getMacroQualifiedType(UnderlyingTy: adjustType(Orig: MQT->getUnderlyingType(), Adjust),
3718 MacroII: MQT->getMacroIdentifier());
3719 }
3720
3721 default:
3722 return Adjust(Orig);
3723 }
3724}
3725
3726const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T,
3727 FunctionType::ExtInfo Info) {
3728 if (T->getExtInfo() == Info)
3729 return T;
3730
3731 QualType Result;
3732 if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(Val: T)) {
3733 Result = getFunctionNoProtoType(ResultTy: FNPT->getReturnType(), Info);
3734 } else {
3735 const auto *FPT = cast<FunctionProtoType>(Val: T);
3736 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
3737 EPI.ExtInfo = Info;
3738 Result = getFunctionType(ResultTy: FPT->getReturnType(), Args: FPT->getParamTypes(), EPI);
3739 }
3740
3741 return cast<FunctionType>(Val: Result.getTypePtr());
3742}
3743
3744QualType ASTContext::adjustFunctionResultType(QualType FunctionType,
3745 QualType ResultType) {
3746 return adjustType(Orig: FunctionType, Adjust: [&](QualType Orig) {
3747 if (const auto *FNPT = Orig->getAs<FunctionNoProtoType>())
3748 return getFunctionNoProtoType(ResultTy: ResultType, Info: FNPT->getExtInfo());
3749
3750 const auto *FPT = Orig->castAs<FunctionProtoType>();
3751 return getFunctionType(ResultTy: ResultType, Args: FPT->getParamTypes(),
3752 EPI: FPT->getExtProtoInfo());
3753 });
3754}
3755
3756void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD,
3757 QualType ResultType) {
3758 FD = FD->getMostRecentDecl();
3759 while (true) {
3760 FD->setType(adjustFunctionResultType(FunctionType: FD->getType(), ResultType));
3761 if (FunctionDecl *Next = FD->getPreviousDecl())
3762 FD = Next;
3763 else
3764 break;
3765 }
3766 if (ASTMutationListener *L = getASTMutationListener())
3767 L->DeducedReturnType(FD, ReturnType: ResultType);
3768}
3769
3770/// Get a function type and produce the equivalent function type with the
3771/// specified exception specification. Type sugar that can be present on a
3772/// declaration of a function with an exception specification is permitted
3773/// and preserved. Other type sugar (for instance, typedefs) is not.
3774QualType ASTContext::getFunctionTypeWithExceptionSpec(
3775 QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const {
3776 return adjustType(Orig, Adjust: [&](QualType Ty) {
3777 const auto *Proto = Ty->castAs<FunctionProtoType>();
3778 return getFunctionType(ResultTy: Proto->getReturnType(), Args: Proto->getParamTypes(),
3779 EPI: Proto->getExtProtoInfo().withExceptionSpec(ESI));
3780 });
3781}
3782
3783bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T,
3784 QualType U) const {
3785 return hasSameType(T1: T, T2: U) ||
3786 (getLangOpts().CPlusPlus17 &&
3787 hasSameType(T1: getFunctionTypeWithExceptionSpec(Orig: T, ESI: EST_None),
3788 T2: getFunctionTypeWithExceptionSpec(Orig: U, ESI: EST_None)));
3789}
3790
3791QualType ASTContext::getFunctionTypeWithoutPtrSizes(QualType T) {
3792 if (const auto *Proto = T->getAs<FunctionProtoType>()) {
3793 QualType RetTy = removePtrSizeAddrSpace(T: Proto->getReturnType());
3794 SmallVector<QualType, 16> Args(Proto->param_types().size());
3795 for (unsigned i = 0, n = Args.size(); i != n; ++i)
3796 Args[i] = removePtrSizeAddrSpace(T: Proto->param_types()[i]);
3797 return getFunctionType(ResultTy: RetTy, Args, EPI: Proto->getExtProtoInfo());
3798 }
3799
3800 if (const FunctionNoProtoType *Proto = T->getAs<FunctionNoProtoType>()) {
3801 QualType RetTy = removePtrSizeAddrSpace(T: Proto->getReturnType());
3802 return getFunctionNoProtoType(ResultTy: RetTy, Info: Proto->getExtInfo());
3803 }
3804
3805 return T;
3806}
3807
3808bool ASTContext::hasSameFunctionTypeIgnoringPtrSizes(QualType T, QualType U) {
3809 return hasSameType(T1: T, T2: U) ||
3810 hasSameType(T1: getFunctionTypeWithoutPtrSizes(T),
3811 T2: getFunctionTypeWithoutPtrSizes(T: U));
3812}
3813
3814QualType ASTContext::getFunctionTypeWithoutParamABIs(QualType T) const {
3815 if (const auto *Proto = T->getAs<FunctionProtoType>()) {
3816 FunctionProtoType::ExtProtoInfo EPI = Proto->getExtProtoInfo();
3817 EPI.ExtParameterInfos = nullptr;
3818 return getFunctionType(ResultTy: Proto->getReturnType(), Args: Proto->param_types(), EPI);
3819 }
3820 return T;
3821}
3822
3823bool ASTContext::hasSameFunctionTypeIgnoringParamABI(QualType T,
3824 QualType U) const {
3825 return hasSameType(T1: T, T2: U) || hasSameType(T1: getFunctionTypeWithoutParamABIs(T),
3826 T2: getFunctionTypeWithoutParamABIs(T: U));
3827}
3828
3829void ASTContext::adjustExceptionSpec(
3830 FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI,
3831 bool AsWritten) {
3832 // Update the type.
3833 QualType Updated =
3834 getFunctionTypeWithExceptionSpec(Orig: FD->getType(), ESI);
3835 FD->setType(Updated);
3836
3837 if (!AsWritten)
3838 return;
3839
3840 // Update the type in the type source information too.
3841 if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) {
3842 // If the type and the type-as-written differ, we may need to update
3843 // the type-as-written too.
3844 if (TSInfo->getType() != FD->getType())
3845 Updated = getFunctionTypeWithExceptionSpec(Orig: TSInfo->getType(), ESI);
3846
3847 // FIXME: When we get proper type location information for exceptions,
3848 // we'll also have to rebuild the TypeSourceInfo. For now, we just patch
3849 // up the TypeSourceInfo;
3850 assert(TypeLoc::getFullDataSizeForType(Updated) ==
3851 TypeLoc::getFullDataSizeForType(TSInfo->getType()) &&
3852 "TypeLoc size mismatch from updating exception specification");
3853 TSInfo->overrideType(T: Updated);
3854 }
3855}
3856
3857/// getComplexType - Return the uniqued reference to the type for a complex
3858/// number with the specified element type.
3859QualType ASTContext::getComplexType(QualType T) const {
3860 // Unique pointers, to guarantee there is only one pointer of a particular
3861 // structure.
3862 llvm::FoldingSetNodeID ID;
3863 ComplexType::Profile(ID, Element: T);
3864
3865 void *InsertPos = nullptr;
3866 if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos))
3867 return QualType(CT, 0);
3868
3869 // If the pointee type isn't canonical, this won't be a canonical type either,
3870 // so fill in the canonical type field.
3871 QualType Canonical;
3872 if (!T.isCanonical()) {
3873 Canonical = getComplexType(T: getCanonicalType(T));
3874
3875 // Get the new insert position for the node we care about.
3876 ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
3877 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3878 }
3879 auto *New = new (*this, alignof(ComplexType)) ComplexType(T, Canonical);
3880 Types.push_back(Elt: New);
3881 ComplexTypes.InsertNode(N: New, InsertPos);
3882 return QualType(New, 0);
3883}
3884
3885/// getPointerType - Return the uniqued reference to the type for a pointer to
3886/// the specified type.
3887QualType ASTContext::getPointerType(QualType T) const {
3888 // Unique pointers, to guarantee there is only one pointer of a particular
3889 // structure.
3890 llvm::FoldingSetNodeID ID;
3891 PointerType::Profile(ID, Pointee: T);
3892
3893 void *InsertPos = nullptr;
3894 if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos))
3895 return QualType(PT, 0);
3896
3897 // If the pointee type isn't canonical, this won't be a canonical type either,
3898 // so fill in the canonical type field.
3899 QualType Canonical;
3900 if (!T.isCanonical()) {
3901 Canonical = getPointerType(T: getCanonicalType(T));
3902
3903 // Get the new insert position for the node we care about.
3904 PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
3905 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3906 }
3907 auto *New = new (*this, alignof(PointerType)) PointerType(T, Canonical);
3908 Types.push_back(Elt: New);
3909 PointerTypes.InsertNode(N: New, InsertPos);
3910 return QualType(New, 0);
3911}
3912
3913QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const {
3914 llvm::FoldingSetNodeID ID;
3915 AdjustedType::Profile(ID, Orig, New);
3916 void *InsertPos = nullptr;
3917 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
3918 if (AT)
3919 return QualType(AT, 0);
3920
3921 QualType Canonical = getCanonicalType(T: New);
3922
3923 // Get the new insert position for the node we care about.
3924 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
3925 assert(!AT && "Shouldn't be in the map!");
3926
3927 AT = new (*this, alignof(AdjustedType))
3928 AdjustedType(Type::Adjusted, Orig, New, Canonical);
3929 Types.push_back(Elt: AT);
3930 AdjustedTypes.InsertNode(N: AT, InsertPos);
3931 return QualType(AT, 0);
3932}
3933
3934QualType ASTContext::getDecayedType(QualType Orig, QualType Decayed) const {
3935 llvm::FoldingSetNodeID ID;
3936 AdjustedType::Profile(ID, Orig, New: Decayed);
3937 void *InsertPos = nullptr;
3938 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
3939 if (AT)
3940 return QualType(AT, 0);
3941
3942 QualType Canonical = getCanonicalType(T: Decayed);
3943
3944 // Get the new insert position for the node we care about.
3945 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
3946 assert(!AT && "Shouldn't be in the map!");
3947
3948 AT = new (*this, alignof(DecayedType)) DecayedType(Orig, Decayed, Canonical);
3949 Types.push_back(Elt: AT);
3950 AdjustedTypes.InsertNode(N: AT, InsertPos);
3951 return QualType(AT, 0);
3952}
3953
3954QualType ASTContext::getDecayedType(QualType T) const {
3955 assert((T->isArrayType() || T->isFunctionType()) && "T does not decay");
3956
3957 QualType Decayed;
3958
3959 // C99 6.7.5.3p7:
3960 // A declaration of a parameter as "array of type" shall be
3961 // adjusted to "qualified pointer to type", where the type
3962 // qualifiers (if any) are those specified within the [ and ] of
3963 // the array type derivation.
3964 if (T->isArrayType())
3965 Decayed = getArrayDecayedType(T);
3966
3967 // C99 6.7.5.3p8:
3968 // A declaration of a parameter as "function returning type"
3969 // shall be adjusted to "pointer to function returning type", as
3970 // in 6.3.2.1.
3971 if (T->isFunctionType())
3972 Decayed = getPointerType(T);
3973
3974 return getDecayedType(Orig: T, Decayed);
3975}
3976
/// Return the unique ArrayParameterType corresponding to the constant array
/// type \p Ty (used for parameters declared with array type, which keep
/// their array identity in some language modes).
QualType ASTContext::getArrayParameterType(QualType Ty) const {
  // Already an array-parameter type: nothing to do.
  if (Ty->isArrayParameterType())
    return Ty;
  assert(Ty->isConstantArrayType() && "Ty must be an array type.");
  // Profile against the desugared constant array so sugared spellings of
  // the same array share one node.
  QualType DTy = Ty.getDesugaredType(Context: *this);
  const auto *ATy = cast<ConstantArrayType>(Val&: DTy);
  llvm::FoldingSetNodeID ID;
  ATy->Profile(ID, Ctx: *this, ET: ATy->getElementType(), ArraySize: ATy->getZExtSize(),
               SizeExpr: ATy->getSizeExpr(), SizeMod: ATy->getSizeModifier(),
               TypeQuals: ATy->getIndexTypeQualifiers().getAsOpaqueValue());
  void *InsertPos = nullptr;
  ArrayParameterType *AT =
      ArrayParameterTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (AT)
    return QualType(AT, 0);

  // Non-canonical array: build the canonical variant first, then recompute
  // the insertion point (building it may have touched the folding set).
  QualType Canonical;
  if (!DTy.isCanonical()) {
    Canonical = getArrayParameterType(Ty: getCanonicalType(T: Ty));

    // Get the new insert position for the node we care about.
    AT = ArrayParameterTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!AT && "Shouldn't be in the map!");
  }

  AT = new (*this, alignof(ArrayParameterType))
      ArrayParameterType(ATy, Canonical);
  Types.push_back(Elt: AT);
  ArrayParameterTypes.InsertNode(N: AT, InsertPos);
  return QualType(AT, 0);
}
4008
4009/// getBlockPointerType - Return the uniqued reference to the type for
4010/// a pointer to the specified block.
4011QualType ASTContext::getBlockPointerType(QualType T) const {
4012 assert(T->isFunctionType() && "block of function types only");
4013 // Unique pointers, to guarantee there is only one block of a particular
4014 // structure.
4015 llvm::FoldingSetNodeID ID;
4016 BlockPointerType::Profile(ID, Pointee: T);
4017
4018 void *InsertPos = nullptr;
4019 if (BlockPointerType *PT =
4020 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
4021 return QualType(PT, 0);
4022
4023 // If the block pointee type isn't canonical, this won't be a canonical
4024 // type either so fill in the canonical type field.
4025 QualType Canonical;
4026 if (!T.isCanonical()) {
4027 Canonical = getBlockPointerType(T: getCanonicalType(T));
4028
4029 // Get the new insert position for the node we care about.
4030 BlockPointerType *NewIP =
4031 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
4032 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4033 }
4034 auto *New =
4035 new (*this, alignof(BlockPointerType)) BlockPointerType(T, Canonical);
4036 Types.push_back(Elt: New);
4037 BlockPointerTypes.InsertNode(N: New, InsertPos);
4038 return QualType(New, 0);
4039}
4040
4041/// getLValueReferenceType - Return the uniqued reference to the type for an
4042/// lvalue reference to the specified type.
4043QualType
4044ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
4045 assert((!T->isPlaceholderType() ||
4046 T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
4047 "Unresolved placeholder type");
4048
4049 // Unique pointers, to guarantee there is only one pointer of a particular
4050 // structure.
4051 llvm::FoldingSetNodeID ID;
4052 ReferenceType::Profile(ID, Referencee: T, SpelledAsLValue);
4053
4054 void *InsertPos = nullptr;
4055 if (LValueReferenceType *RT =
4056 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
4057 return QualType(RT, 0);
4058
4059 const auto *InnerRef = T->getAs<ReferenceType>();
4060
4061 // If the referencee type isn't canonical, this won't be a canonical type
4062 // either, so fill in the canonical type field.
4063 QualType Canonical;
4064 if (!SpelledAsLValue || InnerRef || !T.isCanonical()) {
4065 QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
4066 Canonical = getLValueReferenceType(T: getCanonicalType(T: PointeeType));
4067
4068 // Get the new insert position for the node we care about.
4069 LValueReferenceType *NewIP =
4070 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
4071 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4072 }
4073
4074 auto *New = new (*this, alignof(LValueReferenceType))
4075 LValueReferenceType(T, Canonical, SpelledAsLValue);
4076 Types.push_back(Elt: New);
4077 LValueReferenceTypes.InsertNode(N: New, InsertPos);
4078
4079 return QualType(New, 0);
4080}
4081
4082/// getRValueReferenceType - Return the uniqued reference to the type for an
4083/// rvalue reference to the specified type.
4084QualType ASTContext::getRValueReferenceType(QualType T) const {
4085 assert((!T->isPlaceholderType() ||
4086 T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
4087 "Unresolved placeholder type");
4088
4089 // Unique pointers, to guarantee there is only one pointer of a particular
4090 // structure.
4091 llvm::FoldingSetNodeID ID;
4092 ReferenceType::Profile(ID, Referencee: T, SpelledAsLValue: false);
4093
4094 void *InsertPos = nullptr;
4095 if (RValueReferenceType *RT =
4096 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
4097 return QualType(RT, 0);
4098
4099 const auto *InnerRef = T->getAs<ReferenceType>();
4100
4101 // If the referencee type isn't canonical, this won't be a canonical type
4102 // either, so fill in the canonical type field.
4103 QualType Canonical;
4104 if (InnerRef || !T.isCanonical()) {
4105 QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
4106 Canonical = getRValueReferenceType(T: getCanonicalType(T: PointeeType));
4107
4108 // Get the new insert position for the node we care about.
4109 RValueReferenceType *NewIP =
4110 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
4111 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4112 }
4113
4114 auto *New = new (*this, alignof(RValueReferenceType))
4115 RValueReferenceType(T, Canonical);
4116 Types.push_back(Elt: New);
4117 RValueReferenceTypes.InsertNode(N: New, InsertPos);
4118 return QualType(New, 0);
4119}
4120
/// Return the unique member pointer type with pointee \p T, qualified by
/// \p Qualifier and/or scoped to class \p Cls. At least one of Qualifier
/// or Cls must be provided; the missing one is derived from the other.
QualType ASTContext::getMemberPointerType(QualType T,
                                          NestedNameSpecifier Qualifier,
                                          const CXXRecordDecl *Cls) const {
  if (!Qualifier) {
    assert(Cls && "At least one of Qualifier or Cls must be provided");
    // Synthesize the qualifier from the class's canonical tag type.
    Qualifier = NestedNameSpecifier(getCanonicalTagType(TD: Cls).getTypePtr());
  } else if (!Cls) {
    Cls = Qualifier.getAsRecordDecl();
  }
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  MemberPointerType::Profile(ID, Pointee: T, Qualifier, Cls);

  void *InsertPos = nullptr;
  if (MemberPointerType *PT =
          MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // The canonical qualifier is the class's canonical tag type when the
  // class is known; otherwise fall back to canonicalizing the qualifier
  // itself.
  NestedNameSpecifier CanonicalQualifier = [&] {
    if (!Cls)
      return Qualifier.getCanonical();
    NestedNameSpecifier R(getCanonicalTagType(TD: Cls).getTypePtr());
    assert(R.isCanonical());
    return R;
  }();
  // If the pointee or class type isn't canonical, this won't be a canonical
  // type either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical() || Qualifier != CanonicalQualifier) {
    Canonical =
        getMemberPointerType(T: getCanonicalType(T), Qualifier: CanonicalQualifier, Cls);
    assert(!cast<MemberPointerType>(Canonical)->isSugared());
    // Get the new insert position for the node we care about.
    [[maybe_unused]] MemberPointerType *NewIP =
        MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!");
  }
  auto *New = new (*this, alignof(MemberPointerType))
      MemberPointerType(T, Qualifier, Canonical);
  Types.push_back(Elt: New);
  MemberPointerTypes.InsertNode(N: New, InsertPos);
  return QualType(New, 0);
}
4165
/// getConstantArrayType - Return the unique reference to the type for an
/// array of the specified element type.
///
/// \param ArySizeIn the array bound; truncated/extended to the target's
///        maximum pointer width before uniquing.
/// \param SizeExpr the spelled size expression; kept in the type only when
///        it is instantiation-dependent.
QualType ASTContext::getConstantArrayType(QualType EltTy,
                                          const llvm::APInt &ArySizeIn,
                                          const Expr *SizeExpr,
                                          ArraySizeModifier ASM,
                                          unsigned IndexTypeQuals) const {
  assert((EltTy->isDependentType() ||
          EltTy->isIncompleteType() || EltTy->isConstantSizeType()) &&
         "Constant array of VLAs is illegal!");

  // We only need the size as part of the type if it's instantiation-dependent.
  if (SizeExpr && !SizeExpr->isInstantiationDependent())
    SizeExpr = nullptr;

  // Convert the array size into a canonical width matching the pointer size for
  // the target.
  llvm::APInt ArySize(ArySizeIn);
  ArySize = ArySize.zextOrTrunc(width: Target->getMaxPointerWidth());

  llvm::FoldingSetNodeID ID;
  ConstantArrayType::Profile(ID, Ctx: *this, ET: EltTy, ArraySize: ArySize.getZExtValue(), SizeExpr,
                             SizeMod: ASM, TypeQuals: IndexTypeQuals);

  void *InsertPos = nullptr;
  if (ConstantArrayType *ATP =
          ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(ATP, 0);

  // If the element type isn't canonical or has qualifiers, or the array bound
  // is instantiation-dependent, this won't be a canonical type either, so fill
  // in the canonical type field.
  QualType Canon;
  // FIXME: Check below should look for qualifiers behind sugar.
  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers() || SizeExpr) {
    // The canonical array is built from the unqualified canonical element
    // type and no size expression; the qualifiers are re-applied on top.
    SplitQualType canonSplit = getCanonicalType(T: EltTy).split();
    Canon = getConstantArrayType(EltTy: QualType(canonSplit.Ty, 0), ArySizeIn: ArySize, SizeExpr: nullptr,
                                 ASM, IndexTypeQuals);
    Canon = getQualifiedType(T: Canon, Qs: canonSplit.Quals);

    // Get the new insert position for the node we care about; the recursive
    // call above may have invalidated InsertPos.
    ConstantArrayType *NewIP =
        ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = ConstantArrayType::Create(Ctx: *this, ET: EltTy, Can: Canon, Sz: ArySize, SzExpr: SizeExpr,
                                        SzMod: ASM, Qual: IndexTypeQuals);
  ConstantArrayTypes.InsertNode(N: New, InsertPos);
  Types.push_back(Elt: New);
  return QualType(New, 0);
}
4218
/// getVariableArrayDecayedType - Turns the given type, which may be
/// variably-modified, into the corresponding type with all the known
/// sizes replaced with [*].
QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
  // Vastly most common case.
  if (!type->isVariablyModifiedType()) return type;

  QualType result;

  // Strip top-level qualifiers and sugar; the qualifiers are re-applied to
  // the rebuilt type at the end.
  SplitQualType split = type.getSplitDesugaredType();
  const Type *ty = split.Ty;
  switch (ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("didn't desugar past all non-canonical types?");

  // These types should never be variably-modified.
  case Type::Builtin:
  case Type::Complex:
  case Type::Vector:
  case Type::DependentVector:
  case Type::ExtVector:
  case Type::DependentSizedExtVector:
  case Type::ConstantMatrix:
  case Type::DependentSizedMatrix:
  case Type::DependentAddressSpace:
  case Type::ObjCObject:
  case Type::ObjCInterface:
  case Type::ObjCObjectPointer:
  case Type::Record:
  case Type::Enum:
  case Type::UnresolvedUsing:
  case Type::TypeOfExpr:
  case Type::TypeOf:
  case Type::Decltype:
  case Type::UnaryTransform:
  case Type::DependentName:
  case Type::InjectedClassName:
  case Type::TemplateSpecialization:
  case Type::TemplateTypeParm:
  case Type::SubstTemplateTypeParmPack:
  case Type::SubstBuiltinTemplatePack:
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
  case Type::PackExpansion:
  case Type::PackIndexing:
  case Type::BitInt:
  case Type::DependentBitInt:
  case Type::ArrayParameter:
  case Type::HLSLAttributedResource:
  case Type::HLSLInlineSpirv:
  case Type::OverflowBehavior:
    llvm_unreachable("type should never be variably-modified");

  // These types can be variably-modified but should never need to
  // further decay.
  case Type::FunctionNoProto:
  case Type::FunctionProto:
  case Type::BlockPointer:
  case Type::MemberPointer:
  case Type::Pipe:
    return type;

  // These types can be variably-modified. All these modifications
  // preserve structure except as noted by comments.
  // TODO: if we ever care about optimizing VLAs, there are no-op
  // optimizations available here.
  case Type::Pointer:
    // Recursively decay the pointee and rebuild the pointer around it.
    result = getPointerType(T: getVariableArrayDecayedType(
        type: cast<PointerType>(Val: ty)->getPointeeType()));
    break;

  case Type::LValueReference: {
    const auto *lv = cast<LValueReferenceType>(Val: ty);
    result = getLValueReferenceType(
        T: getVariableArrayDecayedType(type: lv->getPointeeType()),
        SpelledAsLValue: lv->isSpelledAsLValue());
    break;
  }

  case Type::RValueReference: {
    const auto *lv = cast<RValueReferenceType>(Val: ty);
    result = getRValueReferenceType(
        T: getVariableArrayDecayedType(type: lv->getPointeeType()));
    break;
  }

  case Type::Atomic: {
    const auto *at = cast<AtomicType>(Val: ty);
    result = getAtomicType(T: getVariableArrayDecayedType(type: at->getValueType()));
    break;
  }

  case Type::ConstantArray: {
    // Constant-size bound is preserved; only the element type decays.
    const auto *cat = cast<ConstantArrayType>(Val: ty);
    result = getConstantArrayType(
        EltTy: getVariableArrayDecayedType(type: cat->getElementType()),
        ArySizeIn: cat->getSize(),
        SizeExpr: cat->getSizeExpr(),
        ASM: cat->getSizeModifier(),
        IndexTypeQuals: cat->getIndexTypeCVRQualifiers());
    break;
  }

  case Type::DependentSizedArray: {
    const auto *dat = cast<DependentSizedArrayType>(Val: ty);
    result = getDependentSizedArrayType(
        EltTy: getVariableArrayDecayedType(type: dat->getElementType()), NumElts: dat->getSizeExpr(),
        ASM: dat->getSizeModifier(), IndexTypeQuals: dat->getIndexTypeCVRQualifiers());
    break;
  }

  // Turn incomplete types into [*] types.
  case Type::IncompleteArray: {
    const auto *iat = cast<IncompleteArrayType>(Val: ty);
    result =
        getVariableArrayType(EltTy: getVariableArrayDecayedType(type: iat->getElementType()),
                             /*size*/ NumElts: nullptr, ASM: ArraySizeModifier::Normal,
                             IndexTypeQuals: iat->getIndexTypeCVRQualifiers());
    break;
  }

  // Turn VLA types into [*] types.
  case Type::VariableArray: {
    const auto *vat = cast<VariableArrayType>(Val: ty);
    result =
        getVariableArrayType(EltTy: getVariableArrayDecayedType(type: vat->getElementType()),
                             /*size*/ NumElts: nullptr, ASM: ArraySizeModifier::Star,
                             IndexTypeQuals: vat->getIndexTypeCVRQualifiers());
    break;
  }
  }

  // Apply the top-level qualifiers from the original.
  return getQualifiedType(T: result, Qs: split.Quals);
}
4357
4358/// getVariableArrayType - Returns a non-unique reference to the type for a
4359/// variable array of the specified element type.
4360QualType ASTContext::getVariableArrayType(QualType EltTy, Expr *NumElts,
4361 ArraySizeModifier ASM,
4362 unsigned IndexTypeQuals) const {
4363 // Since we don't unique expressions, it isn't possible to unique VLA's
4364 // that have an expression provided for their size.
4365 QualType Canon;
4366
4367 // Be sure to pull qualifiers off the element type.
4368 // FIXME: Check below should look for qualifiers behind sugar.
4369 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
4370 SplitQualType canonSplit = getCanonicalType(T: EltTy).split();
4371 Canon = getVariableArrayType(EltTy: QualType(canonSplit.Ty, 0), NumElts, ASM,
4372 IndexTypeQuals);
4373 Canon = getQualifiedType(T: Canon, Qs: canonSplit.Quals);
4374 }
4375
4376 auto *New = new (*this, alignof(VariableArrayType))
4377 VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals);
4378
4379 VariableArrayTypes.push_back(x: New);
4380 Types.push_back(Elt: New);
4381 return QualType(New, 0);
4382}
4383
/// getDependentSizedArrayType - Returns a non-unique reference to
/// the type for a dependently-sized array of the specified element
/// type.
QualType
ASTContext::getDependentSizedArrayType(QualType elementType, Expr *numElements,
                                       ArraySizeModifier ASM,
                                       unsigned elementTypeQuals) const {
  assert((!numElements || numElements->isTypeDependent() ||
          numElements->isValueDependent()) &&
         "Size must be type- or value-dependent!");

  SplitQualType canonElementType = getCanonicalType(T: elementType).split();

  void *insertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  // Profile with the canonical element type when a size expression exists, so
  // equivalent spellings unify; with no size, the sugared element type keys
  // the node directly.
  DependentSizedArrayType::Profile(
      ID, Context: *this, ET: numElements ? QualType(canonElementType.Ty, 0) : elementType,
      SizeMod: ASM, TypeQuals: elementTypeQuals, E: numElements);

  // Look for an existing type with these properties.
  DependentSizedArrayType *canonTy =
      DependentSizedArrayTypes.FindNodeOrInsertPos(ID, InsertPos&: insertPos);

  // Dependently-sized array types that do not have a specified number
  // of elements will have their sizes deduced from a dependent
  // initializer.
  if (!numElements) {
    if (canonTy)
      return QualType(canonTy, 0);

    auto *newType = new (*this, alignof(DependentSizedArrayType))
        DependentSizedArrayType(elementType, QualType(), numElements, ASM,
                                elementTypeQuals);
    DependentSizedArrayTypes.InsertNode(N: newType, InsertPos: insertPos);
    Types.push_back(Elt: newType);
    return QualType(newType, 0);
  }

  // If we don't have one, build one.
  if (!canonTy) {
    canonTy = new (*this, alignof(DependentSizedArrayType))
        DependentSizedArrayType(QualType(canonElementType.Ty, 0), QualType(),
                                numElements, ASM, elementTypeQuals);
    DependentSizedArrayTypes.InsertNode(N: canonTy, InsertPos: insertPos);
    Types.push_back(Elt: canonTy);
  }

  // Apply qualifiers from the element type to the array.
  QualType canon = getQualifiedType(T: QualType(canonTy,0),
                                    Qs: canonElementType.Quals);

  // If we didn't need extra canonicalization for the element type or the size
  // expression, then just use that as our result.
  if (QualType(canonElementType.Ty, 0) == elementType &&
      canonTy->getSizeExpr() == numElements)
    return canon;

  // Otherwise, we need to build a type which follows the spelling
  // of the element type. Note these sugared nodes are not uniqued.
  auto *sugaredType = new (*this, alignof(DependentSizedArrayType))
      DependentSizedArrayType(elementType, canon, numElements, ASM,
                              elementTypeQuals);
  Types.push_back(Elt: sugaredType);
  return QualType(sugaredType, 0);
}
4449
4450QualType ASTContext::getIncompleteArrayType(QualType elementType,
4451 ArraySizeModifier ASM,
4452 unsigned elementTypeQuals) const {
4453 llvm::FoldingSetNodeID ID;
4454 IncompleteArrayType::Profile(ID, ET: elementType, SizeMod: ASM, TypeQuals: elementTypeQuals);
4455
4456 void *insertPos = nullptr;
4457 if (IncompleteArrayType *iat =
4458 IncompleteArrayTypes.FindNodeOrInsertPos(ID, InsertPos&: insertPos))
4459 return QualType(iat, 0);
4460
4461 // If the element type isn't canonical, this won't be a canonical type
4462 // either, so fill in the canonical type field. We also have to pull
4463 // qualifiers off the element type.
4464 QualType canon;
4465
4466 // FIXME: Check below should look for qualifiers behind sugar.
4467 if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
4468 SplitQualType canonSplit = getCanonicalType(T: elementType).split();
4469 canon = getIncompleteArrayType(elementType: QualType(canonSplit.Ty, 0),
4470 ASM, elementTypeQuals);
4471 canon = getQualifiedType(T: canon, Qs: canonSplit.Quals);
4472
4473 // Get the new insert position for the node we care about.
4474 IncompleteArrayType *existing =
4475 IncompleteArrayTypes.FindNodeOrInsertPos(ID, InsertPos&: insertPos);
4476 assert(!existing && "Shouldn't be in the map!"); (void) existing;
4477 }
4478
4479 auto *newType = new (*this, alignof(IncompleteArrayType))
4480 IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);
4481
4482 IncompleteArrayTypes.InsertNode(N: newType, InsertPos: insertPos);
4483 Types.push_back(Elt: newType);
4484 return QualType(newType, 0);
4485}
4486
// Return {element type, scalable element count, number of component vectors}
// for a scalable builtin vector type (AArch64 SVE/ACLE or RISC-V V).
ASTContext::BuiltinVectorTypeInfo
ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const {
// Shorthand initializers describing an element as
// {type, scalable element count, number of vectors}.
#define SVE_INT_ELTTY(BITS, ELTS, SIGNED, NUMVECTORS)                          \
  {getIntTypeForBitwidth(BITS, SIGNED), llvm::ElementCount::getScalable(ELTS), \
   NUMVECTORS};

#define SVE_ELTTY(ELTTY, ELTS, NUMVECTORS)                                     \
  {ELTTY, llvm::ElementCount::getScalable(ELTS), NUMVECTORS};

  switch (Ty->getKind()) {
  default:
    llvm_unreachable("Unsupported builtin vector type");

  // AArch64 SVE vector and predicate types, expanded from the .def file.
#define SVE_VECTOR_TYPE_INT(Name, MangledName, Id, SingletonId, NumEls,        \
                            ElBits, NF, IsSigned)                              \
  case BuiltinType::Id:                                                        \
    return {getIntTypeForBitwidth(ElBits, IsSigned),                           \
            llvm::ElementCount::getScalable(NumEls), NF};
#define SVE_VECTOR_TYPE_FLOAT(Name, MangledName, Id, SingletonId, NumEls,      \
                              ElBits, NF)                                      \
  case BuiltinType::Id:                                                        \
    return {ElBits == 16 ? HalfTy : (ElBits == 32 ? FloatTy : DoubleTy),       \
            llvm::ElementCount::getScalable(NumEls), NF};
#define SVE_VECTOR_TYPE_BFLOAT(Name, MangledName, Id, SingletonId, NumEls,     \
                               ElBits, NF)                                     \
  case BuiltinType::Id:                                                        \
    return {BFloat16Ty, llvm::ElementCount::getScalable(NumEls), NF};
#define SVE_VECTOR_TYPE_MFLOAT(Name, MangledName, Id, SingletonId, NumEls,     \
                               ElBits, NF)                                     \
  case BuiltinType::Id:                                                        \
    return {MFloat8Ty, llvm::ElementCount::getScalable(NumEls), NF};
#define SVE_PREDICATE_TYPE_ALL(Name, MangledName, Id, SingletonId, NumEls, NF) \
  case BuiltinType::Id:                                                        \
    return {BoolTy, llvm::ElementCount::getScalable(NumEls), NF};
#include "clang/Basic/AArch64ACLETypes.def"

  // RISC-V vector and mask types; note RVV uses Float16Ty where SVE uses
  // HalfTy, and masks always report a single vector.
#define RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, ElBits, NF,         \
                            IsSigned)                                          \
  case BuiltinType::Id:                                                        \
    return {getIntTypeForBitwidth(ElBits, IsSigned),                           \
            llvm::ElementCount::getScalable(NumEls), NF};
#define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF)       \
  case BuiltinType::Id:                                                        \
    return {ElBits == 16 ? Float16Ty : (ElBits == 32 ? FloatTy : DoubleTy),    \
            llvm::ElementCount::getScalable(NumEls), NF};
#define RVV_VECTOR_TYPE_BFLOAT(Name, Id, SingletonId, NumEls, ElBits, NF)      \
  case BuiltinType::Id:                                                        \
    return {BFloat16Ty, llvm::ElementCount::getScalable(NumEls), NF};
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls)                      \
  case BuiltinType::Id:                                                        \
    return {BoolTy, llvm::ElementCount::getScalable(NumEls), 1};
#include "clang/Basic/RISCVVTypes.def"
  }
}
4541
/// getExternrefType - Return a WebAssembly externref type, which represents an
/// opaque reference to a host value.
QualType ASTContext::getWebAssemblyExternrefType() const {
  // Only meaningful when targeting WebAssembly with the reference-types
  // feature enabled.
  if (Target->getTriple().isWasm() && Target->hasFeature(Feature: "reference-types")) {
    // Walk the wasm reference types from the .def file and return the
    // externref singleton.
#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS)                  \
  if (BuiltinType::Id == BuiltinType::WasmExternRef)                           \
    return SingletonId;
#include "clang/Basic/WebAssemblyReferenceTypes.def"
  }
  llvm_unreachable(
      "shouldn't try to generate type externref outside WebAssembly target");
}
4554
/// getScalableVectorType - Return the unique reference to a scalable vector
/// type of the specified element type and size. VectorType must be a built-in
/// type. Returns a null QualType when no builtin scalable vector matches the
/// request; note that negative results are not cached.
QualType ASTContext::getScalableVectorType(QualType EltTy, unsigned NumElts,
                                           unsigned NumFields) const {
  // Memoize successful lookups keyed on {element type, count, fields}.
  auto K = llvm::ScalableVecTyKey{.EltTy: EltTy, .NumElts: NumElts, .NumFields: NumFields};
  if (auto It = ScalableVecTyMap.find(Val: K); It != ScalableVecTyMap.end())
    return It->second;

  if (Target->hasAArch64ACLETypes()) {
    uint64_t EltTySize = getTypeSize(T: EltTy);

  // Match the requested element type and element/field counts against each
  // SVE builtin, caching the singleton on success.
#define SVE_VECTOR_TYPE_INT(Name, MangledName, Id, SingletonId, NumEls,        \
                            ElBits, NF, IsSigned)                              \
  if (EltTy->hasIntegerRepresentation() && !EltTy->isBooleanType() &&          \
      EltTy->hasSignedIntegerRepresentation() == IsSigned &&                   \
      EltTySize == ElBits && NumElts == (NumEls * NF) && NumFields == 1) {     \
    return ScalableVecTyMap[K] = SingletonId;                                  \
  }
#define SVE_VECTOR_TYPE_FLOAT(Name, MangledName, Id, SingletonId, NumEls,      \
                              ElBits, NF)                                      \
  if (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() &&        \
      EltTySize == ElBits && NumElts == (NumEls * NF) && NumFields == 1) {     \
    return ScalableVecTyMap[K] = SingletonId;                                  \
  }
#define SVE_VECTOR_TYPE_BFLOAT(Name, MangledName, Id, SingletonId, NumEls,     \
                               ElBits, NF)                                     \
  if (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() &&         \
      EltTySize == ElBits && NumElts == (NumEls * NF) && NumFields == 1) {     \
    return ScalableVecTyMap[K] = SingletonId;                                  \
  }
#define SVE_VECTOR_TYPE_MFLOAT(Name, MangledName, Id, SingletonId, NumEls,     \
                               ElBits, NF)                                     \
  if (EltTy->isMFloat8Type() && EltTySize == ElBits &&                         \
      NumElts == (NumEls * NF) && NumFields == 1) {                            \
    return ScalableVecTyMap[K] = SingletonId;                                  \
  }
#define SVE_PREDICATE_TYPE_ALL(Name, MangledName, Id, SingletonId, NumEls, NF) \
  if (EltTy->isBooleanType() && NumElts == (NumEls * NF) && NumFields == 1)    \
    return ScalableVecTyMap[K] = SingletonId;
#include "clang/Basic/AArch64ACLETypes.def"
  } else if (Target->hasRISCVVTypes()) {
    uint64_t EltTySize = getTypeSize(T: EltTy);
  // RVV matching: integer/float/bfloat element kinds are distinguished per
  // entry; unlike SVE, the field count (NF) must match the tuple size.
#define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned,   \
                        IsFP, IsBF)                                            \
  if (!EltTy->isBooleanType() &&                                               \
      ((EltTy->hasIntegerRepresentation() &&                                   \
        EltTy->hasSignedIntegerRepresentation() == IsSigned) ||                \
       (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() &&      \
        IsFP && !IsBF) ||                                                      \
       (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() &&       \
        IsBF && !IsFP)) &&                                                     \
      EltTySize == ElBits && NumElts == NumEls && NumFields == NF)             \
    return ScalableVecTyMap[K] = SingletonId;
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls)                      \
  if (EltTy->isBooleanType() && NumElts == NumEls)                             \
    return ScalableVecTyMap[K] = SingletonId;
#include "clang/Basic/RISCVVTypes.def"
  }
  return QualType();
}
4616
4617/// getVectorType - Return the unique reference to a vector type of
4618/// the specified element type and size. VectorType must be a built-in type.
4619QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
4620 VectorKind VecKind) const {
4621 assert(vecType->isBuiltinType() ||
4622 (vecType->isBitIntType() &&
4623 // Only support _BitInt elements with byte-sized power of 2 NumBits.
4624 llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits())));
4625
4626 // Check if we've already instantiated a vector of this type.
4627 llvm::FoldingSetNodeID ID;
4628 VectorType::Profile(ID, ElementType: vecType, NumElements: NumElts, TypeClass: Type::Vector, VecKind);
4629
4630 void *InsertPos = nullptr;
4631 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
4632 return QualType(VTP, 0);
4633
4634 // If the element type isn't canonical, this won't be a canonical type either,
4635 // so fill in the canonical type field.
4636 QualType Canonical;
4637 if (!vecType.isCanonical()) {
4638 Canonical = getVectorType(vecType: getCanonicalType(T: vecType), NumElts, VecKind);
4639
4640 // Get the new insert position for the node we care about.
4641 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4642 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4643 }
4644 auto *New = new (*this, alignof(VectorType))
4645 VectorType(vecType, NumElts, Canonical, VecKind);
4646 VectorTypes.InsertNode(N: New, InsertPos);
4647 Types.push_back(Elt: New);
4648 return QualType(New, 0);
4649}
4650
/// Return a dependent vector type with the given element type, dependent size
/// expression, and vector kind. Sugared nodes are not uniqued; only the
/// canonical node lives in the folding set.
QualType ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr,
                                            SourceLocation AttrLoc,
                                            VectorKind VecKind) const {
  // Profile against the canonical element type so that every spelling of the
  // same dependent vector shares one canonical node.
  llvm::FoldingSetNodeID ID;
  DependentVectorType::Profile(ID, Context: *this, ElementType: getCanonicalType(T: VecType), SizeExpr,
                               VecKind);
  void *InsertPos = nullptr;
  DependentVectorType *Canon =
      DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
  DependentVectorType *New;

  if (Canon) {
    // A canonical node already exists; build a node pointing at it.
    New = new (*this, alignof(DependentVectorType)) DependentVectorType(
        VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind);
  } else {
    QualType CanonVecTy = getCanonicalType(T: VecType);
    if (CanonVecTy == VecType) {
      // The request is itself canonical: insert it as the canonical node.
      New = new (*this, alignof(DependentVectorType))
          DependentVectorType(VecType, QualType(), SizeExpr, AttrLoc, VecKind);

      DependentVectorType *CanonCheck =
          DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!CanonCheck &&
             "Dependent-sized vector_size canonical type broken");
      (void)CanonCheck;
      DependentVectorTypes.InsertNode(N: New, InsertPos);
    } else {
      // Build the canonical node first (recursively), then wrap the sugared
      // element type around it.
      QualType CanonTy = getDependentVectorType(VecType: CanonVecTy, SizeExpr,
                                                AttrLoc: SourceLocation(), VecKind);
      New = new (*this, alignof(DependentVectorType))
          DependentVectorType(VecType, CanonTy, SizeExpr, AttrLoc, VecKind);
    }
  }

  Types.push_back(Elt: New);
  return QualType(New, 0);
}
4688
4689/// getExtVectorType - Return the unique reference to an extended vector type of
4690/// the specified element type and size. VectorType must be a built-in type.
4691QualType ASTContext::getExtVectorType(QualType vecType,
4692 unsigned NumElts) const {
4693 assert(vecType->isBuiltinType() || vecType->isDependentType() ||
4694 (vecType->isBitIntType() &&
4695 // Only support _BitInt elements with byte-sized power of 2 NumBits.
4696 llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits())));
4697
4698 // Check if we've already instantiated a vector of this type.
4699 llvm::FoldingSetNodeID ID;
4700 VectorType::Profile(ID, ElementType: vecType, NumElements: NumElts, TypeClass: Type::ExtVector,
4701 VecKind: VectorKind::Generic);
4702 void *InsertPos = nullptr;
4703 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
4704 return QualType(VTP, 0);
4705
4706 // If the element type isn't canonical, this won't be a canonical type either,
4707 // so fill in the canonical type field.
4708 QualType Canonical;
4709 if (!vecType.isCanonical()) {
4710 Canonical = getExtVectorType(vecType: getCanonicalType(T: vecType), NumElts);
4711
4712 // Get the new insert position for the node we care about.
4713 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4714 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4715 }
4716 auto *New = new (*this, alignof(ExtVectorType))
4717 ExtVectorType(vecType, NumElts, Canonical);
4718 VectorTypes.InsertNode(N: New, InsertPos);
4719 Types.push_back(Elt: New);
4720 return QualType(New, 0);
4721}
4722
/// Return a dependently-sized ext_vector type with the given element type and
/// dependent size expression. Only the canonical node is uniqued; sugared
/// nodes are freshly allocated each call.
QualType
ASTContext::getDependentSizedExtVectorType(QualType vecType,
                                           Expr *SizeExpr,
                                           SourceLocation AttrLoc) const {
  // Profile against the canonical element type so all spellings of the same
  // dependent ext_vector share one canonical node.
  llvm::FoldingSetNodeID ID;
  DependentSizedExtVectorType::Profile(ID, Context: *this, ElementType: getCanonicalType(T: vecType),
                                       SizeExpr);

  void *InsertPos = nullptr;
  DependentSizedExtVectorType *Canon
    = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
  DependentSizedExtVectorType *New;
  if (Canon) {
    // We already have a canonical version of this array type; use it as
    // the canonical type for a newly-built type.
    New = new (*this, alignof(DependentSizedExtVectorType))
        DependentSizedExtVectorType(vecType, QualType(Canon, 0), SizeExpr,
                                    AttrLoc);
  } else {
    QualType CanonVecTy = getCanonicalType(T: vecType);
    if (CanonVecTy == vecType) {
      // The request is itself canonical: insert it as the canonical node.
      New = new (*this, alignof(DependentSizedExtVectorType))
          DependentSizedExtVectorType(vecType, QualType(), SizeExpr, AttrLoc);

      DependentSizedExtVectorType *CanonCheck
        = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken");
      (void)CanonCheck;
      DependentSizedExtVectorTypes.InsertNode(N: New, InsertPos);
    } else {
      // Build the canonical node first (recursively), then a sugared node
      // that points at it.
      QualType CanonExtTy = getDependentSizedExtVectorType(vecType: CanonVecTy, SizeExpr,
                                                           AttrLoc: SourceLocation());
      New = new (*this, alignof(DependentSizedExtVectorType))
          DependentSizedExtVectorType(vecType, CanonExtTy, SizeExpr, AttrLoc);
    }
  }

  Types.push_back(Elt: New);
  return QualType(New, 0);
}
4763
4764QualType ASTContext::getConstantMatrixType(QualType ElementTy, unsigned NumRows,
4765 unsigned NumColumns) const {
4766 llvm::FoldingSetNodeID ID;
4767 ConstantMatrixType::Profile(ID, ElementType: ElementTy, NumRows, NumColumns,
4768 TypeClass: Type::ConstantMatrix);
4769
4770 assert(MatrixType::isValidElementType(ElementTy, getLangOpts()) &&
4771 "need a valid element type");
4772 assert(NumRows > 0 && NumRows <= LangOpts.MaxMatrixDimension &&
4773 NumColumns > 0 && NumColumns <= LangOpts.MaxMatrixDimension &&
4774 "need valid matrix dimensions");
4775 void *InsertPos = nullptr;
4776 if (ConstantMatrixType *MTP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos))
4777 return QualType(MTP, 0);
4778
4779 QualType Canonical;
4780 if (!ElementTy.isCanonical()) {
4781 Canonical =
4782 getConstantMatrixType(ElementTy: getCanonicalType(T: ElementTy), NumRows, NumColumns);
4783
4784 ConstantMatrixType *NewIP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4785 assert(!NewIP && "Matrix type shouldn't already exist in the map");
4786 (void)NewIP;
4787 }
4788
4789 auto *New = new (*this, alignof(ConstantMatrixType))
4790 ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical);
4791 MatrixTypes.InsertNode(N: New, InsertPos);
4792 Types.push_back(Elt: New);
4793 return QualType(New, 0);
4794}
4795
4796QualType ASTContext::getDependentSizedMatrixType(QualType ElementTy,
4797 Expr *RowExpr,
4798 Expr *ColumnExpr,
4799 SourceLocation AttrLoc) const {
4800 QualType CanonElementTy = getCanonicalType(T: ElementTy);
4801 llvm::FoldingSetNodeID ID;
4802 DependentSizedMatrixType::Profile(ID, Context: *this, ElementType: CanonElementTy, RowExpr,
4803 ColumnExpr);
4804
4805 void *InsertPos = nullptr;
4806 DependentSizedMatrixType *Canon =
4807 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4808
4809 if (!Canon) {
4810 Canon = new (*this, alignof(DependentSizedMatrixType))
4811 DependentSizedMatrixType(CanonElementTy, QualType(), RowExpr,
4812 ColumnExpr, AttrLoc);
4813#ifndef NDEBUG
4814 DependentSizedMatrixType *CanonCheck =
4815 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4816 assert(!CanonCheck && "Dependent-sized matrix canonical type broken");
4817#endif
4818 DependentSizedMatrixTypes.InsertNode(N: Canon, InsertPos);
4819 Types.push_back(Elt: Canon);
4820 }
4821
4822 // Already have a canonical version of the matrix type
4823 //
4824 // If it exactly matches the requested type, use it directly.
4825 if (Canon->getElementType() == ElementTy && Canon->getRowExpr() == RowExpr &&
4826 Canon->getRowExpr() == ColumnExpr)
4827 return QualType(Canon, 0);
4828
4829 // Use Canon as the canonical type for newly-built type.
4830 DependentSizedMatrixType *New = new (*this, alignof(DependentSizedMatrixType))
4831 DependentSizedMatrixType(ElementTy, QualType(Canon, 0), RowExpr,
4832 ColumnExpr, AttrLoc);
4833 Types.push_back(Elt: New);
4834 return QualType(New, 0);
4835}
4836
4837QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType,
4838 Expr *AddrSpaceExpr,
4839 SourceLocation AttrLoc) const {
4840 assert(AddrSpaceExpr->isInstantiationDependent());
4841
4842 QualType canonPointeeType = getCanonicalType(T: PointeeType);
4843
4844 void *insertPos = nullptr;
4845 llvm::FoldingSetNodeID ID;
4846 DependentAddressSpaceType::Profile(ID, Context: *this, PointeeType: canonPointeeType,
4847 AddrSpaceExpr);
4848
4849 DependentAddressSpaceType *canonTy =
4850 DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, InsertPos&: insertPos);
4851
4852 if (!canonTy) {
4853 canonTy = new (*this, alignof(DependentAddressSpaceType))
4854 DependentAddressSpaceType(canonPointeeType, QualType(), AddrSpaceExpr,
4855 AttrLoc);
4856 DependentAddressSpaceTypes.InsertNode(N: canonTy, InsertPos: insertPos);
4857 Types.push_back(Elt: canonTy);
4858 }
4859
4860 if (canonPointeeType == PointeeType &&
4861 canonTy->getAddrSpaceExpr() == AddrSpaceExpr)
4862 return QualType(canonTy, 0);
4863
4864 auto *sugaredType = new (*this, alignof(DependentAddressSpaceType))
4865 DependentAddressSpaceType(PointeeType, QualType(canonTy, 0),
4866 AddrSpaceExpr, AttrLoc);
4867 Types.push_back(Elt: sugaredType);
4868 return QualType(sugaredType, 0);
4869}
4870
4871/// Determine whether \p T is canonical as the result type of a function.
4872static bool isCanonicalResultType(QualType T) {
4873 return T.isCanonical() &&
4874 (T.getObjCLifetime() == Qualifiers::OCL_None ||
4875 T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone);
4876}
4877
/// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
QualType
ASTContext::getFunctionNoProtoType(QualType ResultTy,
                                   const FunctionType::ExtInfo &Info) const {
  // FIXME: This assertion cannot be enabled (yet) because the ObjC rewriter
  // functionality creates a function without a prototype regardless of
  // language mode (so it makes them even in C++). Once the rewriter has been
  // fixed, this assertion can be enabled again.
  //assert(!LangOpts.requiresStrictPrototypes() &&
  //       "strict prototypes are disabled");

  // Unique functions, to guarantee there is only one function of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  FunctionNoProtoType::Profile(ID, ResultType: ResultTy, Info);

  void *InsertPos = nullptr;
  if (FunctionNoProtoType *FT =
          FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(FT, 0);

  // A canonical no-proto function type has a canonical, ARC-lifetime-free
  // result type (see isCanonicalResultType); otherwise build the canonical
  // node recursively and point this sugar node at it.
  QualType Canonical;
  if (!isCanonicalResultType(T: ResultTy)) {
    Canonical =
        getFunctionNoProtoType(ResultTy: getCanonicalFunctionResultType(ResultType: ResultTy), Info);

    // Get the new insert position for the node we care about; the recursive
    // call above may have invalidated InsertPos.
    FunctionNoProtoType *NewIP =
        FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = new (*this, alignof(FunctionNoProtoType))
      FunctionNoProtoType(ResultTy, Canonical, Info);
  Types.push_back(Elt: New);
  FunctionNoProtoTypes.InsertNode(N: New, InsertPos);
  return QualType(New, 0);
}
4916
4917CanQualType
4918ASTContext::getCanonicalFunctionResultType(QualType ResultType) const {
4919 CanQualType CanResultType = getCanonicalType(T: ResultType);
4920
4921 // Canonical result types do not have ARC lifetime qualifiers.
4922 if (CanResultType.getQualifiers().hasObjCLifetime()) {
4923 Qualifiers Qs = CanResultType.getQualifiers();
4924 Qs.removeObjCLifetime();
4925 return CanQualType::CreateUnsafe(
4926 Other: getQualifiedType(T: CanResultType.getUnqualifiedType(), Qs));
4927 }
4928
4929 return CanResultType;
4930}
4931
4932static bool isCanonicalExceptionSpecification(
4933 const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) {
4934 if (ESI.Type == EST_None)
4935 return true;
4936 if (!NoexceptInType)
4937 return false;
4938
4939 // C++17 onwards: exception specification is part of the type, as a simple
4940 // boolean "can this function type throw".
4941 if (ESI.Type == EST_BasicNoexcept)
4942 return true;
4943
4944 // A noexcept(expr) specification is (possibly) canonical if expr is
4945 // value-dependent.
4946 if (ESI.Type == EST_DependentNoexcept)
4947 return true;
4948
4949 // A dynamic exception specification is canonical if it only contains pack
4950 // expansions (so we can't tell whether it's non-throwing) and all its
4951 // contained types are canonical.
4952 if (ESI.Type == EST_Dynamic) {
4953 bool AnyPackExpansions = false;
4954 for (QualType ET : ESI.Exceptions) {
4955 if (!ET.isCanonical())
4956 return false;
4957 if (ET->getAs<PackExpansionType>())
4958 AnyPackExpansions = true;
4959 }
4960 return AnyPackExpansions;
4961 }
4962
4963 return false;
4964}
4965
/// Build or find the unique FunctionProtoType with the given result type,
/// parameter types, and extended prototype info. When \p OnlyWantCanonical is
/// set, the inputs are asserted to already be canonical and the canonical
/// node is returned directly.
QualType ASTContext::getFunctionTypeInternal(
    QualType ResultTy, ArrayRef<QualType> ArgArray,
    const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const {
  size_t NumArgs = ArgArray.size();

  // Unique functions, to guarantee there is only one function of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  FunctionProtoType::Profile(ID, Result: ResultTy, ArgTys: ArgArray.begin(), NumArgs, EPI,
                             Context: *this, Canonical: true);

  QualType Canonical;
  bool Unique = false;

  void *InsertPos = nullptr;
  if (FunctionProtoType *FPT =
          FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) {
    QualType Existing = QualType(FPT, 0);

    // If we find a pre-existing equivalent FunctionProtoType, we can just reuse
    // it so long as our exception specification doesn't contain a dependent
    // noexcept expression, or we're just looking for a canonical type.
    // Otherwise, we're going to need to create a type
    // sugar node to hold the concrete expression.
    if (OnlyWantCanonical || !isComputedNoexcept(ESpecType: EPI.ExceptionSpec.Type) ||
        EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr())
      return Existing;

    // We need a new type sugar node for this one, to hold the new noexcept
    // expression. We do no canonicalization here, but that's OK since we don't
    // expect to see the same noexcept expression much more than once.
    Canonical = getCanonicalType(T: Existing);
    Unique = true;
  }

  bool NoexceptInType = getLangOpts().CPlusPlus17;
  bool IsCanonicalExceptionSpec =
      isCanonicalExceptionSpecification(ESI: EPI.ExceptionSpec, NoexceptInType);

  // Determine whether the type being created is already canonical or not.
  bool isCanonical = !Unique && IsCanonicalExceptionSpec &&
                     isCanonicalResultType(T: ResultTy) && !EPI.HasTrailingReturn;
  for (unsigned i = 0; i != NumArgs && isCanonical; ++i)
    if (!ArgArray[i].isCanonicalAsParam())
      isCanonical = false;

  if (OnlyWantCanonical)
    assert(isCanonical &&
           "given non-canonical parameters constructing canonical type");

  // If this type isn't canonical, get the canonical version of it if we don't
  // already have it. The exception spec is only partially part of the
  // canonical type, and only in C++17 onwards.
  if (!isCanonical && Canonical.isNull()) {
    SmallVector<QualType, 16> CanonicalArgs;
    CanonicalArgs.reserve(N: NumArgs);
    for (unsigned i = 0; i != NumArgs; ++i)
      CanonicalArgs.push_back(Elt: getCanonicalParamType(T: ArgArray[i]));

    llvm::SmallVector<QualType, 8> ExceptionTypeStorage;
    FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI;
    CanonicalEPI.HasTrailingReturn = false;

    if (IsCanonicalExceptionSpec) {
      // Exception spec is already OK.
    } else if (NoexceptInType) {
      // Map the non-canonical exception specification onto its canonical
      // C++17 form (throwing / non-throwing / dependent).
      switch (EPI.ExceptionSpec.Type) {
      case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated:
        // We don't know yet. It shouldn't matter what we pick here; no-one
        // should ever look at this.
        [[fallthrough]];
      case EST_None: case EST_MSAny: case EST_NoexceptFalse:
        CanonicalEPI.ExceptionSpec.Type = EST_None;
        break;

        // A dynamic exception specification is almost always "not noexcept",
        // with the exception that a pack expansion might expand to no types.
      case EST_Dynamic: {
        bool AnyPacks = false;
        for (QualType ET : EPI.ExceptionSpec.Exceptions) {
          if (ET->getAs<PackExpansionType>())
            AnyPacks = true;
          ExceptionTypeStorage.push_back(Elt: getCanonicalType(T: ET));
        }
        if (!AnyPacks)
          CanonicalEPI.ExceptionSpec.Type = EST_None;
        else {
          CanonicalEPI.ExceptionSpec.Type = EST_Dynamic;
          CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage;
        }
        break;
      }

      case EST_DynamicNone:
      case EST_BasicNoexcept:
      case EST_NoexceptTrue:
      case EST_NoThrow:
        CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept;
        break;

      case EST_DependentNoexcept:
        llvm_unreachable("dependent noexcept is already canonical");
      }
    } else {
      // Pre-C++17: the exception specification is not part of the canonical
      // type at all.
      CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo();
    }

    // Adjust the canonical function result type.
    CanQualType CanResultTy = getCanonicalFunctionResultType(ResultType: ResultTy);
    Canonical =
        getFunctionTypeInternal(ResultTy: CanResultTy, ArgArray: CanonicalArgs, EPI: CanonicalEPI, OnlyWantCanonical: true);

    // Get the new insert position for the node we care about; the recursive
    // call above may have invalidated InsertPos.
    FunctionProtoType *NewIP =
        FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  // Compute the needed size to hold this FunctionProtoType and the
  // various trailing objects.
  auto ESH = FunctionProtoType::getExceptionSpecSize(
      EST: EPI.ExceptionSpec.Type, NumExceptions: EPI.ExceptionSpec.Exceptions.size());
  size_t Size = FunctionProtoType::totalSizeToAlloc<
      QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields,
      FunctionType::FunctionTypeExtraAttributeInfo,
      FunctionType::FunctionTypeArmAttributes, FunctionType::ExceptionType,
      Expr *, FunctionDecl *, FunctionProtoType::ExtParameterInfo, Qualifiers,
      FunctionEffect, EffectConditionExpr>(
      Counts: NumArgs, Counts: EPI.Variadic, Counts: EPI.requiresFunctionProtoTypeExtraBitfields(),
      Counts: EPI.requiresFunctionProtoTypeExtraAttributeInfo(),
      Counts: EPI.requiresFunctionProtoTypeArmAttributes(), Counts: ESH.NumExceptionType,
      Counts: ESH.NumExprPtr, Counts: ESH.NumFunctionDeclPtr,
      Counts: EPI.ExtParameterInfos ? NumArgs : 0,
      Counts: EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0, Counts: EPI.FunctionEffects.size(),
      Counts: EPI.FunctionEffects.conditions().size());

  auto *FTP = (FunctionProtoType *)Allocate(Size, Align: alignof(FunctionProtoType));
  FunctionProtoType::ExtProtoInfo newEPI = EPI;
  new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI);
  Types.push_back(Elt: FTP);
  // A "Unique" node is a sugar node holding a distinct noexcept expression;
  // it must not go into the folding set (the equivalent node is already
  // there).
  if (!Unique)
    FunctionProtoTypes.InsertNode(N: FTP, InsertPos);
  if (!EPI.FunctionEffects.empty())
    AnyFunctionEffects = true;
  return QualType(FTP, 0);
}
5112
5113QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const {
5114 llvm::FoldingSetNodeID ID;
5115 PipeType::Profile(ID, T, isRead: ReadOnly);
5116
5117 void *InsertPos = nullptr;
5118 if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos))
5119 return QualType(PT, 0);
5120
5121 // If the pipe element type isn't canonical, this won't be a canonical type
5122 // either, so fill in the canonical type field.
5123 QualType Canonical;
5124 if (!T.isCanonical()) {
5125 Canonical = getPipeType(T: getCanonicalType(T), ReadOnly);
5126
5127 // Get the new insert position for the node we care about.
5128 PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos);
5129 assert(!NewIP && "Shouldn't be in the map!");
5130 (void)NewIP;
5131 }
5132 auto *New = new (*this, alignof(PipeType)) PipeType(T, Canonical, ReadOnly);
5133 Types.push_back(Elt: New);
5134 PipeTypes.InsertNode(N: New, InsertPos);
5135 return QualType(New, 0);
5136}
5137
5138QualType ASTContext::adjustStringLiteralBaseType(QualType Ty) const {
5139 // OpenCL v1.1 s6.5.3: a string literal is in the constant address space.
5140 return LangOpts.OpenCL ? getAddrSpaceQualType(T: Ty, AddressSpace: LangAS::opencl_constant)
5141 : Ty;
5142}
5143
5144QualType ASTContext::getReadPipeType(QualType T) const {
5145 return getPipeType(T, ReadOnly: true);
5146}
5147
5148QualType ASTContext::getWritePipeType(QualType T) const {
5149 return getPipeType(T, ReadOnly: false);
5150}
5151
5152QualType ASTContext::getBitIntType(bool IsUnsigned, unsigned NumBits) const {
5153 llvm::FoldingSetNodeID ID;
5154 BitIntType::Profile(ID, IsUnsigned, NumBits);
5155
5156 void *InsertPos = nullptr;
5157 if (BitIntType *EIT = BitIntTypes.FindNodeOrInsertPos(ID, InsertPos))
5158 return QualType(EIT, 0);
5159
5160 auto *New = new (*this, alignof(BitIntType)) BitIntType(IsUnsigned, NumBits);
5161 BitIntTypes.InsertNode(N: New, InsertPos);
5162 Types.push_back(Elt: New);
5163 return QualType(New, 0);
5164}
5165
5166QualType ASTContext::getDependentBitIntType(bool IsUnsigned,
5167 Expr *NumBitsExpr) const {
5168 assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent");
5169 llvm::FoldingSetNodeID ID;
5170 DependentBitIntType::Profile(ID, Context: *this, IsUnsigned, NumBitsExpr);
5171
5172 void *InsertPos = nullptr;
5173 if (DependentBitIntType *Existing =
5174 DependentBitIntTypes.FindNodeOrInsertPos(ID, InsertPos))
5175 return QualType(Existing, 0);
5176
5177 auto *New = new (*this, alignof(DependentBitIntType))
5178 DependentBitIntType(IsUnsigned, NumBitsExpr);
5179 DependentBitIntTypes.InsertNode(N: New, InsertPos);
5180
5181 Types.push_back(Elt: New);
5182 return QualType(New, 0);
5183}
5184
/// Return the unique PredefinedSugarType node of the given kind (size_t,
/// signed size_t, ptrdiff_t), a named sugar node over the corresponding
/// target integer type. Nodes are cached per kind in PredefinedSugarTypes.
QualType
ASTContext::getPredefinedSugarType(PredefinedSugarType::Kind KD) const {
  using Kind = PredefinedSugarType::Kind;

  // Fast path: one node per kind, cached on first creation.
  if (auto *Target = PredefinedSugarTypes[llvm::to_underlying(E: KD)];
      Target != nullptr)
    return QualType(Target, 0);

  // Map each kind to its underlying canonical integer type for this target.
  auto getCanonicalType = [](const ASTContext &Ctx, Kind KDI) -> QualType {
    switch (KDI) {
    // size_t (C99TC3 6.5.3.4), signed size_t (C++23 5.13.2) and
    // ptrdiff_t (C99TC3 6.5.6) Although these types are not built-in, they
    // are part of the core language and are widely used. Using
    // PredefinedSugarType makes these types as named sugar types rather than
    // standard integer types, enabling better hints and diagnostics.
    case Kind::SizeT:
      return Ctx.getFromTargetType(Type: Ctx.Target->getSizeType());
    case Kind::SignedSizeT:
      return Ctx.getFromTargetType(Type: Ctx.Target->getSignedSizeType());
    case Kind::PtrdiffT:
      return Ctx.getFromTargetType(Type: Ctx.Target->getPtrDiffType(AddrSpace: LangAS::Default));
    }
    llvm_unreachable("unexpected kind");
  };
  auto *New = new (*this, alignof(PredefinedSugarType))
      PredefinedSugarType(KD, &Idents.get(Name: PredefinedSugarType::getName(KD)),
                          getCanonicalType(*this, static_cast<Kind>(KD)));
  Types.push_back(Elt: New);
  PredefinedSugarTypes[llvm::to_underlying(E: KD)] = New;
  return QualType(New, 0);
}
5216
5217QualType ASTContext::getTypeDeclType(ElaboratedTypeKeyword Keyword,
5218 NestedNameSpecifier Qualifier,
5219 const TypeDecl *Decl) const {
5220 if (auto *Tag = dyn_cast<TagDecl>(Val: Decl))
5221 return getTagType(Keyword, Qualifier, TD: Tag,
5222 /*OwnsTag=*/false);
5223 if (auto *Typedef = dyn_cast<TypedefNameDecl>(Val: Decl))
5224 return getTypedefType(Keyword, Qualifier, Decl: Typedef);
5225 if (auto *UD = dyn_cast<UnresolvedUsingTypenameDecl>(Val: Decl))
5226 return getUnresolvedUsingType(Keyword, Qualifier, D: UD);
5227
5228 assert(Keyword == ElaboratedTypeKeyword::None);
5229 assert(!Qualifier);
5230 return QualType(Decl->TypeForDecl, 0);
5231}
5232
5233CanQualType ASTContext::getCanonicalTypeDeclType(const TypeDecl *TD) const {
5234 if (auto *Tag = dyn_cast<TagDecl>(Val: TD))
5235 return getCanonicalTagType(TD: Tag);
5236 if (auto *TN = dyn_cast<TypedefNameDecl>(Val: TD))
5237 return getCanonicalType(T: TN->getUnderlyingType());
5238 if (const auto *UD = dyn_cast<UnresolvedUsingTypenameDecl>(Val: TD))
5239 return getCanonicalUnresolvedUsingType(D: UD);
5240 assert(TD->TypeForDecl);
5241 return TD->TypeForDecl->getCanonicalTypeUnqualified();
5242}
5243
/// Return the type for a type declaration with no elaboration keyword or
/// qualifier. Tags and unresolved-using declarations resolve to their
/// canonical types here.
QualType ASTContext::getTypeDeclType(const TypeDecl *Decl) const {
  if (const auto *TD = dyn_cast<TagDecl>(Val: Decl))
    return getCanonicalTagType(TD);
  // Note: only plain typedefs and alias-declarations take this path; other
  // TypedefNameDecl subclasses fall through to the prebuilt TypeForDecl below.
  if (const auto *TD = dyn_cast<TypedefNameDecl>(Val: Decl);
      isa_and_nonnull<TypedefDecl, TypeAliasDecl>(Val: TD))
    return getTypedefType(Keyword: ElaboratedTypeKeyword::None,
                          /*Qualifier=*/std::nullopt, Decl: TD);
  if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Val: Decl))
    return getCanonicalUnresolvedUsingType(D: Using);

  assert(Decl->TypeForDecl);
  return QualType(Decl->TypeForDecl, 0);
}
5257
/// getTypedefType - Return the unique reference to the type for the
/// specified typedef name decl. \p UnderlyingType may differ from the decl's
/// own underlying type (e.g. for deduced types); \p TypeMatchesDeclOrNone
/// records whether they match, and is computed here when not supplied.
QualType
ASTContext::getTypedefType(ElaboratedTypeKeyword Keyword,
                           NestedNameSpecifier Qualifier,
                           const TypedefNameDecl *Decl, QualType UnderlyingType,
                           std::optional<bool> TypeMatchesDeclOrNone) const {
  if (!TypeMatchesDeclOrNone) {
    // Default the underlying type from the declaration and compute whether
    // the supplied type matches it exactly (identity, not just same-type).
    QualType DeclUnderlyingType = Decl->getUnderlyingType();
    assert(!DeclUnderlyingType.isNull());
    if (UnderlyingType.isNull())
      UnderlyingType = DeclUnderlyingType;
    else
      assert(hasSameType(UnderlyingType, DeclUnderlyingType));
    TypeMatchesDeclOrNone = UnderlyingType == DeclUnderlyingType;
  } else {
    // FIXME: This is a workaround for a serialization cycle: assume the decl
    // underlying type is not available; don't touch it.
    assert(!UnderlyingType.isNull());
  }

  // Fast path: the plain, unqualified, matching form is cached directly on
  // the declaration rather than in the folding set.
  if (Keyword == ElaboratedTypeKeyword::None && !Qualifier &&
      *TypeMatchesDeclOrNone) {
    if (Decl->TypeForDecl)
      return QualType(Decl->TypeForDecl, 0);

    auto *NewType = new (*this, alignof(TypedefType))
        TypedefType(Type::Typedef, Keyword, Qualifier, Decl, UnderlyingType,
                    !*TypeMatchesDeclOrNone);

    Types.push_back(Elt: NewType);
    Decl->TypeForDecl = NewType;
    return QualType(NewType, 0);
  }

  // Slow path: unique the elaborated/qualified/divergent form in the folding
  // set. The underlying type participates in the profile only when it
  // diverges from the declaration's.
  llvm::FoldingSetNodeID ID;
  TypedefType::Profile(ID, Keyword, Qualifier, Decl,
                       Underlying: *TypeMatchesDeclOrNone ? QualType() : UnderlyingType);

  void *InsertPos = nullptr;
  if (FoldingSetPlaceholder<TypedefType> *Placeholder =
          TypedefTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(Placeholder->getType(), 0);

  // Allocate the node with its trailing objects: the folding-set placeholder,
  // the optional qualifier, and the optional divergent underlying type.
  void *Mem =
      Allocate(Size: TypedefType::totalSizeToAlloc<FoldingSetPlaceholder<TypedefType>,
                                               NestedNameSpecifier, QualType>(
                   Counts: 1, Counts: !!Qualifier, Counts: !*TypeMatchesDeclOrNone),
               Align: alignof(TypedefType));
  auto *NewType =
      new (Mem) TypedefType(Type::Typedef, Keyword, Qualifier, Decl,
                            UnderlyingType, !*TypeMatchesDeclOrNone);
  auto *Placeholder = new (NewType->getFoldingSetPlaceholder())
      FoldingSetPlaceholder<TypedefType>();
  TypedefTypes.InsertNode(N: Placeholder, InsertPos);
  Types.push_back(Elt: NewType);
  return QualType(NewType, 0);
}
5316
/// Return the unique UsingType for a type introduced through a using-shadow
/// declaration. If \p UnderlyingType is null it is recomputed from the
/// shadowed target declaration.
QualType ASTContext::getUsingType(ElaboratedTypeKeyword Keyword,
                                  NestedNameSpecifier Qualifier,
                                  const UsingShadowDecl *D,
                                  QualType UnderlyingType) const {
  // FIXME: This is expensive to compute every time!
  if (UnderlyingType.isNull()) {
    const auto *UD = cast<UsingDecl>(Val: D->getIntroducer());
    UnderlyingType =
        getTypeDeclType(Keyword: UD->hasTypename() ? ElaboratedTypeKeyword::Typename
                                             : ElaboratedTypeKeyword::None,
                        Qualifier: UD->getQualifier(), Decl: cast<TypeDecl>(Val: D->getTargetDecl()));
  }

  llvm::FoldingSetNodeID ID;
  UsingType::Profile(ID, Keyword, Qualifier, D, UnderlyingType);

  void *InsertPos = nullptr;
  if (const UsingType *T = UsingTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(T, 0);

  assert(!UnderlyingType.hasLocalQualifiers());

  // The underlying type must agree with the canonical type of the target
  // declaration.
  assert(
      hasSameType(getCanonicalTypeDeclType(cast<TypeDecl>(D->getTargetDecl())),
                  UnderlyingType));

  // Allocate with the optional qualifier as a trailing object.
  void *Mem =
      Allocate(Size: UsingType::totalSizeToAlloc<NestedNameSpecifier>(Counts: !!Qualifier),
               Align: alignof(UsingType));
  UsingType *T = new (Mem) UsingType(Keyword, Qualifier, D, UnderlyingType);
  Types.push_back(Elt: T);
  UsingTypes.InsertNode(N: T, InsertPos);
  return QualType(T, 0);
}
5351
/// Allocate and construct the TagType node (EnumType, RecordType or
/// InjectedClassNameType) for \p TD, optionally preceded in memory by a
/// TagTypeFoldingSetPlaceholder when the node is uniqued through the folding
/// set. A null \p CanonicalType marks the node as itself canonical.
TagType *ASTContext::getTagTypeInternal(ElaboratedTypeKeyword Keyword,
                                        NestedNameSpecifier Qualifier,
                                        const TagDecl *TD, bool OwnsTag,
                                        bool IsInjected,
                                        const Type *CanonicalType,
                                        bool WithFoldingSetNode) const {
  // Pick the concrete type class and its allocation size from the declaration
  // kind.
  auto [TC, Size] = [&] {
    switch (TD->getDeclKind()) {
    case Decl::Enum:
      static_assert(alignof(EnumType) == alignof(TagType));
      return std::make_tuple(args: Type::Enum, args: sizeof(EnumType));
    case Decl::ClassTemplatePartialSpecialization:
    case Decl::ClassTemplateSpecialization:
    case Decl::CXXRecord:
      static_assert(alignof(RecordType) == alignof(TagType));
      static_assert(alignof(InjectedClassNameType) == alignof(TagType));
      if (cast<CXXRecordDecl>(Val: TD)->hasInjectedClassType())
        return std::make_tuple(args: Type::InjectedClassName,
                               args: sizeof(InjectedClassNameType));
      [[fallthrough]];
    case Decl::Record:
      return std::make_tuple(args: Type::Record, args: sizeof(RecordType));
    default:
      llvm_unreachable("unexpected decl kind");
    }
  }();

  // Reserve space for the trailing qualifier, if any.
  if (Qualifier) {
    static_assert(alignof(NestedNameSpecifier) <= alignof(TagType));
    Size = llvm::alignTo(Value: Size, Align: alignof(NestedNameSpecifier)) +
           sizeof(NestedNameSpecifier);
  }
  void *Mem;
  if (WithFoldingSetNode) {
    // FIXME: It would be more profitable to tail allocate the folding set node
    // from the type, instead of the other way around, due to the greater
    // alignment requirements of the type. But this makes it harder to deal with
    // the different type node sizes. This would require either uniquing from
    // different folding sets, or having the folding set accept a
    // contextual parameter which is not fixed at construction.
    Mem = Allocate(
        Size: sizeof(TagTypeFoldingSetPlaceholder) +
            TagTypeFoldingSetPlaceholder::getOffset() + Size,
        Align: std::max(a: alignof(TagTypeFoldingSetPlaceholder), b: alignof(TagType)));
    auto *T = new (Mem) TagTypeFoldingSetPlaceholder();
    // The type node itself lives right after the placeholder.
    Mem = T->getTagType();
  } else {
    Mem = Allocate(Size, Align: alignof(TagType));
  }

  // Placement-construct the concrete node; each branch checks that TagType is
  // the first base so that placeholder offset arithmetic stays valid.
  auto *T = [&, TC = TC]() -> TagType * {
    switch (TC) {
    case Type::Enum: {
      assert(isa<EnumDecl>(TD));
      auto *T = new (Mem) EnumType(TC, Keyword, Qualifier, TD, OwnsTag,
                                   IsInjected, CanonicalType);
      assert(reinterpret_cast<void *>(T) ==
                 reinterpret_cast<void *>(static_cast<TagType *>(T)) &&
             "TagType must be the first base of EnumType");
      return T;
    }
    case Type::Record: {
      assert(isa<RecordDecl>(TD));
      auto *T = new (Mem) RecordType(TC, Keyword, Qualifier, TD, OwnsTag,
                                     IsInjected, CanonicalType);
      assert(reinterpret_cast<void *>(T) ==
                 reinterpret_cast<void *>(static_cast<TagType *>(T)) &&
             "TagType must be the first base of RecordType");
      return T;
    }
    case Type::InjectedClassName: {
      auto *T = new (Mem) InjectedClassNameType(Keyword, Qualifier, TD,
                                                IsInjected, CanonicalType);
      assert(reinterpret_cast<void *>(T) ==
                 reinterpret_cast<void *>(static_cast<TagType *>(T)) &&
             "TagType must be the first base of InjectedClassNameType");
      return T;
    }
    default:
      llvm_unreachable("unexpected type class");
    }
  }();
  // Sanity-check that the constructed node reflects all requested properties.
  assert(T->getKeyword() == Keyword);
  assert(T->getQualifier() == Qualifier);
  assert(T->getDecl() == TD);
  assert(T->isInjected() == IsInjected);
  assert(T->isTagOwned() == OwnsTag);
  assert((T->isCanonicalUnqualified()
              ? QualType()
              : T->getCanonicalTypeInternal()) == QualType(CanonicalType, 0));
  Types.push_back(Elt: T);
  return T;
}
5445
5446static const TagDecl *getNonInjectedClassName(const TagDecl *TD) {
5447 if (const auto *RD = dyn_cast<CXXRecordDecl>(Val: TD);
5448 RD && RD->isInjectedClassName())
5449 return cast<TagDecl>(Val: RD->getDeclContext());
5450 return TD;
5451}
5452
5453CanQualType ASTContext::getCanonicalTagType(const TagDecl *TD) const {
5454 TD = ::getNonInjectedClassName(TD)->getCanonicalDecl();
5455 if (TD->TypeForDecl)
5456 return TD->TypeForDecl->getCanonicalTypeUnqualified();
5457
5458 const Type *CanonicalType = getTagTypeInternal(
5459 Keyword: ElaboratedTypeKeyword::None,
5460 /*Qualifier=*/std::nullopt, TD,
5461 /*OwnsTag=*/false, /*IsInjected=*/false, /*CanonicalType=*/nullptr,
5462 /*WithFoldingSetNode=*/false);
5463 TD->TypeForDecl = CanonicalType;
5464 return CanQualType::CreateUnsafe(Other: QualType(CanonicalType, 0));
5465}
5466
/// Return the (possibly sugared) tag type for \p TD with the given
/// elaboration keyword and qualifier. The "preferred" spelling is cached on
/// the declaration; all other spellings are uniqued in the TagTypes folding
/// set.
QualType ASTContext::getTagType(ElaboratedTypeKeyword Keyword,
                                NestedNameSpecifier Qualifier,
                                const TagDecl *TD, bool OwnsTag) const {

  const TagDecl *NonInjectedTD = ::getNonInjectedClassName(TD);
  bool IsInjected = TD != NonInjectedTD;

  // In C++ the preferred spelling has no keyword; in C it carries the tag's
  // own keyword (struct/union/enum).
  ElaboratedTypeKeyword PreferredKeyword =
      getLangOpts().CPlusPlus ? ElaboratedTypeKeyword::None
                              : KeywordHelpers::getKeywordForTagTypeKind(
                                    Tag: NonInjectedTD->getTagKind());

  if (Keyword == PreferredKeyword && !Qualifier && !OwnsTag) {
    // The preferred form is cached directly on the declaration. A cached
    // canonical node does not count: it may differ in IsInjected/keyword.
    if (const Type *T = TD->TypeForDecl; T && !T->isCanonicalUnqualified())
      return QualType(T, 0);

    const Type *CanonicalType = getCanonicalTagType(TD: NonInjectedTD).getTypePtr();
    const Type *T =
        getTagTypeInternal(Keyword,
                           /*Qualifier=*/std::nullopt, TD: NonInjectedTD,
                           /*OwnsTag=*/false, IsInjected, CanonicalType,
                           /*WithFoldingSetNode=*/false);
    TD->TypeForDecl = T;
    return QualType(T, 0);
  }

  // Non-preferred spellings are uniqued through the folding set.
  llvm::FoldingSetNodeID ID;
  TagTypeFoldingSetPlaceholder::Profile(ID, Keyword, Qualifier, Tag: NonInjectedTD,
                                        OwnsTag, IsInjected);

  void *InsertPos = nullptr;
  if (TagTypeFoldingSetPlaceholder *T =
          TagTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(T->getTagType(), 0);

  const Type *CanonicalType = getCanonicalTagType(TD: NonInjectedTD).getTypePtr();
  TagType *T =
      getTagTypeInternal(Keyword, Qualifier, TD: NonInjectedTD, OwnsTag, IsInjected,
                         CanonicalType, /*WithFoldingSetNode=*/true);
  TagTypes.InsertNode(N: TagTypeFoldingSetPlaceholder::fromTagType(T), InsertPos);
  return QualType(T, 0);
}
5509
/// Choose the smallest integer type that can represent all enumerators of an
/// enum with \p NumNegativeBits / \p NumPositiveBits significant bits, and
/// the corresponding promotion type. Packed enums may also use char/short.
/// Returns true if no standard integer type is wide enough.
bool ASTContext::computeBestEnumTypes(bool IsPacked, unsigned NumNegativeBits,
                                      unsigned NumPositiveBits,
                                      QualType &BestType,
                                      QualType &BestPromotionType) {
  unsigned IntWidth = Target->getIntWidth();
  unsigned CharWidth = Target->getCharWidth();
  unsigned ShortWidth = Target->getShortWidth();
  bool EnumTooLarge = false;
  unsigned BestWidth;
  if (NumNegativeBits) {
    // If there is a negative value, figure out the smallest integer type (of
    // int/long/longlong) that fits.
    // If it's packed, check also if it fits a char or a short.
    // Note positive values need strictly fewer bits than the width, since one
    // bit is reserved for the sign.
    if (IsPacked && NumNegativeBits <= CharWidth &&
        NumPositiveBits < CharWidth) {
      BestType = SignedCharTy;
      BestWidth = CharWidth;
    } else if (IsPacked && NumNegativeBits <= ShortWidth &&
               NumPositiveBits < ShortWidth) {
      BestType = ShortTy;
      BestWidth = ShortWidth;
    } else if (NumNegativeBits <= IntWidth && NumPositiveBits < IntWidth) {
      BestType = IntTy;
      BestWidth = IntWidth;
    } else {
      BestWidth = Target->getLongWidth();

      if (NumNegativeBits <= BestWidth && NumPositiveBits < BestWidth) {
        BestType = LongTy;
      } else {
        BestWidth = Target->getLongLongWidth();

        if (NumNegativeBits > BestWidth || NumPositiveBits >= BestWidth)
          EnumTooLarge = true;
        BestType = LongLongTy;
      }
    }
    // Signed types narrower than int promote to int.
    BestPromotionType = (BestWidth <= IntWidth ? IntTy : BestType);
  } else {
    // If there is no negative value, figure out the smallest type that fits
    // all of the enumerator values.
    // If it's packed, check also if it fits a char or a short.
    if (IsPacked && NumPositiveBits <= CharWidth) {
      BestType = UnsignedCharTy;
      BestPromotionType = IntTy;
      BestWidth = CharWidth;
    } else if (IsPacked && NumPositiveBits <= ShortWidth) {
      BestType = UnsignedShortTy;
      BestPromotionType = IntTy;
      BestWidth = ShortWidth;
    } else if (NumPositiveBits <= IntWidth) {
      BestType = UnsignedIntTy;
      BestWidth = IntWidth;
      // In C++, an unsigned type whose values all fit in the signed type of
      // the same width promotes to the signed type.
      BestPromotionType = (NumPositiveBits == BestWidth || !LangOpts.CPlusPlus)
                              ? UnsignedIntTy
                              : IntTy;
    } else if (NumPositiveBits <= (BestWidth = Target->getLongWidth())) {
      BestType = UnsignedLongTy;
      BestPromotionType = (NumPositiveBits == BestWidth || !LangOpts.CPlusPlus)
                              ? UnsignedLongTy
                              : LongTy;
    } else {
      BestWidth = Target->getLongLongWidth();
      if (NumPositiveBits > BestWidth) {
        // This can happen with bit-precise integer types, but those are not
        // allowed as the type for an enumerator per C23 6.7.2.2p4 and p12.
        // FIXME: GCC uses __int128_t and __uint128_t for cases that fit within
        // a 128-bit integer, we should consider doing the same.
        EnumTooLarge = true;
      }
      BestType = UnsignedLongLongTy;
      BestPromotionType = (NumPositiveBits == BestWidth || !LangOpts.CPlusPlus)
                              ? UnsignedLongLongTy
                              : LongLongTy;
    }
  }
  return EnumTooLarge;
}
5588
5589bool ASTContext::isRepresentableIntegerValue(llvm::APSInt &Value, QualType T) {
5590 assert((T->isIntegralType(*this) || T->isEnumeralType()) &&
5591 "Integral type required!");
5592 unsigned BitWidth = getIntWidth(T);
5593
5594 if (Value.isUnsigned() || Value.isNonNegative()) {
5595 if (T->isSignedIntegerOrEnumerationType())
5596 --BitWidth;
5597 return Value.getActiveBits() <= BitWidth;
5598 }
5599 return Value.getSignificantBits() <= BitWidth;
5600}
5601
5602UnresolvedUsingType *ASTContext::getUnresolvedUsingTypeInternal(
5603 ElaboratedTypeKeyword Keyword, NestedNameSpecifier Qualifier,
5604 const UnresolvedUsingTypenameDecl *D, void *InsertPos,
5605 const Type *CanonicalType) const {
5606 void *Mem = Allocate(
5607 Size: UnresolvedUsingType::totalSizeToAlloc<
5608 FoldingSetPlaceholder<UnresolvedUsingType>, NestedNameSpecifier>(
5609 Counts: !!InsertPos, Counts: !!Qualifier),
5610 Align: alignof(UnresolvedUsingType));
5611 auto *T = new (Mem) UnresolvedUsingType(Keyword, Qualifier, D, CanonicalType);
5612 if (InsertPos) {
5613 auto *Placeholder = new (T->getFoldingSetPlaceholder())
5614 FoldingSetPlaceholder<TypedefType>();
5615 TypedefTypes.InsertNode(N: Placeholder, InsertPos);
5616 }
5617 Types.push_back(Elt: T);
5618 return T;
5619}
5620
5621CanQualType ASTContext::getCanonicalUnresolvedUsingType(
5622 const UnresolvedUsingTypenameDecl *D) const {
5623 D = D->getCanonicalDecl();
5624 if (D->TypeForDecl)
5625 return D->TypeForDecl->getCanonicalTypeUnqualified();
5626
5627 const Type *CanonicalType = getUnresolvedUsingTypeInternal(
5628 Keyword: ElaboratedTypeKeyword::None,
5629 /*Qualifier=*/std::nullopt, D,
5630 /*InsertPos=*/nullptr, /*CanonicalType=*/nullptr);
5631 D->TypeForDecl = CanonicalType;
5632 return CanQualType::CreateUnsafe(Other: QualType(CanonicalType, 0));
5633}
5634
/// Return the (possibly sugared) type for an unresolved-using-typename
/// declaration. The bare, unqualified spelling is cached on the declaration;
/// elaborated spellings are uniqued in the UnresolvedUsingTypes folding set.
QualType
ASTContext::getUnresolvedUsingType(ElaboratedTypeKeyword Keyword,
                                   NestedNameSpecifier Qualifier,
                                   const UnresolvedUsingTypenameDecl *D) const {
  if (Keyword == ElaboratedTypeKeyword::None && !Qualifier) {
    // A cached canonical node does not satisfy this lookup; the sugared node
    // is cached separately on the declaration.
    if (const Type *T = D->TypeForDecl; T && !T->isCanonicalUnqualified())
      return QualType(T, 0);

    const Type *CanonicalType = getCanonicalUnresolvedUsingType(D).getTypePtr();
    const Type *T =
        getUnresolvedUsingTypeInternal(Keyword: ElaboratedTypeKeyword::None,
                                       /*Qualifier=*/std::nullopt, D,
                                       /*InsertPos=*/nullptr, CanonicalType);
    D->TypeForDecl = T;
    return QualType(T, 0);
  }

  llvm::FoldingSetNodeID ID;
  UnresolvedUsingType::Profile(ID, Keyword, Qualifier, D);

  void *InsertPos = nullptr;
  if (FoldingSetPlaceholder<UnresolvedUsingType> *Placeholder =
          UnresolvedUsingTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(Placeholder->getType(), 0);
  assert(InsertPos);

  const Type *CanonicalType = getCanonicalUnresolvedUsingType(D).getTypePtr();
  const Type *T = getUnresolvedUsingTypeInternal(Keyword, Qualifier, D,
                                                 InsertPos, CanonicalType);
  return QualType(T, 0);
}
5666
5667QualType ASTContext::getAttributedType(attr::Kind attrKind,
5668 QualType modifiedType,
5669 QualType equivalentType,
5670 const Attr *attr) const {
5671 llvm::FoldingSetNodeID id;
5672 AttributedType::Profile(ID&: id, attrKind, modified: modifiedType, equivalent: equivalentType, attr);
5673
5674 void *insertPos = nullptr;
5675 AttributedType *type = AttributedTypes.FindNodeOrInsertPos(ID: id, InsertPos&: insertPos);
5676 if (type) return QualType(type, 0);
5677
5678 assert(!attr || attr->getKind() == attrKind);
5679
5680 QualType canon = getCanonicalType(T: equivalentType);
5681 type = new (*this, alignof(AttributedType))
5682 AttributedType(canon, attrKind, attr, modifiedType, equivalentType);
5683
5684 Types.push_back(Elt: type);
5685 AttributedTypes.InsertNode(N: type, InsertPos: insertPos);
5686
5687 return QualType(type, 0);
5688}
5689
5690QualType ASTContext::getAttributedType(const Attr *attr, QualType modifiedType,
5691 QualType equivalentType) const {
5692 return getAttributedType(attrKind: attr->getKind(), modifiedType, equivalentType, attr);
5693}
5694
5695QualType ASTContext::getAttributedType(NullabilityKind nullability,
5696 QualType modifiedType,
5697 QualType equivalentType) {
5698 switch (nullability) {
5699 case NullabilityKind::NonNull:
5700 return getAttributedType(attrKind: attr::TypeNonNull, modifiedType, equivalentType);
5701
5702 case NullabilityKind::Nullable:
5703 return getAttributedType(attrKind: attr::TypeNullable, modifiedType, equivalentType);
5704
5705 case NullabilityKind::NullableResult:
5706 return getAttributedType(attrKind: attr::TypeNullableResult, modifiedType,
5707 equivalentType);
5708
5709 case NullabilityKind::Unspecified:
5710 return getAttributedType(attrKind: attr::TypeNullUnspecified, modifiedType,
5711 equivalentType);
5712 }
5713
5714 llvm_unreachable("Unknown nullability kind");
5715}
5716
/// Return the unique BTFTagAttributedType that wraps \p Wrapped with the
/// given btf_type_tag attribute, creating and caching it on first use.
QualType ASTContext::getBTFTagAttributedType(const BTFTypeTagAttr *BTFAttr,
                                             QualType Wrapped) const {
  // Hash on (Wrapped, BTFAttr) and reuse an existing node if present.
  llvm::FoldingSetNodeID ID;
  BTFTagAttributedType::Profile(ID, Wrapped, BTFAttr);

  void *InsertPos = nullptr;
  BTFTagAttributedType *Ty =
      BTFTagAttributedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (Ty)
    return QualType(Ty, 0);

  // The canonical type drops the attribute sugar entirely.
  QualType Canon = getCanonicalType(T: Wrapped);
  Ty = new (*this, alignof(BTFTagAttributedType))
      BTFTagAttributedType(Canon, Wrapped, BTFAttr);

  Types.push_back(Elt: Ty);
  BTFTagAttributedTypes.InsertNode(N: Ty, InsertPos);

  return QualType(Ty, 0);
}
5737
5738QualType ASTContext::getOverflowBehaviorType(const OverflowBehaviorAttr *Attr,
5739 QualType Underlying) const {
5740 const IdentifierInfo *II = Attr->getBehaviorKind();
5741 StringRef IdentName = II->getName();
5742 OverflowBehaviorType::OverflowBehaviorKind Kind;
5743 if (IdentName == "wrap") {
5744 Kind = OverflowBehaviorType::OverflowBehaviorKind::Wrap;
5745 } else if (IdentName == "trap") {
5746 Kind = OverflowBehaviorType::OverflowBehaviorKind::Trap;
5747 } else {
5748 return Underlying;
5749 }
5750
5751 return getOverflowBehaviorType(Kind, Wrapped: Underlying);
5752}
5753
5754QualType ASTContext::getOverflowBehaviorType(
5755 OverflowBehaviorType::OverflowBehaviorKind Kind,
5756 QualType Underlying) const {
5757 assert(!Underlying->isOverflowBehaviorType() &&
5758 "Cannot have underlying types that are themselves OBTs");
5759 llvm::FoldingSetNodeID ID;
5760 OverflowBehaviorType::Profile(ID, Underlying, Kind);
5761 void *InsertPos = nullptr;
5762
5763 if (OverflowBehaviorType *OBT =
5764 OverflowBehaviorTypes.FindNodeOrInsertPos(ID, InsertPos)) {
5765 return QualType(OBT, 0);
5766 }
5767
5768 QualType Canonical;
5769 if (!Underlying.isCanonical() || Underlying.hasLocalQualifiers()) {
5770 SplitQualType canonSplit = getCanonicalType(T: Underlying).split();
5771 Canonical = getOverflowBehaviorType(Kind, Underlying: QualType(canonSplit.Ty, 0));
5772 Canonical = getQualifiedType(T: Canonical, Qs: canonSplit.Quals);
5773 assert(!OverflowBehaviorTypes.FindNodeOrInsertPos(ID, InsertPos) &&
5774 "Shouldn't be in the map");
5775 }
5776
5777 OverflowBehaviorType *Ty = new (*this, alignof(OverflowBehaviorType))
5778 OverflowBehaviorType(Canonical, Underlying, Kind);
5779
5780 Types.push_back(Elt: Ty);
5781 OverflowBehaviorTypes.InsertNode(N: Ty, InsertPos);
5782 return QualType(Ty, 0);
5783}
5784
/// Return the unique HLSLAttributedResourceType for the given wrapped type,
/// contained type, and resource attributes, creating it on first use.
QualType ASTContext::getHLSLAttributedResourceType(
    QualType Wrapped, QualType Contained,
    const HLSLAttributedResourceType::Attributes &Attrs) {

  // Hash on all three components; identical triples share one node.
  llvm::FoldingSetNodeID ID;
  HLSLAttributedResourceType::Profile(ID, Wrapped, Contained, Attrs);

  void *InsertPos = nullptr;
  HLSLAttributedResourceType *Ty =
      HLSLAttributedResourceTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (Ty)
    return QualType(Ty, 0);

  // NOTE(review): no separate canonical type is computed here, unlike the
  // neighboring getters — presumably the constructor establishes canonicality
  // itself; confirm against HLSLAttributedResourceType's definition.
  Ty = new (*this, alignof(HLSLAttributedResourceType))
      HLSLAttributedResourceType(Wrapped, Contained, Attrs);

  Types.push_back(Elt: Ty);
  HLSLAttributedResourceTypes.InsertNode(N: Ty, InsertPos);

  return QualType(Ty, 0);
}
5806
/// Return the unique HLSLInlineSpirvType for the given SPIR-V opcode, size,
/// alignment, and operand list, creating it on first use.
QualType ASTContext::getHLSLInlineSpirvType(uint32_t Opcode, uint32_t Size,
                                            uint32_t Alignment,
                                            ArrayRef<SpirvOperand> Operands) {
  llvm::FoldingSetNodeID ID;
  HLSLInlineSpirvType::Profile(ID, Opcode, Size, Alignment, Operands);

  void *InsertPos = nullptr;
  HLSLInlineSpirvType *Ty =
      HLSLInlineSpirvTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (Ty)
    return QualType(Ty, 0);

  // Operands are stored in trailing storage, so the allocation size depends
  // on the operand count.
  void *Mem = Allocate(
      Size: HLSLInlineSpirvType::totalSizeToAlloc<SpirvOperand>(Counts: Operands.size()),
      Align: alignof(HLSLInlineSpirvType));

  Ty = new (Mem) HLSLInlineSpirvType(Opcode, Size, Alignment, Operands);

  Types.push_back(Elt: Ty);
  HLSLInlineSpirvTypes.InsertNode(N: Ty, InsertPos);

  return QualType(Ty, 0);
}
5830
/// Retrieve a substitution-result type: the sugar recording that a template
/// type parameter (identified by \p AssociatedDecl / \p Index, and optionally
/// \p PackIndex within an expanded pack) was replaced by \p Replacement.
QualType ASTContext::getSubstTemplateTypeParmType(QualType Replacement,
                                                  Decl *AssociatedDecl,
                                                  unsigned Index,
                                                  UnsignedOrNone PackIndex,
                                                  bool Final) const {
  llvm::FoldingSetNodeID ID;
  SubstTemplateTypeParmType::Profile(ID, Replacement, AssociatedDecl, Index,
                                     PackIndex, Final);
  void *InsertPos = nullptr;
  SubstTemplateTypeParmType *SubstParm =
      SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (!SubstParm) {
    // A trailing QualType slot is allocated only when the replacement is
    // non-canonical (totalSizeToAlloc receives 0 or 1 as the count).
    void *Mem = Allocate(Size: SubstTemplateTypeParmType::totalSizeToAlloc<QualType>(
                             Counts: !Replacement.isCanonical()),
                         Align: alignof(SubstTemplateTypeParmType));
    SubstParm = new (Mem) SubstTemplateTypeParmType(Replacement, AssociatedDecl,
                                                    Index, PackIndex, Final);
    Types.push_back(Elt: SubstParm);
    SubstTemplateTypeParmTypes.InsertNode(N: SubstParm, InsertPos);
  }

  return QualType(SubstParm, 0);
}
5856
/// Retrieve the type recording the substitution of an entire argument pack
/// for a template type parameter pack. All pack elements must be types.
QualType
ASTContext::getSubstTemplateTypeParmPackType(Decl *AssociatedDecl,
                                             unsigned Index, bool Final,
                                             const TemplateArgument &ArgPack) {
#ifndef NDEBUG
  for (const auto &P : ArgPack.pack_elements())
    assert(P.getKind() == TemplateArgument::Type && "Pack contains a non-type");
#endif

  llvm::FoldingSetNodeID ID;
  SubstTemplateTypeParmPackType::Profile(ID, AssociatedDecl, Index, Final,
                                         ArgPack);
  void *InsertPos = nullptr;
  if (SubstTemplateTypeParmPackType *SubstParm =
          SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(SubstParm, 0);

  QualType Canon;
  {
    // The node is canonical iff the associated decl and the pack are both
    // already canonical; otherwise build/find the canonical node first.
    TemplateArgument CanonArgPack = getCanonicalTemplateArgument(Arg: ArgPack);
    if (!AssociatedDecl->isCanonicalDecl() ||
        !CanonArgPack.structurallyEquals(Other: ArgPack)) {
      Canon = getSubstTemplateTypeParmPackType(
          AssociatedDecl: AssociatedDecl->getCanonicalDecl(), Index, Final, ArgPack: CanonArgPack);
      // Re-run the lookup to refresh InsertPos: the recursive call inserted
      // into this folding set and may have rehashed it.
      [[maybe_unused]] const auto *Nothing =
          SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!Nothing);
    }
  }

  auto *SubstParm = new (*this, alignof(SubstTemplateTypeParmPackType))
      SubstTemplateTypeParmPackType(Canon, AssociatedDecl, Index, Final,
                                    ArgPack);
  Types.push_back(Elt: SubstParm);
  SubstTemplateTypeParmPackTypes.InsertNode(N: SubstParm, InsertPos);
  return QualType(SubstParm, 0);
}
5894
/// Retrieve the type recording substitution of a builtin template's argument
/// pack. All pack elements must be types.
QualType
ASTContext::getSubstBuiltinTemplatePack(const TemplateArgument &ArgPack) {
  assert(llvm::all_of(ArgPack.pack_elements(),
                      [](const auto &P) {
                        return P.getKind() == TemplateArgument::Type;
                      }) &&
         "Pack contains a non-type");

  llvm::FoldingSetNodeID ID;
  SubstBuiltinTemplatePackType::Profile(ID, ArgPack);

  void *InsertPos = nullptr;
  if (auto *T =
          SubstBuiltinTemplatePackTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(T, 0);

  // A null Canon means this node is itself canonical (pack already canonical).
  QualType Canon;
  TemplateArgument CanonArgPack = getCanonicalTemplateArgument(Arg: ArgPack);
  if (!CanonArgPack.structurallyEquals(Other: ArgPack)) {
    Canon = getSubstBuiltinTemplatePack(ArgPack: CanonArgPack);
    // Refresh InsertPos, in case the recursive call above caused rehashing,
    // which would invalidate the bucket pointer.
    [[maybe_unused]] const auto *Nothing =
        SubstBuiltinTemplatePackTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!Nothing);
  }

  auto *PackType = new (*this, alignof(SubstBuiltinTemplatePackType))
      SubstBuiltinTemplatePackType(Canon, ArgPack);
  Types.push_back(Elt: PackType);
  SubstBuiltinTemplatePackTypes.InsertNode(N: PackType, InsertPos);
  return QualType(PackType, 0);
}
5928
/// Retrieve the template type parameter type for a template
/// parameter or parameter pack with the given depth, index, and (optionally)
/// name.
QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
                                             bool ParameterPack,
                                             TemplateTypeParmDecl *TTPDecl) const {
  llvm::FoldingSetNodeID ID;
  TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
  void *InsertPos = nullptr;
  TemplateTypeParmType *TypeParm
    = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (TypeParm)
    return QualType(TypeParm, 0);

  if (TTPDecl) {
    // The canonical form drops the declaration: it is identified purely by
    // (Depth, Index, ParameterPack). Build/find that node first.
    QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
    TypeParm = new (*this, alignof(TemplateTypeParmType))
        TemplateTypeParmType(Depth, Index, ParameterPack, TTPDecl, Canon);

    // Re-run the lookup: the recursive call may have rehashed the folding
    // set, and must not have produced a node with this profile.
    TemplateTypeParmType *TypeCheck
      = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!TypeCheck && "Template type parameter canonical type broken");
    (void)TypeCheck;
  } else
    // No declaration: this node is itself the canonical representation.
    TypeParm = new (*this, alignof(TemplateTypeParmType)) TemplateTypeParmType(
        Depth, Index, ParameterPack, /*TTPDecl=*/nullptr, /*Canon=*/QualType());

  Types.push_back(Elt: TypeParm);
  TemplateTypeParmTypes.InsertNode(N: TypeParm, InsertPos);

  return QualType(TypeParm, 0);
}
5962
5963static ElaboratedTypeKeyword
5964getCanonicalElaboratedTypeKeyword(ElaboratedTypeKeyword Keyword) {
5965 switch (Keyword) {
5966 // These are just themselves.
5967 case ElaboratedTypeKeyword::None:
5968 case ElaboratedTypeKeyword::Struct:
5969 case ElaboratedTypeKeyword::Union:
5970 case ElaboratedTypeKeyword::Enum:
5971 case ElaboratedTypeKeyword::Interface:
5972 return Keyword;
5973
5974 // These are equivalent.
5975 case ElaboratedTypeKeyword::Typename:
5976 return ElaboratedTypeKeyword::None;
5977
5978 // These are functionally equivalent, so relying on their equivalence is
5979 // IFNDR. By making them equivalent, we disallow overloading, which at least
5980 // can produce a diagnostic.
5981 case ElaboratedTypeKeyword::Class:
5982 return ElaboratedTypeKeyword::Struct;
5983 }
5984 llvm_unreachable("unexpected keyword kind");
5985}
5986
/// Build a TypeSourceInfo for a template specialization: construct the type
/// itself, then populate its TemplateSpecializationTypeLoc with the provided
/// source locations.
TypeSourceInfo *ASTContext::getTemplateSpecializationTypeInfo(
    ElaboratedTypeKeyword Keyword, SourceLocation ElaboratedKeywordLoc,
    NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKeywordLoc,
    TemplateName Name, SourceLocation NameLoc,
    const TemplateArgumentListInfo &SpecifiedArgs,
    ArrayRef<TemplateArgument> CanonicalArgs, QualType Underlying) const {
  QualType TST = getTemplateSpecializationType(
      Keyword, T: Name, SpecifiedArgs: SpecifiedArgs.arguments(), CanonicalArgs, Canon: Underlying);

  TypeSourceInfo *TSI = CreateTypeSourceInfo(T: TST);
  TSI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>().set(
      ElaboratedKeywordLoc, QualifierLoc, TemplateKeywordLoc, NameLoc,
      TAL: SpecifiedArgs);
  return TSI;
}
6002
6003QualType ASTContext::getTemplateSpecializationType(
6004 ElaboratedTypeKeyword Keyword, TemplateName Template,
6005 ArrayRef<TemplateArgumentLoc> SpecifiedArgs,
6006 ArrayRef<TemplateArgument> CanonicalArgs, QualType Underlying) const {
6007 SmallVector<TemplateArgument, 4> SpecifiedArgVec;
6008 SpecifiedArgVec.reserve(N: SpecifiedArgs.size());
6009 for (const TemplateArgumentLoc &Arg : SpecifiedArgs)
6010 SpecifiedArgVec.push_back(Elt: Arg.getArgument());
6011
6012 return getTemplateSpecializationType(Keyword, T: Template, SpecifiedArgs: SpecifiedArgVec,
6013 CanonicalArgs, Underlying);
6014}
6015
6016[[maybe_unused]] static bool
6017hasAnyPackExpansions(ArrayRef<TemplateArgument> Args) {
6018 for (const TemplateArgument &Arg : Args)
6019 if (Arg.isPackExpansion())
6020 return true;
6021 return false;
6022}
6023
/// Retrieve the canonical form of a (necessarily dependent) template
/// specialization type. All inputs must already be canonical: the template
/// name, the arguments, and the keyword (None unless dependent).
QualType ASTContext::getCanonicalTemplateSpecializationType(
    ElaboratedTypeKeyword Keyword, TemplateName Template,
    ArrayRef<TemplateArgument> Args) const {
  assert(Template ==
         getCanonicalTemplateName(Template, /*IgnoreDeduced=*/true));
  assert((Keyword == ElaboratedTypeKeyword::None ||
          Template.getAsDependentTemplateName()));
#ifndef NDEBUG
  for (const auto &Arg : Args)
    assert(Arg.structurallyEquals(getCanonicalTemplateArgument(Arg)));
#endif

  // A null Underlying in the profile marks this as the canonical node.
  llvm::FoldingSetNodeID ID;
  TemplateSpecializationType::Profile(ID, Keyword, T: Template, Args, Underlying: QualType(),
                                      Context: *this);
  void *InsertPos = nullptr;
  if (auto *T = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(T, 0);

  // Arguments are stored in trailing storage after the node itself.
  void *Mem = Allocate(Size: sizeof(TemplateSpecializationType) +
                           sizeof(TemplateArgument) * Args.size(),
                       Align: alignof(TemplateSpecializationType));
  auto *Spec =
      new (Mem) TemplateSpecializationType(Keyword, Template,
                                           /*IsAlias=*/false, Args, QualType());
  assert(Spec->isDependentType() &&
         "canonical template specialization must be dependent");
  Types.push_back(Elt: Spec);
  TemplateSpecializationTypes.InsertNode(N: Spec, InsertPos);
  return QualType(Spec, 0);
}
6055
/// Retrieve a template specialization type as written. If \p Underlying is
/// null, the canonical form is computed here (canonical template name,
/// canonical keyword, canonical arguments); when the written form already is
/// canonical, the canonical node itself is returned without building sugar.
QualType ASTContext::getTemplateSpecializationType(
    ElaboratedTypeKeyword Keyword, TemplateName Template,
    ArrayRef<TemplateArgument> SpecifiedArgs,
    ArrayRef<TemplateArgument> CanonicalArgs, QualType Underlying) const {
  const auto *TD = Template.getAsTemplateDecl(/*IgnoreDeduced=*/true);
  bool IsTypeAlias = TD && TD->isTypeAlias();
  if (Underlying.isNull()) {
    TemplateName CanonTemplate =
        getCanonicalTemplateName(Name: Template, /*IgnoreDeduced=*/true);
    // Only dependent template names keep an elaborated keyword in canonical
    // form; otherwise the keyword canonicalizes to None.
    ElaboratedTypeKeyword CanonKeyword =
        CanonTemplate.getAsDependentTemplateName()
            ? getCanonicalElaboratedTypeKeyword(Keyword)
            : ElaboratedTypeKeyword::None;
    // Track whether the written form differs from the canonical form in any
    // component (template name, keyword, or any argument).
    bool NonCanonical = Template != CanonTemplate || Keyword != CanonKeyword;
    SmallVector<TemplateArgument, 4> CanonArgsVec;
    if (CanonicalArgs.empty()) {
      CanonArgsVec = SmallVector<TemplateArgument, 4>(SpecifiedArgs);
      NonCanonical |= canonicalizeTemplateArguments(Args: CanonArgsVec);
      CanonicalArgs = CanonArgsVec;
    } else {
      NonCanonical |= !llvm::equal(
          LRange&: SpecifiedArgs, RRange&: CanonicalArgs,
          P: [](const TemplateArgument &A, const TemplateArgument &B) {
            return A.structurallyEquals(Other: B);
          });
    }

    // We can get here with an alias template when the specialization
    // contains a pack expansion that does not match up with a parameter
    // pack, or a builtin template which cannot be resolved due to dependency.
    assert((!isa_and_nonnull<TypeAliasTemplateDecl>(TD) ||
            hasAnyPackExpansions(CanonicalArgs)) &&
           "Caller must compute aliased type");
    IsTypeAlias = false;

    Underlying = getCanonicalTemplateSpecializationType(
        Keyword: CanonKeyword, Template: CanonTemplate, Args: CanonicalArgs);
    if (!NonCanonical)
      return Underlying;
  }
  // Sugared node: arguments live in trailing storage; alias specializations
  // additionally store the aliased type after the arguments.
  void *Mem = Allocate(Size: sizeof(TemplateSpecializationType) +
                           sizeof(TemplateArgument) * SpecifiedArgs.size() +
                           (IsTypeAlias ? sizeof(QualType) : 0),
                       Align: alignof(TemplateSpecializationType));
  auto *Spec = new (Mem) TemplateSpecializationType(
      Keyword, Template, IsTypeAlias, SpecifiedArgs, Underlying);
  Types.push_back(Elt: Spec);
  return QualType(Spec, 0);
}
6105
/// Return the unique ParenType wrapping \p InnerType (sugar for a
/// parenthesized type as written), creating it on first use.
QualType
ASTContext::getParenType(QualType InnerType) const {
  llvm::FoldingSetNodeID ID;
  ParenType::Profile(ID, Inner: InnerType);

  void *InsertPos = nullptr;
  ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  QualType Canon = InnerType;
  if (!Canon.isCanonical()) {
    Canon = getCanonicalType(T: InnerType);
    // getCanonicalType may touch this folding set indirectly; re-run the
    // lookup to refresh InsertPos and verify no node appeared meanwhile.
    ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!CheckT && "Paren canonical type broken");
    (void)CheckT;
  }

  T = new (*this, alignof(ParenType)) ParenType(InnerType, Canon);
  Types.push_back(Elt: T);
  ParenTypes.InsertNode(N: T, InsertPos);
  return QualType(T, 0);
}
6129
6130QualType
6131ASTContext::getMacroQualifiedType(QualType UnderlyingTy,
6132 const IdentifierInfo *MacroII) const {
6133 QualType Canon = UnderlyingTy;
6134 if (!Canon.isCanonical())
6135 Canon = getCanonicalType(T: UnderlyingTy);
6136
6137 auto *newType = new (*this, alignof(MacroQualifiedType))
6138 MacroQualifiedType(UnderlyingTy, Canon, MacroII);
6139 Types.push_back(Elt: newType);
6140 return QualType(newType, 0);
6141}
6142
/// Return the unique DependentNameType for the given keyword, nested-name
/// specifier, and identifier, creating it (and its canonical form, if the
/// keyword or specifier is non-canonical) on first use.
QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
                                          NestedNameSpecifier NNS,
                                          const IdentifierInfo *Name) const {
  llvm::FoldingSetNodeID ID;
  DependentNameType::Profile(ID, Keyword, NNS, Name);

  void *InsertPos = nullptr;
  if (DependentNameType *T =
          DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(T, 0);

  ElaboratedTypeKeyword CanonKeyword =
      getCanonicalElaboratedTypeKeyword(Keyword);
  NestedNameSpecifier CanonNNS = NNS.getCanonical();

  // A null Canon means this node is itself canonical.
  QualType Canon;
  if (CanonKeyword != Keyword || CanonNNS != NNS) {
    Canon = getDependentNameType(Keyword: CanonKeyword, NNS: CanonNNS, Name);
    // Re-run the lookup: the recursive call inserted into this folding set
    // and may have rehashed it, invalidating InsertPos.
    [[maybe_unused]] DependentNameType *T =
        DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!T && "broken canonicalization");
    assert(Canon.isCanonical());
  }

  DependentNameType *T = new (*this, alignof(DependentNameType))
      DependentNameType(Keyword, NNS, Name, Canon);
  Types.push_back(Elt: T);
  DependentNameTypes.InsertNode(N: T, InsertPos);
  return QualType(T, 0);
}
6173
/// Build the template argument that a template parameter would produce if
/// named directly: a type for type parameters, a DeclRefExpr for non-type
/// parameters, and a template name for template template parameters. Packs
/// are wrapped in the corresponding pack-expansion form.
TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) const {
  TemplateArgument Arg;
  if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Val: Param)) {
    QualType ArgType = getTypeDeclType(Decl: TTP);
    if (TTP->isParameterPack())
      ArgType = getPackExpansionType(Pattern: ArgType, NumExpansions: std::nullopt);

    Arg = TemplateArgument(ArgType);
  } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Val: Param)) {
    QualType T =
        NTTP->getType().getNonPackExpansionType().getNonLValueExprType(Context: *this);
    // For class NTTPs, ensure we include the 'const' so the type matches that
    // of a real template argument.
    // FIXME: It would be more faithful to model this as something like an
    // lvalue-to-rvalue conversion applied to a const-qualified lvalue.
    ExprValueKind VK;
    if (T->isRecordType()) {
      // C++ [temp.param]p8: An id-expression naming a non-type
      // template-parameter of class type T denotes a static storage duration
      // object of type const T.
      T.addConst();
      VK = VK_LValue;
    } else {
      VK = Expr::getValueKindForType(T: NTTP->getType());
    }
    Expr *E = new (*this)
        DeclRefExpr(*this, NTTP, /*RefersToEnclosingVariableOrCapture=*/false,
                    T, VK, NTTP->getLocation());

    if (NTTP->isParameterPack())
      E = new (*this) PackExpansionExpr(E, NTTP->getLocation(), std::nullopt);
    Arg = TemplateArgument(E, /*IsCanonical=*/false);
  } else {
    auto *TTP = cast<TemplateTemplateParmDecl>(Val: Param);
    TemplateName Name = getQualifiedTemplateName(
        /*Qualifier=*/std::nullopt, /*TemplateKeyword=*/false,
        Template: TemplateName(TTP));
    if (TTP->isParameterPack())
      Arg = TemplateArgument(Name, /*NumExpansions=*/std::nullopt);
    else
      Arg = TemplateArgument(Name);
  }

  // Parameter packs are delivered as a one-element pack containing the
  // expansion built above.
  if (Param->isTemplateParameterPack())
    Arg =
        TemplateArgument::CreatePackCopy(Context&: const_cast<ASTContext &>(*this), Args: Arg);

  return Arg;
}
6223
/// Return the unique PackExpansionType for \p Pattern (i.e. 'Pattern...'),
/// creating it on first use. \p ExpectPackInType enforces that the pattern
/// actually contains an unexpanded pack; the canonicalizing recursion below
/// disables the check since canonicalization can strip that property's sugar.
QualType ASTContext::getPackExpansionType(QualType Pattern,
                                          UnsignedOrNone NumExpansions,
                                          bool ExpectPackInType) const {
  assert((!ExpectPackInType || Pattern->containsUnexpandedParameterPack()) &&
         "Pack expansions must expand one or more parameter packs");

  llvm::FoldingSetNodeID ID;
  PackExpansionType::Profile(ID, Pattern, NumExpansions);

  void *InsertPos = nullptr;
  PackExpansionType *T = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  // A null Canon means this node is itself canonical.
  QualType Canon;
  if (!Pattern.isCanonical()) {
    Canon = getPackExpansionType(Pattern: getCanonicalType(T: Pattern), NumExpansions,
                                 /*ExpectPackInType=*/false);

    // Find the insert position again, in case we inserted an element into
    // PackExpansionTypes and invalidated our insert position.
    PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  T = new (*this, alignof(PackExpansionType))
      PackExpansionType(Pattern, Canon, NumExpansions);
  Types.push_back(Elt: T);
  PackExpansionTypes.InsertNode(N: T, InsertPos);
  return QualType(T, 0);
}
6254
/// CmpProtocolNames - Comparison predicate for sorting protocols
/// alphabetically. Signature matches llvm::array_pod_sort's qsort-style
/// comparator (elements passed by pointer).
static int CmpProtocolNames(ObjCProtocolDecl *const *LHS,
                            ObjCProtocolDecl *const *RHS) {
  return DeclarationName::compare(LHS: (*LHS)->getDeclName(), RHS: (*RHS)->getDeclName());
}
6261
6262static bool areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) {
6263 if (Protocols.empty()) return true;
6264
6265 if (Protocols[0]->getCanonicalDecl() != Protocols[0])
6266 return false;
6267
6268 for (unsigned i = 1; i != Protocols.size(); ++i)
6269 if (CmpProtocolNames(LHS: &Protocols[i - 1], RHS: &Protocols[i]) >= 0 ||
6270 Protocols[i]->getCanonicalDecl() != Protocols[i])
6271 return false;
6272 return true;
6273}
6274
/// Bring a protocol list into canonical form in place: sort by name,
/// canonicalize each declaration, and drop duplicates.
static void
SortAndUniqueProtocols(SmallVectorImpl<ObjCProtocolDecl *> &Protocols) {
  // Sort protocols, keyed by name.
  llvm::array_pod_sort(Start: Protocols.begin(), End: Protocols.end(), Compare: CmpProtocolNames);

  // Canonicalize.
  for (ObjCProtocolDecl *&P : Protocols)
    P = P->getCanonicalDecl();

  // Remove duplicates (adjacent after sorting + canonicalization).
  auto ProtocolsEnd = llvm::unique(R&: Protocols);
  Protocols.erase(CS: ProtocolsEnd, CE: Protocols.end());
}
6288
/// Legacy overload taking a raw protocol pointer/count pair; forwards to the
/// ArrayRef-based overload with no type arguments and no __kindof.
QualType ASTContext::getObjCObjectType(QualType BaseType,
                                       ObjCProtocolDecl * const *Protocols,
                                       unsigned NumProtocols) const {
  return getObjCObjectType(Base: BaseType, typeArgs: {}, protocols: ArrayRef(Protocols, NumProtocols),
                           /*isKindOf=*/false);
}
6295
/// Return the unique ObjCObjectType for the given base type, type arguments,
/// protocol list, and __kindof flag, creating it on first use. The canonical
/// form has a canonical base, canonical type arguments, and a
/// sorted-and-uniqued canonical protocol list.
QualType ASTContext::getObjCObjectType(
           QualType baseType,
           ArrayRef<QualType> typeArgs,
           ArrayRef<ObjCProtocolDecl *> protocols,
           bool isKindOf) const {
  // If the base type is an interface and there aren't any protocols or
  // type arguments to add, then the interface type will do just fine.
  if (typeArgs.empty() && protocols.empty() && !isKindOf &&
      isa<ObjCInterfaceType>(Val: baseType))
    return baseType;

  // Look in the folding set for an existing type.
  llvm::FoldingSetNodeID ID;
  ObjCObjectTypeImpl::Profile(ID, Base: baseType, typeArgs, protocols, isKindOf);
  void *InsertPos = nullptr;
  if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(QT, 0);

  // Determine the type arguments to be used for canonicalization,
  // which may be explicitly specified here or written on the base
  // type.
  ArrayRef<QualType> effectiveTypeArgs = typeArgs;
  if (effectiveTypeArgs.empty()) {
    if (const auto *baseObject = baseType->getAs<ObjCObjectType>())
      effectiveTypeArgs = baseObject->getTypeArgs();
  }

  // Build the canonical type, which has the canonical base type and a
  // sorted-and-uniqued list of protocols and the type arguments
  // canonicalized.
  QualType canonical;
  bool typeArgsAreCanonical = llvm::all_of(
      Range&: effectiveTypeArgs, P: [&](QualType type) { return type.isCanonical(); });
  bool protocolsSorted = areSortedAndUniqued(Protocols: protocols);
  if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) {
    // Determine the canonical type arguments.
    ArrayRef<QualType> canonTypeArgs;
    SmallVector<QualType, 4> canonTypeArgsVec;
    if (!typeArgsAreCanonical) {
      canonTypeArgsVec.reserve(N: effectiveTypeArgs.size());
      for (auto typeArg : effectiveTypeArgs)
        canonTypeArgsVec.push_back(Elt: getCanonicalType(T: typeArg));
      canonTypeArgs = canonTypeArgsVec;
    } else {
      canonTypeArgs = effectiveTypeArgs;
    }

    ArrayRef<ObjCProtocolDecl *> canonProtocols;
    SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec;
    if (!protocolsSorted) {
      canonProtocolsVec.append(in_start: protocols.begin(), in_end: protocols.end());
      SortAndUniqueProtocols(Protocols&: canonProtocolsVec);
      canonProtocols = canonProtocolsVec;
    } else {
      canonProtocols = protocols;
    }

    canonical = getObjCObjectType(baseType: getCanonicalType(T: baseType), typeArgs: canonTypeArgs,
                                  protocols: canonProtocols, isKindOf);

    // Regenerate InsertPos: the recursive call inserted into this folding
    // set and may have rehashed it.
    ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  // Type arguments and protocols are stored in trailing storage.
  unsigned size = sizeof(ObjCObjectTypeImpl);
  size += typeArgs.size() * sizeof(QualType);
  size += protocols.size() * sizeof(ObjCProtocolDecl *);
  void *mem = Allocate(Size: size, Align: alignof(ObjCObjectTypeImpl));
  auto *T =
    new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols,
                                 isKindOf);

  Types.push_back(Elt: T);
  ObjCObjectTypes.InsertNode(N: T, InsertPos);
  return QualType(T, 0);
}
6372
/// Apply Objective-C protocol qualifiers to the given type.
/// If this is for the canonical type of a type parameter, we can apply
/// protocol qualifiers on the ObjCObjectPointerType.
///
/// Sets \p hasError when \p type is of a kind that cannot carry protocol
/// qualifiers, in which case \p type is returned unchanged.
QualType
ASTContext::applyObjCProtocolQualifiers(QualType type,
                  ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError,
                  bool allowOnPointerType) const {
  hasError = false;

  // Type parameters: rebuild the ObjCTypeParamType with the new protocols.
  if (const auto *objT = dyn_cast<ObjCTypeParamType>(Val: type.getTypePtr())) {
    return getObjCTypeParamType(Decl: objT->getDecl(), protocols);
  }

  // Apply protocol qualifiers to ObjCObjectPointerType.
  if (allowOnPointerType) {
    if (const auto *objPtr =
            dyn_cast<ObjCObjectPointerType>(Val: type.getTypePtr())) {
      const ObjCObjectType *objT = objPtr->getObjectType();
      // Merge protocol lists and construct ObjCObjectType.
      SmallVector<ObjCProtocolDecl*, 8> protocolsVec;
      protocolsVec.append(in_start: objT->qual_begin(),
                          in_end: objT->qual_end());
      protocolsVec.append(in_start: protocols.begin(), in_end: protocols.end());
      ArrayRef<ObjCProtocolDecl *> protocols = protocolsVec;
      type = getObjCObjectType(
             baseType: objT->getBaseType(),
             typeArgs: objT->getTypeArgsAsWritten(),
             protocols,
             isKindOf: objT->isKindOfTypeAsWritten());
      return getObjCObjectPointerType(OIT: type);
    }
  }

  // Apply protocol qualifiers to ObjCObjectType.
  if (const auto *objT = dyn_cast<ObjCObjectType>(Val: type.getTypePtr())){
    // FIXME: Check for protocols to which the class type is already
    // known to conform.

    return getObjCObjectType(baseType: objT->getBaseType(),
                             typeArgs: objT->getTypeArgsAsWritten(),
                             protocols,
                             isKindOf: objT->isKindOfTypeAsWritten());
  }

  // If the canonical type is ObjCObjectType, ...
  if (type->isObjCObjectType()) {
    // Silently overwrite any existing protocol qualifiers.
    // TODO: determine whether that's the right thing to do.

    // FIXME: Check for protocols to which the class type is already
    // known to conform.
    return getObjCObjectType(baseType: type, typeArgs: {}, protocols, isKindOf: false);
  }

  // id<protocol-list>
  if (type->isObjCIdType()) {
    const auto *objPtr = type->castAs<ObjCObjectPointerType>();
    type = getObjCObjectType(baseType: ObjCBuiltinIdTy, typeArgs: {}, protocols,
                             isKindOf: objPtr->isKindOfType());
    return getObjCObjectPointerType(OIT: type);
  }

  // Class<protocol-list>
  if (type->isObjCClassType()) {
    const auto *objPtr = type->castAs<ObjCObjectPointerType>();
    type = getObjCObjectType(baseType: ObjCBuiltinClassTy, typeArgs: {}, protocols,
                             isKindOf: objPtr->isKindOfType());
    return getObjCObjectPointerType(OIT: type);
  }

  // Anything else cannot carry protocol qualifiers.
  hasError = true;
  return type;
}
6446
/// Return the unique ObjCTypeParamType for the given type-parameter
/// declaration and protocol list, creating it on first use. The canonical
/// type is the (canonicalized) underlying/bound type, with the protocol
/// qualifiers applied to it.
QualType
ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl,
                                 ArrayRef<ObjCProtocolDecl *> protocols) const {
  // Look in the folding set for an existing type.
  llvm::FoldingSetNodeID ID;
  ObjCTypeParamType::Profile(ID, OTPDecl: Decl, CanonicalType: Decl->getUnderlyingType(), protocols);
  void *InsertPos = nullptr;
  if (ObjCTypeParamType *TypeParam =
          ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(TypeParam, 0);

  // We canonicalize to the underlying type.
  QualType Canonical = getCanonicalType(T: Decl->getUnderlyingType());
  if (!protocols.empty()) {
    // Apply the protocol qualifiers.
    bool hasError;
    Canonical = getCanonicalType(T: applyObjCProtocolQualifiers(
        type: Canonical, protocols, hasError, allowOnPointerType: true /*allowOnPointerType*/));
    assert(!hasError && "Error when apply protocol qualifier to bound type");
  }

  // Protocols are stored in trailing storage after the node.
  unsigned size = sizeof(ObjCTypeParamType);
  size += protocols.size() * sizeof(ObjCProtocolDecl *);
  void *mem = Allocate(Size: size, Align: alignof(ObjCTypeParamType));
  auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols);

  Types.push_back(Elt: newType);
  ObjCTypeParamTypes.InsertNode(N: newType, InsertPos);
  return QualType(newType, 0);
}
6477
/// Re-derive \p New's bound type information from \p Orig's underlying type,
/// preserving the protocol qualifiers already present on \p New.
void ASTContext::adjustObjCTypeParamBoundType(const ObjCTypeParamDecl *Orig,
                                              ObjCTypeParamDecl *New) const {
  New->setTypeSourceInfo(getTrivialTypeSourceInfo(T: Orig->getUnderlyingType()));
  // Update TypeForDecl after updating TypeSourceInfo.
  auto *NewTypeParamTy = cast<ObjCTypeParamType>(Val: New->TypeForDecl);
  SmallVector<ObjCProtocolDecl *, 8> protocols;
  protocols.append(in_start: NewTypeParamTy->qual_begin(), in_end: NewTypeParamTy->qual_end());
  QualType UpdatedTy = getObjCTypeParamType(Decl: New, protocols);
  New->TypeForDecl = UpdatedTy.getTypePtr();
}
6488
6489/// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's
6490/// protocol list adopt all protocols in QT's qualified-id protocol
6491/// list.
6492bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT,
6493 ObjCInterfaceDecl *IC) {
6494 if (!QT->isObjCQualifiedIdType())
6495 return false;
6496
6497 if (const auto *OPT = QT->getAs<ObjCObjectPointerType>()) {
6498 // If both the right and left sides have qualifiers.
6499 for (auto *Proto : OPT->quals()) {
6500 if (!IC->ClassImplementsProtocol(lProto: Proto, lookupCategory: false))
6501 return false;
6502 }
6503 return true;
6504 }
6505 return false;
6506}
6507
/// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in
/// QT's qualified-id protocol list adopt all protocols in IDecl's list
/// of protocols.
bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT,
                                                ObjCInterfaceDecl *IDecl) {
  if (!QT->isObjCQualifiedIdType())
    return false;
  const auto *OPT = QT->getAs<ObjCObjectPointerType>();
  if (!OPT)
    return false;
  if (!IDecl->hasDefinition())
    return false;
  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols;
  CollectInheritedProtocols(CDecl: IDecl, Protocols&: InheritedProtocols);
  if (InheritedProtocols.empty())
    return false;
  // Check that if every protocol in list of id<plist> conforms to a protocol
  // of IDecl's, then bridge casting is ok.
  bool Conforms = false;
  for (auto *Proto : OPT->quals()) {
    Conforms = false;
    for (auto *PI : InheritedProtocols) {
      if (ProtocolCompatibleWithProtocol(lProto: Proto, rProto: PI)) {
        Conforms = true;
        break;
      }
    }
    if (!Conforms)
      break;
  }
  // First direction succeeded: every id<> protocol matched something IDecl
  // inherits.
  if (Conforms)
    return true;

  // Fallback direction: every inherited protocol must be adopted (via the
  // protocol hierarchy) by some protocol in the id<> list.
  for (auto *PI : InheritedProtocols) {
    // If both the right and left sides have qualifiers.
    bool Adopts = false;
    for (auto *Proto : OPT->quals()) {
      // return 'true' if 'PI' is in the inheritance hierarchy of Proto
      if ((Adopts = ProtocolCompatibleWithProtocol(lProto: PI, rProto: Proto)))
        break;
    }
    if (!Adopts)
      return false;
  }
  return true;
}
6554
/// getObjCObjectPointerType - Return a ObjCObjectPointerType type for
/// the given object type. These nodes are uniqued in a FoldingSet keyed on
/// the pointee type.
QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const {
  // Compute the profile and look for an existing node first.
  llvm::FoldingSetNodeID ID;
  ObjCObjectPointerType::Profile(ID, T: ObjectT);

  void *InsertPos = nullptr;
  if (ObjCObjectPointerType *QT =
          ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(QT, 0);

  // Find the canonical object type.
  QualType Canonical;
  if (!ObjectT.isCanonical()) {
    Canonical = getObjCObjectPointerType(ObjectT: getCanonicalType(T: ObjectT));

    // Regenerate InsertPos: the recursive call above may have inserted into
    // the folding set and invalidated the insertion point computed earlier.
    ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  // No match; build a new node and register it.
  void *Mem =
      Allocate(Size: sizeof(ObjCObjectPointerType), Align: alignof(ObjCObjectPointerType));
  auto *QType =
      new (Mem) ObjCObjectPointerType(Canonical, ObjectT);

  Types.push_back(Elt: QType);
  ObjCObjectPointerTypes.InsertNode(N: QType, InsertPos);
  return QualType(QType, 0);
}
6585
/// getObjCInterfaceType - Return the unique reference to the type for the
/// specified ObjC interface decl. The list of protocols is optional.
QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
                                          ObjCInterfaceDecl *PrevDecl) const {
  // The type node is cached directly on the declaration.
  if (Decl->TypeForDecl)
    return QualType(Decl->TypeForDecl, 0);

  if (PrevDecl) {
    assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl");
    // All redeclarations share one type node; reuse the one already built
    // for the previous declaration.
    Decl->TypeForDecl = PrevDecl->TypeForDecl;
    return QualType(PrevDecl->TypeForDecl, 0);
  }

  // Prefer the definition, if there is one.
  if (const ObjCInterfaceDecl *Def = Decl->getDefinition())
    Decl = Def;

  void *Mem = Allocate(Size: sizeof(ObjCInterfaceType), Align: alignof(ObjCInterfaceType));
  auto *T = new (Mem) ObjCInterfaceType(Decl);
  Decl->TypeForDecl = T;
  Types.push_back(Elt: T);
  return QualType(T, 0);
}
6609
/// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique
/// TypeOfExprType AST's (since expression's are never shared). For example,
/// multiple declarations that refer to "typeof(x)" all contain different
/// DeclRefExpr's. This doesn't effect the type checker, since it operates
/// on canonical type's (which are always unique).
QualType ASTContext::getTypeOfExprType(Expr *tofExpr, TypeOfKind Kind) const {
  TypeOfExprType *toe;
  if (tofExpr->isTypeDependent()) {
    // Dependent typeof(expr) types ARE uniqued (via a FoldingSet keyed on
    // the expression), so that equivalent dependent types share a canonical
    // node.
    llvm::FoldingSetNodeID ID;
    DependentTypeOfExprType::Profile(ID, Context: *this, E: tofExpr,
                                     IsUnqual: Kind == TypeOfKind::Unqualified);

    void *InsertPos = nullptr;
    DependentTypeOfExprType *Canon =
        DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos);
    if (Canon) {
      // We already have a "canonical" version of an identical, dependent
      // typeof(expr) type. Use that as our canonical type.
      toe = new (*this, alignof(TypeOfExprType)) TypeOfExprType(
          *this, tofExpr, Kind, QualType((TypeOfExprType *)Canon, 0));
    } else {
      // Build a new, canonical typeof(expr) type.
      Canon = new (*this, alignof(DependentTypeOfExprType))
          DependentTypeOfExprType(*this, tofExpr, Kind);
      DependentTypeOfExprTypes.InsertNode(N: Canon, InsertPos);
      toe = Canon;
    }
  } else {
    // Non-dependent case: the canonical type is just the canonical form of
    // the expression's type.
    QualType Canonical = getCanonicalType(T: tofExpr->getType());
    toe = new (*this, alignof(TypeOfExprType))
        TypeOfExprType(*this, tofExpr, Kind, Canonical);
  }
  Types.push_back(Elt: toe);
  return QualType(toe, 0);
}
6645
6646/// getTypeOfType - Unlike many "get<Type>" functions, we don't unique
6647/// TypeOfType nodes. The only motivation to unique these nodes would be
6648/// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be
6649/// an issue. This doesn't affect the type checker, since it operates
6650/// on canonical types (which are always unique).
6651QualType ASTContext::getTypeOfType(QualType tofType, TypeOfKind Kind) const {
6652 QualType Canonical = getCanonicalType(T: tofType);
6653 auto *tot = new (*this, alignof(TypeOfType))
6654 TypeOfType(*this, tofType, Canonical, Kind);
6655 Types.push_back(Elt: tot);
6656 return QualType(tot, 0);
6657}
6658
6659/// getReferenceQualifiedType - Given an expr, will return the type for
6660/// that expression, as in [dcl.type.simple]p4 but without taking id-expressions
6661/// and class member access into account.
6662QualType ASTContext::getReferenceQualifiedType(const Expr *E) const {
6663 // C++11 [dcl.type.simple]p4:
6664 // [...]
6665 QualType T = E->getType();
6666 switch (E->getValueKind()) {
6667 // - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the
6668 // type of e;
6669 case VK_XValue:
6670 return getRValueReferenceType(T);
6671 // - otherwise, if e is an lvalue, decltype(e) is T&, where T is the
6672 // type of e;
6673 case VK_LValue:
6674 return getLValueReferenceType(T);
6675 // - otherwise, decltype(e) is the type of e.
6676 case VK_PRValue:
6677 return T;
6678 }
6679 llvm_unreachable("Unknown value kind");
6680}
6681
/// Unlike many "get<Type>" functions, we don't unique DecltypeType
/// nodes. This would never be helpful, since each such type has its own
/// expression, and would not give a significant memory saving, since there
/// is an Expr tree under each such type.
QualType ASTContext::getDecltypeType(Expr *E, QualType UnderlyingType) const {
  // C++11 [temp.type]p2:
  //   If an expression e involves a template parameter, decltype(e) denotes a
  //   unique dependent type. Two such decltype-specifiers refer to the same
  //   type only if their expressions are equivalent (14.5.6.1).
  //
  // Three cases below:
  //  1. Non-dependent: canonical type is the canonical underlying type.
  //  2. Dependent with a known underlying type: canonicalize by recursing
  //     with a null underlying type (case 3 computes the canonical node).
  //  3. Dependent, no underlying type: unique via the FoldingSet so that
  //     equivalent expressions share one canonical DependentDecltypeType.
  QualType CanonType;
  if (!E->isInstantiationDependent()) {
    CanonType = getCanonicalType(T: UnderlyingType);
  } else if (!UnderlyingType.isNull()) {
    CanonType = getDecltypeType(E, UnderlyingType: QualType());
  } else {
    llvm::FoldingSetNodeID ID;
    DependentDecltypeType::Profile(ID, Context: *this, E);

    void *InsertPos = nullptr;
    if (DependentDecltypeType *Canon =
            DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos))
      return QualType(Canon, 0);

    // Build a new, canonical decltype(expr) type.
    auto *DT =
        new (*this, alignof(DependentDecltypeType)) DependentDecltypeType(E);
    DependentDecltypeTypes.InsertNode(N: DT, InsertPos);
    Types.push_back(Elt: DT);
    return QualType(DT, 0);
  }
  auto *DT = new (*this, alignof(DecltypeType))
      DecltypeType(E, UnderlyingType, CanonType);
  Types.push_back(Elt: DT);
  return QualType(DT, 0);
}
6717
/// Build the type of a pack-indexing specifier (Pattern...[IndexExpr]).
/// When the pack is fully substituted and the index is known, the canonical
/// type is simply the canonical selected expansion; otherwise a uniqued
/// dependent PackIndexingType serves as the canonical node.
QualType ASTContext::getPackIndexingType(QualType Pattern, Expr *IndexExpr,
                                         bool FullySubstituted,
                                         ArrayRef<QualType> Expansions,
                                         UnsignedOrNone Index) const {
  QualType Canonical;
  if (FullySubstituted && Index) {
    // The selection is resolved: canonicalize to the chosen expansion.
    Canonical = getCanonicalType(T: Expansions[*Index]);
  } else {
    // Still dependent: find or create the canonical dependent node in the
    // folding set, keyed on the canonical pattern and the index expression.
    llvm::FoldingSetNodeID ID;
    PackIndexingType::Profile(ID, Context: *this, Pattern: Pattern.getCanonicalType(), E: IndexExpr,
                              FullySubstituted, Expansions);
    void *InsertPos = nullptr;
    PackIndexingType *Canon =
        DependentPackIndexingTypes.FindNodeOrInsertPos(ID, InsertPos);
    if (!Canon) {
      // Trailing storage holds the expansion types.
      void *Mem = Allocate(
          Size: PackIndexingType::totalSizeToAlloc<QualType>(Counts: Expansions.size()),
          Align: TypeAlignment);
      Canon =
          new (Mem) PackIndexingType(QualType(), Pattern.getCanonicalType(),
                                     IndexExpr, FullySubstituted, Expansions);
      DependentPackIndexingTypes.InsertNode(N: Canon, InsertPos);
    }
    Canonical = QualType(Canon, 0);
  }

  // Build the (sugared) node pointing at the canonical type computed above.
  void *Mem =
      Allocate(Size: PackIndexingType::totalSizeToAlloc<QualType>(Counts: Expansions.size()),
               Align: TypeAlignment);
  auto *T = new (Mem) PackIndexingType(Canonical, Pattern, IndexExpr,
                                       FullySubstituted, Expansions);
  Types.push_back(Elt: T);
  return QualType(T, 0);
}
6752
/// getUnaryTransformType - Return the type for a unary type transformation
/// (e.g. __underlying_type(T)). These nodes are uniqued in a FoldingSet.
QualType
ASTContext::getUnaryTransformType(QualType BaseType, QualType UnderlyingType,
                                  UnaryTransformType::UTTKind Kind) const {

  llvm::FoldingSetNodeID ID;
  UnaryTransformType::Profile(ID, BaseType, UnderlyingType, UKind: Kind);

  void *InsertPos = nullptr;
  if (UnaryTransformType *UT =
          UnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(UT, 0);

  QualType CanonType;
  if (!BaseType->isDependentType()) {
    // Non-dependent: the transformation has been computed, so the canonical
    // type is the canonical form of its result.
    CanonType = UnderlyingType.getCanonicalType();
  } else {
    // Dependent: there is no computed result yet; canonicalize on the base
    // type alone.
    assert(UnderlyingType.isNull() || BaseType == UnderlyingType);
    UnderlyingType = QualType();
    if (QualType CanonBase = BaseType.getCanonicalType();
        BaseType != CanonBase) {
      CanonType = getUnaryTransformType(BaseType: CanonBase, UnderlyingType: QualType(), Kind);
      assert(CanonType.isCanonical());

      // Find the insertion position again: the recursive call may have
      // modified the folding set.
      [[maybe_unused]] UnaryTransformType *UT =
          UnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!UT && "broken canonicalization");
    }
  }

  auto *UT = new (*this, alignof(UnaryTransformType))
      UnaryTransformType(BaseType, UnderlyingType, Kind, CanonType);
  UnaryTransformTypes.InsertNode(N: UT, InsertPos);
  Types.push_back(Elt: UT);
  return QualType(UT, 0);
}
6791
/// Worker for getAutoType: build or look up an AutoType with the given
/// deduction state, keyword, and optional type constraint. \p IsCanon is set
/// on the recursive call that constructs the canonical (constraint-
/// canonicalized) node, to stop further canonicalization.
QualType ASTContext::getAutoTypeInternal(
    QualType DeducedType, AutoTypeKeyword Keyword, bool IsDependent,
    bool IsPack, TemplateDecl *TypeConstraintConcept,
    ArrayRef<TemplateArgument> TypeConstraintArgs, bool IsCanon) const {
  // The plain undeduced, unconstrained 'auto' is a singleton.
  if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto &&
      !TypeConstraintConcept && !IsDependent)
    return getAutoDeductType();

  // Look in the folding set for an existing type.
  llvm::FoldingSetNodeID ID;
  bool IsDeducedDependent =
      isa_and_nonnull<TemplateTemplateParmDecl>(Val: TypeConstraintConcept) ||
      (!DeducedType.isNull() && DeducedType->isDependentType());
  AutoType::Profile(ID, Context: *this, Deduced: DeducedType, Keyword,
                    IsDependent: IsDependent || IsDeducedDependent, CD: TypeConstraintConcept,
                    Arguments: TypeConstraintArgs);
  if (auto const AT_iter = AutoTypes.find(Val: ID); AT_iter != AutoTypes.end())
    return QualType(AT_iter->getSecond(), 0);

  // Compute the canonical type, unless we are already building it.
  QualType Canon;
  if (!IsCanon) {
    if (!DeducedType.isNull()) {
      // Deduced: canonicalize to the canonical deduced type.
      Canon = DeducedType.getCanonicalType();
    } else if (TypeConstraintConcept) {
      // Constrained: canonicalize the concept and its arguments, and (only
      // if anything changed) recurse to build the canonical node.
      bool AnyNonCanonArgs = false;
      auto *CanonicalConcept =
          cast<TemplateDecl>(Val: TypeConstraintConcept->getCanonicalDecl());
      auto CanonicalConceptArgs = ::getCanonicalTemplateArguments(
          C: *this, Args: TypeConstraintArgs, AnyNonCanonArgs);
      if (CanonicalConcept != TypeConstraintConcept || AnyNonCanonArgs) {
        Canon = getAutoTypeInternal(DeducedType: QualType(), Keyword, IsDependent, IsPack,
                                    TypeConstraintConcept: CanonicalConcept, TypeConstraintArgs: CanonicalConceptArgs,
                                    /*IsCanon=*/true);
      }
    }
  }

  // The constraint arguments are stored in trailing storage.
  void *Mem = Allocate(Size: sizeof(AutoType) +
                           sizeof(TemplateArgument) * TypeConstraintArgs.size(),
                       Align: alignof(AutoType));
  auto *AT = new (Mem) AutoType(
      DeducedType, Keyword,
      (IsDependent ? TypeDependence::DependentInstantiation
                   : TypeDependence::None) |
          (IsPack ? TypeDependence::UnexpandedPack : TypeDependence::None),
      Canon, TypeConstraintConcept, TypeConstraintArgs);
#ifndef NDEBUG
  // Verify that the new node profiles back to the key we looked up with.
  llvm::FoldingSetNodeID InsertedID;
  AT->Profile(InsertedID, *this);
  assert(InsertedID == ID && "ID does not match");
#endif
  Types.push_back(Elt: AT);
  AutoTypes.try_emplace(Key: ID, Args&: AT);
  return QualType(AT, 0);
}
6847
6848/// getAutoType - Return the uniqued reference to the 'auto' type which has been
6849/// deduced to the given type, or to the canonical undeduced 'auto' type, or the
6850/// canonical deduced-but-dependent 'auto' type.
6851QualType
6852ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword,
6853 bool IsDependent, bool IsPack,
6854 TemplateDecl *TypeConstraintConcept,
6855 ArrayRef<TemplateArgument> TypeConstraintArgs) const {
6856 assert((!IsPack || IsDependent) && "only use IsPack for a dependent pack");
6857 assert((!IsDependent || DeducedType.isNull()) &&
6858 "A dependent auto should be undeduced");
6859 return getAutoTypeInternal(DeducedType, Keyword, IsDependent, IsPack,
6860 TypeConstraintConcept, TypeConstraintArgs);
6861}
6862
6863QualType ASTContext::getUnconstrainedType(QualType T) const {
6864 QualType CanonT = T.getNonPackExpansionType().getCanonicalType();
6865
6866 // Remove a type-constraint from a top-level auto or decltype(auto).
6867 if (auto *AT = CanonT->getAs<AutoType>()) {
6868 if (!AT->isConstrained())
6869 return T;
6870 return getQualifiedType(T: getAutoType(DeducedType: QualType(), Keyword: AT->getKeyword(),
6871 IsDependent: AT->isDependentType(),
6872 IsPack: AT->containsUnexpandedParameterPack()),
6873 Qs: T.getQualifiers());
6874 }
6875
6876 // FIXME: We only support constrained auto at the top level in the type of a
6877 // non-type template parameter at the moment. Once we lift that restriction,
6878 // we'll need to recursively build types containing auto here.
6879 assert(!CanonT->getContainedAutoType() ||
6880 !CanonT->getContainedAutoType()->isConstrained());
6881 return T;
6882}
6883
/// Worker for getDeducedTemplateSpecializationType: look up or build a
/// uniqued DeducedTemplateSpecializationType with a precomputed canonical
/// type \p Canon.
QualType ASTContext::getDeducedTemplateSpecializationTypeInternal(
    ElaboratedTypeKeyword Keyword, TemplateName Template, QualType DeducedType,
    bool IsDependent, QualType Canon) const {
  // Look in the folding set for an existing type.
  void *InsertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  DeducedTemplateSpecializationType::Profile(ID, Keyword, Template, Deduced: DeducedType,
                                             IsDependent);
  if (DeducedTemplateSpecializationType *DTST =
          DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(DTST, 0);

  auto *DTST = new (*this, alignof(DeducedTemplateSpecializationType))
      DeducedTemplateSpecializationType(Keyword, Template, DeducedType,
                                        IsDependent, Canon);

#ifndef NDEBUG
  // Verify that the new node profiles back to the key we looked up with.
  llvm::FoldingSetNodeID TempID;
  DTST->Profile(TempID);
  assert(ID == TempID && "ID does not match");
#endif
  Types.push_back(Elt: DTST);
  DeducedTemplateSpecializationTypes.InsertNode(N: DTST, InsertPos);
  return QualType(DTST, 0);
}
6909
6910/// Return the uniqued reference to the deduced template specialization type
6911/// which has been deduced to the given type, or to the canonical undeduced
6912/// such type, or the canonical deduced-but-dependent such type.
6913QualType ASTContext::getDeducedTemplateSpecializationType(
6914 ElaboratedTypeKeyword Keyword, TemplateName Template, QualType DeducedType,
6915 bool IsDependent) const {
6916 // FIXME: This could save an extra hash table lookup if it handled all the
6917 // parameters already being canonical.
6918 // FIXME: Can this be formed from a DependentTemplateName, such that the
6919 // keyword should be part of the canonical type?
6920 QualType Canon =
6921 DeducedType.isNull()
6922 ? getDeducedTemplateSpecializationTypeInternal(
6923 Keyword: ElaboratedTypeKeyword::None, Template: getCanonicalTemplateName(Name: Template),
6924 DeducedType: QualType(), IsDependent, Canon: QualType())
6925 : DeducedType.getCanonicalType();
6926 return getDeducedTemplateSpecializationTypeInternal(
6927 Keyword, Template, DeducedType, IsDependent, Canon);
6928}
6929
6930/// getAtomicType - Return the uniqued reference to the atomic type for
6931/// the given value type.
6932QualType ASTContext::getAtomicType(QualType T) const {
6933 // Unique pointers, to guarantee there is only one pointer of a particular
6934 // structure.
6935 llvm::FoldingSetNodeID ID;
6936 AtomicType::Profile(ID, T);
6937
6938 void *InsertPos = nullptr;
6939 if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos))
6940 return QualType(AT, 0);
6941
6942 // If the atomic value type isn't canonical, this won't be a canonical type
6943 // either, so fill in the canonical type field.
6944 QualType Canonical;
6945 if (!T.isCanonical()) {
6946 Canonical = getAtomicType(T: getCanonicalType(T));
6947
6948 // Get the new insert position for the node we care about.
6949 AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos);
6950 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
6951 }
6952 auto *New = new (*this, alignof(AtomicType)) AtomicType(T, Canonical);
6953 Types.push_back(Elt: New);
6954 AtomicTypes.InsertNode(N: New, InsertPos);
6955 return QualType(New, 0);
6956}
6957
6958/// getAutoDeductType - Get type pattern for deducing against 'auto'.
6959QualType ASTContext::getAutoDeductType() const {
6960 if (AutoDeductTy.isNull())
6961 AutoDeductTy = QualType(new (*this, alignof(AutoType))
6962 AutoType(QualType(), AutoTypeKeyword::Auto,
6963 TypeDependence::None, QualType(),
6964 /*concept*/ nullptr, /*args*/ {}),
6965 0);
6966 return AutoDeductTy;
6967}
6968
6969/// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'.
6970QualType ASTContext::getAutoRRefDeductType() const {
6971 if (AutoRRefDeductTy.isNull())
6972 AutoRRefDeductTy = getRValueReferenceType(T: getAutoDeductType());
6973 assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern");
6974 return AutoRRefDeductTy;
6975}
6976
6977/// getSizeType - Return the unique type for "size_t" (C99 7.17), the result
6978/// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and
6979/// needs to agree with the definition in <stddef.h>.
6980QualType ASTContext::getSizeType() const {
6981 return getPredefinedSugarType(KD: PredefinedSugarType::Kind::SizeT);
6982}
6983
6984CanQualType ASTContext::getCanonicalSizeType() const {
6985 return getFromTargetType(Type: Target->getSizeType());
6986}
6987
6988/// Return the unique signed counterpart of the integer type
6989/// corresponding to size_t.
6990QualType ASTContext::getSignedSizeType() const {
6991 return getPredefinedSugarType(KD: PredefinedSugarType::Kind::SignedSizeT);
6992}
6993
6994/// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17)
6995/// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9).
6996QualType ASTContext::getPointerDiffType() const {
6997 return getPredefinedSugarType(KD: PredefinedSugarType::Kind::PtrdiffT);
6998}
6999
7000/// Return the unique unsigned counterpart of "ptrdiff_t"
7001/// integer type. The standard (C11 7.21.6.1p7) refers to this type
7002/// in the definition of %tu format specifier.
7003QualType ASTContext::getUnsignedPointerDiffType() const {
7004 return getFromTargetType(Type: Target->getUnsignedPtrDiffType(AddrSpace: LangAS::Default));
7005}
7006
7007/// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5).
7008CanQualType ASTContext::getIntMaxType() const {
7009 return getFromTargetType(Type: Target->getIntMaxType());
7010}
7011
7012/// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5).
7013CanQualType ASTContext::getUIntMaxType() const {
7014 return getFromTargetType(Type: Target->getUIntMaxType());
7015}
7016
7017/// getSignedWCharType - Return the type of "signed wchar_t".
7018/// Used when in C++, as a GCC extension.
7019QualType ASTContext::getSignedWCharType() const {
7020 // FIXME: derive from "Target" ?
7021 return WCharTy;
7022}
7023
7024/// getUnsignedWCharType - Return the type of "unsigned wchar_t".
7025/// Used when in C++, as a GCC extension.
7026QualType ASTContext::getUnsignedWCharType() const {
7027 // FIXME: derive from "Target" ?
7028 return UnsignedIntTy;
7029}
7030
7031QualType ASTContext::getIntPtrType() const {
7032 return getFromTargetType(Type: Target->getIntPtrType());
7033}
7034
7035QualType ASTContext::getUIntPtrType() const {
7036 return getCorrespondingUnsignedType(T: getIntPtrType());
7037}
7038
7039/// Return the unique type for "pid_t" defined in
7040/// <sys/types.h>. We need this to compute the correct type for vfork().
7041QualType ASTContext::getProcessIDType() const {
7042 return getFromTargetType(Type: Target->getProcessIDType());
7043}
7044
7045//===----------------------------------------------------------------------===//
7046// Type Operators
7047//===----------------------------------------------------------------------===//
7048
7049CanQualType ASTContext::getCanonicalParamType(QualType T) const {
7050 // Push qualifiers into arrays, and then discard any remaining
7051 // qualifiers.
7052 T = getCanonicalType(T);
7053 T = getVariableArrayDecayedType(type: T);
7054 const Type *Ty = T.getTypePtr();
7055 QualType Result;
7056 if (getLangOpts().HLSL && isa<ConstantArrayType>(Val: Ty)) {
7057 Result = getArrayParameterType(Ty: QualType(Ty, 0));
7058 } else if (isa<ArrayType>(Val: Ty)) {
7059 Result = getArrayDecayedType(T: QualType(Ty,0));
7060 } else if (isa<FunctionType>(Val: Ty)) {
7061 Result = getPointerType(T: QualType(Ty, 0));
7062 } else {
7063 Result = QualType(Ty, 0);
7064 }
7065
7066 return CanQualType::CreateUnsafe(Other: Result);
7067}
7068
/// Strip qualifiers from \p type, looking through arrays: for an array type,
/// the element type is recursively unqualified and the array is rebuilt.
/// All removed qualifiers (outer and element-level) are accumulated into
/// \p quals.
QualType ASTContext::getUnqualifiedArrayType(QualType type,
                                             Qualifiers &quals) const {
  SplitQualType splitType = type.getSplitUnqualifiedType();

  // FIXME: getSplitUnqualifiedType() actually walks all the way to
  // the unqualified desugared type and then drops it on the floor.
  // We then have to strip that sugar back off with
  // getUnqualifiedDesugaredType(), which is silly.
  const auto *AT =
      dyn_cast<ArrayType>(Val: splitType.Ty->getUnqualifiedDesugaredType());

  // If we don't have an array, just use the results in splitType.
  if (!AT) {
    quals = splitType.Quals;
    return QualType(splitType.Ty, 0);
  }

  // Otherwise, recurse on the array's element type.
  QualType elementType = AT->getElementType();
  QualType unqualElementType = getUnqualifiedArrayType(type: elementType, quals);

  // If that didn't change the element type, AT has no qualifiers, so we
  // can just use the results in splitType.
  if (elementType == unqualElementType) {
    assert(quals.empty()); // from the recursive call
    quals = splitType.Quals;
    return QualType(splitType.Ty, 0);
  }

  // Otherwise, add in the qualifiers from the outermost type, then
  // build the type back up.
  quals.addConsistentQualifiers(qs: splitType.Quals);

  // Rebuild the array with the unqualified element type, preserving the
  // original array kind, size/size-expression, and size modifier.
  if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: AT)) {
    return getConstantArrayType(EltTy: unqualElementType, ArySizeIn: CAT->getSize(),
                                SizeExpr: CAT->getSizeExpr(), ASM: CAT->getSizeModifier(), IndexTypeQuals: 0);
  }

  if (const auto *IAT = dyn_cast<IncompleteArrayType>(Val: AT)) {
    return getIncompleteArrayType(elementType: unqualElementType, ASM: IAT->getSizeModifier(), elementTypeQuals: 0);
  }

  if (const auto *VAT = dyn_cast<VariableArrayType>(Val: AT)) {
    return getVariableArrayType(EltTy: unqualElementType, NumElts: VAT->getSizeExpr(),
                                ASM: VAT->getSizeModifier(),
                                IndexTypeQuals: VAT->getIndexTypeCVRQualifiers());
  }

  const auto *DSAT = cast<DependentSizedArrayType>(Val: AT);
  return getDependentSizedArrayType(elementType: unqualElementType, numElements: DSAT->getSizeExpr(),
                                    ASM: DSAT->getSizeModifier(), elementTypeQuals: 0);
}
7121
7122/// Attempt to unwrap two types that may both be array types with the same bound
7123/// (or both be array types of unknown bound) for the purpose of comparing the
7124/// cv-decomposition of two types per C++ [conv.qual].
7125///
7126/// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in
7127/// C++20 [conv.qual], if permitted by the current language mode.
7128void ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2,
7129 bool AllowPiMismatch) const {
7130 while (true) {
7131 auto *AT1 = getAsArrayType(T: T1);
7132 if (!AT1)
7133 return;
7134
7135 auto *AT2 = getAsArrayType(T: T2);
7136 if (!AT2)
7137 return;
7138
7139 // If we don't have two array types with the same constant bound nor two
7140 // incomplete array types, we've unwrapped everything we can.
7141 // C++20 also permits one type to be a constant array type and the other
7142 // to be an incomplete array type.
7143 // FIXME: Consider also unwrapping array of unknown bound and VLA.
7144 if (auto *CAT1 = dyn_cast<ConstantArrayType>(Val: AT1)) {
7145 auto *CAT2 = dyn_cast<ConstantArrayType>(Val: AT2);
7146 if (!((CAT2 && CAT1->getSize() == CAT2->getSize()) ||
7147 (AllowPiMismatch && getLangOpts().CPlusPlus20 &&
7148 isa<IncompleteArrayType>(Val: AT2))))
7149 return;
7150 } else if (isa<IncompleteArrayType>(Val: AT1)) {
7151 if (!(isa<IncompleteArrayType>(Val: AT2) ||
7152 (AllowPiMismatch && getLangOpts().CPlusPlus20 &&
7153 isa<ConstantArrayType>(Val: AT2))))
7154 return;
7155 } else {
7156 return;
7157 }
7158
7159 T1 = AT1->getElementType();
7160 T2 = AT2->getElementType();
7161 }
7162}
7163
7164/// Attempt to unwrap two types that may be similar (C++ [conv.qual]).
7165///
7166/// If T1 and T2 are both pointer types of the same kind, or both array types
7167/// with the same bound, unwraps layers from T1 and T2 until a pointer type is
7168/// unwrapped. Top-level qualifiers on T1 and T2 are ignored.
7169///
7170/// This function will typically be called in a loop that successively
7171/// "unwraps" pointer and pointer-to-member types to compare them at each
7172/// level.
7173///
7174/// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in
7175/// C++20 [conv.qual], if permitted by the current language mode.
7176///
7177/// \return \c true if a pointer type was unwrapped, \c false if we reached a
7178/// pair of types that can't be unwrapped further.
7179bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2,
7180 bool AllowPiMismatch) const {
7181 UnwrapSimilarArrayTypes(T1, T2, AllowPiMismatch);
7182
7183 const auto *T1PtrType = T1->getAs<PointerType>();
7184 const auto *T2PtrType = T2->getAs<PointerType>();
7185 if (T1PtrType && T2PtrType) {
7186 T1 = T1PtrType->getPointeeType();
7187 T2 = T2PtrType->getPointeeType();
7188 return true;
7189 }
7190
7191 if (const auto *T1MPType = T1->getAs<MemberPointerType>(),
7192 *T2MPType = T2->getAs<MemberPointerType>();
7193 T1MPType && T2MPType) {
7194 if (auto *RD1 = T1MPType->getMostRecentCXXRecordDecl(),
7195 *RD2 = T2MPType->getMostRecentCXXRecordDecl();
7196 RD1 != RD2 && RD1->getCanonicalDecl() != RD2->getCanonicalDecl())
7197 return false;
7198 if (T1MPType->getQualifier().getCanonical() !=
7199 T2MPType->getQualifier().getCanonical())
7200 return false;
7201 T1 = T1MPType->getPointeeType();
7202 T2 = T2MPType->getPointeeType();
7203 return true;
7204 }
7205
7206 if (getLangOpts().ObjC) {
7207 const auto *T1OPType = T1->getAs<ObjCObjectPointerType>();
7208 const auto *T2OPType = T2->getAs<ObjCObjectPointerType>();
7209 if (T1OPType && T2OPType) {
7210 T1 = T1OPType->getPointeeType();
7211 T2 = T2OPType->getPointeeType();
7212 return true;
7213 }
7214 }
7215
7216 // FIXME: Block pointers, too?
7217
7218 return false;
7219}
7220
7221bool ASTContext::hasSimilarType(QualType T1, QualType T2) const {
7222 while (true) {
7223 Qualifiers Quals;
7224 T1 = getUnqualifiedArrayType(type: T1, quals&: Quals);
7225 T2 = getUnqualifiedArrayType(type: T2, quals&: Quals);
7226 if (hasSameType(T1, T2))
7227 return true;
7228 if (!UnwrapSimilarTypes(T1, T2))
7229 return false;
7230 }
7231}
7232
7233bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) {
7234 while (true) {
7235 Qualifiers Quals1, Quals2;
7236 T1 = getUnqualifiedArrayType(type: T1, quals&: Quals1);
7237 T2 = getUnqualifiedArrayType(type: T2, quals&: Quals2);
7238
7239 Quals1.removeCVRQualifiers();
7240 Quals2.removeCVRQualifiers();
7241 if (Quals1 != Quals2)
7242 return false;
7243
7244 if (hasSameType(T1, T2))
7245 return true;
7246
7247 if (!UnwrapSimilarTypes(T1, T2, /*AllowPiMismatch*/ false))
7248 return false;
7249 }
7250}
7251
/// Compute the DeclarationNameInfo naming the given template, for use in
/// diagnostics and name lookup. Handles every TemplateName representation.
DeclarationNameInfo
ASTContext::getNameForTemplate(TemplateName Name,
                               SourceLocation NameLoc) const {
  switch (Name.getKind()) {
  case TemplateName::QualifiedTemplate:
  case TemplateName::Template:
    // DNInfo work in progress: CHECKME: what about DNLoc?
    return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(),
                               NameLoc);

  case TemplateName::OverloadedTemplate: {
    // All functions in the overload set share a name; use the first.
    OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate();
    // DNInfo work in progress: CHECKME: what about DNLoc?
    return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc);
  }

  case TemplateName::AssumedTemplate: {
    AssumedTemplateStorage *Storage = Name.getAsAssumedTemplateName();
    return DeclarationNameInfo(Storage->getDeclName(), NameLoc);
  }

  case TemplateName::DependentTemplate: {
    // A dependent name is either an identifier or an overloaded operator.
    DependentTemplateName *DTN = Name.getAsDependentTemplateName();
    IdentifierOrOverloadedOperator TN = DTN->getName();
    DeclarationName DName;
    if (const IdentifierInfo *II = TN.getIdentifier()) {
      DName = DeclarationNames.getIdentifier(ID: II);
      return DeclarationNameInfo(DName, NameLoc);
    } else {
      DName = DeclarationNames.getCXXOperatorName(Op: TN.getOperator());
      // DNInfo work in progress: FIXME: source locations?
      DeclarationNameLoc DNLoc =
          DeclarationNameLoc::makeCXXOperatorNameLoc(Range: SourceRange());
      return DeclarationNameInfo(DName, NameLoc, DNLoc);
    }
  }

  case TemplateName::SubstTemplateTemplateParm: {
    // Name the substituted template template parameter itself.
    SubstTemplateTemplateParmStorage *subst
      = Name.getAsSubstTemplateTemplateParm();
    return DeclarationNameInfo(subst->getParameter()->getDeclName(),
                               NameLoc);
  }

  case TemplateName::SubstTemplateTemplateParmPack: {
    SubstTemplateTemplateParmPackStorage *subst
      = Name.getAsSubstTemplateTemplateParmPack();
    return DeclarationNameInfo(subst->getParameterPack()->getDeclName(),
                               NameLoc);
  }
  case TemplateName::UsingTemplate:
    return DeclarationNameInfo(Name.getAsUsingShadowDecl()->getDeclName(),
                               NameLoc);
  case TemplateName::DeducedTemplate: {
    // A deduced template is named after the template it wraps.
    DeducedTemplateStorage *DTS = Name.getAsDeducedTemplateName();
    return getNameForTemplate(Name: DTS->getUnderlying(), NameLoc);
  }
  }

  llvm_unreachable("bad template name kind!");
}
7313
7314static const TemplateArgument *
7315getDefaultTemplateArgumentOrNone(const NamedDecl *P) {
7316 auto handleParam = [](auto *TP) -> const TemplateArgument * {
7317 if (!TP->hasDefaultArgument())
7318 return nullptr;
7319 return &TP->getDefaultArgument().getArgument();
7320 };
7321 switch (P->getKind()) {
7322 case NamedDecl::TemplateTypeParm:
7323 return handleParam(cast<TemplateTypeParmDecl>(Val: P));
7324 case NamedDecl::NonTypeTemplateParm:
7325 return handleParam(cast<NonTypeTemplateParmDecl>(Val: P));
7326 case NamedDecl::TemplateTemplateParm:
7327 return handleParam(cast<TemplateTemplateParmDecl>(Val: P));
7328 default:
7329 llvm_unreachable("Unexpected template parameter kind");
7330 }
7331}
7332
/// Produce the canonical form of a template name: strip all sugar-only
/// nodes, then canonicalize the remaining node (declaration, qualifier,
/// argument pack, or deduced default arguments). If \p IgnoreDeduced is
/// true, DeducedTemplate nodes are desugared to their underlying template.
TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name,
                                                  bool IgnoreDeduced) const {
  // Peel off sugar (qualified names, using-shadow references, parameter
  // substitutions, ...) until no further desugaring is possible.
  while (std::optional<TemplateName> UnderlyingOrNone =
             Name.desugar(IgnoreDeduced))
    Name = *UnderlyingOrNone;

  switch (Name.getKind()) {
  case TemplateName::Template: {
    TemplateDecl *Template = Name.getAsTemplateDecl();
    // Template template parameters canonicalize through a dedicated helper
    // so that equivalent parameters from different lists compare equal.
    if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Val: Template))
      Template = getCanonicalTemplateTemplateParmDecl(TTP);

    // The canonical template name is the canonical template declaration.
    return TemplateName(cast<TemplateDecl>(Val: Template->getCanonicalDecl()));
  }

  case TemplateName::OverloadedTemplate:
  case TemplateName::AssumedTemplate:
    llvm_unreachable("cannot canonicalize unresolved template");

  case TemplateName::DependentTemplate: {
    DependentTemplateName *DTN = Name.getAsDependentTemplateName();
    assert(DTN && "Non-dependent template names must refer to template decls.");
    NestedNameSpecifier Qualifier = DTN->getQualifier();
    NestedNameSpecifier CanonQualifier = Qualifier.getCanonical();
    // The canonical form uses a canonical qualifier and always carries the
    // 'template' keyword.
    if (Qualifier != CanonQualifier || !DTN->hasTemplateKeyword())
      return getDependentTemplateName(Name: {CanonQualifier, DTN->getName(),
                                      /*HasTemplateKeyword=*/true});
    return Name;
  }

  case TemplateName::SubstTemplateTemplateParmPack: {
    SubstTemplateTemplateParmPackStorage *subst =
        Name.getAsSubstTemplateTemplateParmPack();
    // Canonicalize both the substituted pack and its associated declaration.
    TemplateArgument canonArgPack =
        getCanonicalTemplateArgument(Arg: subst->getArgumentPack());
    return getSubstTemplateTemplateParmPack(
        ArgPack: canonArgPack, AssociatedDecl: subst->getAssociatedDecl()->getCanonicalDecl(),
        Index: subst->getIndex(), Final: subst->getFinal());
  }
  case TemplateName::DeducedTemplate: {
    // If IgnoreDeduced were set, desugaring above would have removed this
    // node already.
    assert(IgnoreDeduced == false);
    DeducedTemplateStorage *DTS = Name.getAsDeducedTemplateName();
    DefaultArguments DefArgs = DTS->getDefaultArguments();
    TemplateName Underlying = DTS->getUnderlying();

    TemplateName CanonUnderlying =
        getCanonicalTemplateName(Name: Underlying, /*IgnoreDeduced=*/true);
    bool NonCanonical = CanonUnderlying != Underlying;
    auto CanonArgs =
        getCanonicalTemplateArguments(C: *this, Args: DefArgs.Args, AnyNonCanonArgs&: NonCanonical);

    ArrayRef<NamedDecl *> Params =
        CanonUnderlying.getAsTemplateDecl()->getTemplateParameters()->asArray();
    assert(CanonArgs.size() <= Params.size());
    // A deduced template name which deduces the same default arguments already
    // declared in the underlying template is the same template as the
    // underlying template. We need to note any arguments which differ from
    // the corresponding declaration. If any argument differs, we must build a
    // deduced template name.
    for (int I = CanonArgs.size() - 1; I >= 0; --I) {
      const TemplateArgument *A = getDefaultTemplateArgumentOrNone(P: Params[I]);
      if (!A)
        break;
      auto CanonParamDefArg = getCanonicalTemplateArgument(Arg: *A);
      TemplateArgument &CanonDefArg = CanonArgs[I];
      if (CanonDefArg.structurallyEquals(Other: CanonParamDefArg))
        continue;
      // Keep popping from the back any default arguments which are the same.
      if (I == int(CanonArgs.size() - 1))
        CanonArgs.pop_back();
      NonCanonical = true;
    }
    return NonCanonical ? getDeducedTemplateName(
                              Underlying: CanonUnderlying,
                              /*DefaultArgs=*/{.StartPos: DefArgs.StartPos, .Args: CanonArgs})
                        : Name;
  }
  case TemplateName::UsingTemplate:
  case TemplateName::QualifiedTemplate:
  case TemplateName::SubstTemplateTemplateParm:
    llvm_unreachable("always sugar node");
  }

  llvm_unreachable("bad template name!");
}
7419
7420bool ASTContext::hasSameTemplateName(const TemplateName &X,
7421 const TemplateName &Y,
7422 bool IgnoreDeduced) const {
7423 return getCanonicalTemplateName(Name: X, IgnoreDeduced) ==
7424 getCanonicalTemplateName(Name: Y, IgnoreDeduced);
7425}
7426
7427bool ASTContext::isSameAssociatedConstraint(
7428 const AssociatedConstraint &ACX, const AssociatedConstraint &ACY) const {
7429 if (ACX.ArgPackSubstIndex != ACY.ArgPackSubstIndex)
7430 return false;
7431 if (!isSameConstraintExpr(XCE: ACX.ConstraintExpr, YCE: ACY.ConstraintExpr))
7432 return false;
7433 return true;
7434}
7435
7436bool ASTContext::isSameConstraintExpr(const Expr *XCE, const Expr *YCE) const {
7437 if (!XCE != !YCE)
7438 return false;
7439
7440 if (!XCE)
7441 return true;
7442
7443 llvm::FoldingSetNodeID XCEID, YCEID;
7444 XCE->Profile(ID&: XCEID, Context: *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true);
7445 YCE->Profile(ID&: YCEID, Context: *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true);
7446 return XCEID == YCEID;
7447}
7448
7449bool ASTContext::isSameTypeConstraint(const TypeConstraint *XTC,
7450 const TypeConstraint *YTC) const {
7451 if (!XTC != !YTC)
7452 return false;
7453
7454 if (!XTC)
7455 return true;
7456
7457 auto *NCX = XTC->getNamedConcept();
7458 auto *NCY = YTC->getNamedConcept();
7459 if (!NCX || !NCY || !isSameEntity(X: NCX, Y: NCY))
7460 return false;
7461 if (XTC->getConceptReference()->hasExplicitTemplateArgs() !=
7462 YTC->getConceptReference()->hasExplicitTemplateArgs())
7463 return false;
7464 if (XTC->getConceptReference()->hasExplicitTemplateArgs())
7465 if (XTC->getConceptReference()
7466 ->getTemplateArgsAsWritten()
7467 ->NumTemplateArgs !=
7468 YTC->getConceptReference()->getTemplateArgsAsWritten()->NumTemplateArgs)
7469 return false;
7470
7471 // Compare slowly by profiling.
7472 //
7473 // We couldn't compare the profiling result for the template
7474 // args here. Consider the following example in different modules:
7475 //
7476 // template <__integer_like _Tp, C<_Tp> Sentinel>
7477 // constexpr _Tp operator()(_Tp &&__t, Sentinel &&last) const {
7478 // return __t;
7479 // }
7480 //
7481 // When we compare the profiling result for `C<_Tp>` in different
7482 // modules, it will compare the type of `_Tp` in different modules.
7483 // However, the type of `_Tp` in different modules refer to different
7484 // types here naturally. So we couldn't compare the profiling result
7485 // for the template args directly.
7486 return isSameConstraintExpr(XCE: XTC->getImmediatelyDeclaredConstraint(),
7487 YCE: YTC->getImmediatelyDeclaredConstraint());
7488}
7489
7490bool ASTContext::isSameTemplateParameter(const NamedDecl *X,
7491 const NamedDecl *Y) const {
7492 if (X->getKind() != Y->getKind())
7493 return false;
7494
7495 if (auto *TX = dyn_cast<TemplateTypeParmDecl>(Val: X)) {
7496 auto *TY = cast<TemplateTypeParmDecl>(Val: Y);
7497 if (TX->isParameterPack() != TY->isParameterPack())
7498 return false;
7499 if (TX->hasTypeConstraint() != TY->hasTypeConstraint())
7500 return false;
7501 return isSameTypeConstraint(XTC: TX->getTypeConstraint(),
7502 YTC: TY->getTypeConstraint());
7503 }
7504
7505 if (auto *TX = dyn_cast<NonTypeTemplateParmDecl>(Val: X)) {
7506 auto *TY = cast<NonTypeTemplateParmDecl>(Val: Y);
7507 return TX->isParameterPack() == TY->isParameterPack() &&
7508 TX->getASTContext().hasSameType(T1: TX->getType(), T2: TY->getType()) &&
7509 isSameConstraintExpr(XCE: TX->getPlaceholderTypeConstraint(),
7510 YCE: TY->getPlaceholderTypeConstraint());
7511 }
7512
7513 auto *TX = cast<TemplateTemplateParmDecl>(Val: X);
7514 auto *TY = cast<TemplateTemplateParmDecl>(Val: Y);
7515 return TX->isParameterPack() == TY->isParameterPack() &&
7516 isSameTemplateParameterList(X: TX->getTemplateParameters(),
7517 Y: TY->getTemplateParameters());
7518}
7519
7520bool ASTContext::isSameTemplateParameterList(
7521 const TemplateParameterList *X, const TemplateParameterList *Y) const {
7522 if (X->size() != Y->size())
7523 return false;
7524
7525 for (unsigned I = 0, N = X->size(); I != N; ++I)
7526 if (!isSameTemplateParameter(X: X->getParam(Idx: I), Y: Y->getParam(Idx: I)))
7527 return false;
7528
7529 return isSameConstraintExpr(XCE: X->getRequiresClause(), YCE: Y->getRequiresClause());
7530}
7531
7532bool ASTContext::isSameDefaultTemplateArgument(const NamedDecl *X,
7533 const NamedDecl *Y) const {
7534 // If the type parameter isn't the same already, we don't need to check the
7535 // default argument further.
7536 if (!isSameTemplateParameter(X, Y))
7537 return false;
7538
7539 if (auto *TTPX = dyn_cast<TemplateTypeParmDecl>(Val: X)) {
7540 auto *TTPY = cast<TemplateTypeParmDecl>(Val: Y);
7541 if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument())
7542 return false;
7543
7544 return hasSameType(T1: TTPX->getDefaultArgument().getArgument().getAsType(),
7545 T2: TTPY->getDefaultArgument().getArgument().getAsType());
7546 }
7547
7548 if (auto *NTTPX = dyn_cast<NonTypeTemplateParmDecl>(Val: X)) {
7549 auto *NTTPY = cast<NonTypeTemplateParmDecl>(Val: Y);
7550 if (!NTTPX->hasDefaultArgument() || !NTTPY->hasDefaultArgument())
7551 return false;
7552
7553 Expr *DefaultArgumentX =
7554 NTTPX->getDefaultArgument().getArgument().getAsExpr()->IgnoreImpCasts();
7555 Expr *DefaultArgumentY =
7556 NTTPY->getDefaultArgument().getArgument().getAsExpr()->IgnoreImpCasts();
7557 llvm::FoldingSetNodeID XID, YID;
7558 DefaultArgumentX->Profile(ID&: XID, Context: *this, /*Canonical=*/true);
7559 DefaultArgumentY->Profile(ID&: YID, Context: *this, /*Canonical=*/true);
7560 return XID == YID;
7561 }
7562
7563 auto *TTPX = cast<TemplateTemplateParmDecl>(Val: X);
7564 auto *TTPY = cast<TemplateTemplateParmDecl>(Val: Y);
7565
7566 if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument())
7567 return false;
7568
7569 const TemplateArgument &TAX = TTPX->getDefaultArgument().getArgument();
7570 const TemplateArgument &TAY = TTPY->getDefaultArgument().getArgument();
7571 return hasSameTemplateName(X: TAX.getAsTemplate(), Y: TAY.getAsTemplate());
7572}
7573
7574static bool isSameQualifier(const NestedNameSpecifier X,
7575 const NestedNameSpecifier Y) {
7576 if (X == Y)
7577 return true;
7578 if (!X || !Y)
7579 return false;
7580
7581 auto Kind = X.getKind();
7582 if (Kind != Y.getKind())
7583 return false;
7584
7585 // FIXME: For namespaces and types, we're permitted to check that the entity
7586 // is named via the same tokens. We should probably do so.
7587 switch (Kind) {
7588 case NestedNameSpecifier::Kind::Namespace: {
7589 auto [NamespaceX, PrefixX] = X.getAsNamespaceAndPrefix();
7590 auto [NamespaceY, PrefixY] = Y.getAsNamespaceAndPrefix();
7591 if (!declaresSameEntity(D1: NamespaceX->getNamespace(),
7592 D2: NamespaceY->getNamespace()))
7593 return false;
7594 return isSameQualifier(X: PrefixX, Y: PrefixY);
7595 }
7596 case NestedNameSpecifier::Kind::Type: {
7597 const auto *TX = X.getAsType(), *TY = Y.getAsType();
7598 if (TX->getCanonicalTypeInternal() != TY->getCanonicalTypeInternal())
7599 return false;
7600 return isSameQualifier(X: TX->getPrefix(), Y: TY->getPrefix());
7601 }
7602 case NestedNameSpecifier::Kind::Null:
7603 case NestedNameSpecifier::Kind::Global:
7604 case NestedNameSpecifier::Kind::MicrosoftSuper:
7605 return true;
7606 }
7607 llvm_unreachable("unhandled qualifier kind");
7608}
7609
7610static bool hasSameCudaAttrs(const FunctionDecl *A, const FunctionDecl *B) {
7611 if (!A->getASTContext().getLangOpts().CUDA)
7612 return true; // Target attributes are overloadable in CUDA compilation only.
7613 if (A->hasAttr<CUDADeviceAttr>() != B->hasAttr<CUDADeviceAttr>())
7614 return false;
7615 if (A->hasAttr<CUDADeviceAttr>() && B->hasAttr<CUDADeviceAttr>())
7616 return A->hasAttr<CUDAHostAttr>() == B->hasAttr<CUDAHostAttr>();
7617 return true; // unattributed and __host__ functions are the same.
7618}
7619
7620/// Determine whether the attributes we can overload on are identical for A and
7621/// B. Will ignore any overloadable attrs represented in the type of A and B.
7622static bool hasSameOverloadableAttrs(const FunctionDecl *A,
7623 const FunctionDecl *B) {
7624 // Note that pass_object_size attributes are represented in the function's
7625 // ExtParameterInfo, so we don't need to check them here.
7626
7627 llvm::FoldingSetNodeID Cand1ID, Cand2ID;
7628 auto AEnableIfAttrs = A->specific_attrs<EnableIfAttr>();
7629 auto BEnableIfAttrs = B->specific_attrs<EnableIfAttr>();
7630
7631 for (auto Pair : zip_longest(t&: AEnableIfAttrs, u&: BEnableIfAttrs)) {
7632 std::optional<EnableIfAttr *> Cand1A = std::get<0>(t&: Pair);
7633 std::optional<EnableIfAttr *> Cand2A = std::get<1>(t&: Pair);
7634
7635 // Return false if the number of enable_if attributes is different.
7636 if (!Cand1A || !Cand2A)
7637 return false;
7638
7639 Cand1ID.clear();
7640 Cand2ID.clear();
7641
7642 (*Cand1A)->getCond()->Profile(ID&: Cand1ID, Context: A->getASTContext(), Canonical: true);
7643 (*Cand2A)->getCond()->Profile(ID&: Cand2ID, Context: B->getASTContext(), Canonical: true);
7644
7645 // Return false if any of the enable_if expressions of A and B are
7646 // different.
7647 if (Cand1ID != Cand2ID)
7648 return false;
7649 }
7650 return hasSameCudaAttrs(A, B);
7651}
7652
/// Determine whether the declarations X and Y represent the same entity,
/// for the purpose of merging declarations (e.g. across modules). The
/// comparison is structured as: name, context, module, then kind-specific
/// checks.
bool ASTContext::isSameEntity(const NamedDecl *X, const NamedDecl *Y) const {
  // Caution: this function is called by the AST reader during deserialization,
  // so it cannot rely on AST invariants being met. Non-trivial accessors
  // should be avoided, along with any traversal of redeclaration chains.

  if (X == Y)
    return true;

  if (X->getDeclName() != Y->getDeclName())
    return false;

  // Must be in the same context.
  //
  // Note that we can't use DeclContext::Equals here, because the DeclContexts
  // could be two different declarations of the same function. (We will fix the
  // semantic DC to refer to the primary definition after merging.)
  if (!declaresSameEntity(D1: cast<Decl>(Val: X->getDeclContext()->getRedeclContext()),
                          D2: cast<Decl>(Val: Y->getDeclContext()->getRedeclContext())))
    return false;

  // If either X or Y are local to the owning module, they are only possible to
  // be the same entity if they are in the same module.
  if (X->isModuleLocal() || Y->isModuleLocal())
    if (!isInSameModule(M1: X->getOwningModule(), M2: Y->getOwningModule()))
      return false;

  // Two typedefs refer to the same entity if they have the same underlying
  // type.
  if (const auto *TypedefX = dyn_cast<TypedefNameDecl>(Val: X))
    if (const auto *TypedefY = dyn_cast<TypedefNameDecl>(Val: Y))
      return hasSameType(T1: TypedefX->getUnderlyingType(),
                         T2: TypedefY->getUnderlyingType());

  // Must have the same kind.
  if (X->getKind() != Y->getKind())
    return false;

  // Objective-C classes and protocols with the same name always match.
  if (isa<ObjCInterfaceDecl>(Val: X) || isa<ObjCProtocolDecl>(Val: X))
    return true;

  if (isa<ClassTemplateSpecializationDecl>(Val: X)) {
    // No need to handle these here: we merge them when adding them to the
    // template.
    return false;
  }

  // Compatible tags match. struct/class/interface are interchangeable with
  // each other; enum and union must match exactly.
  if (const auto *TagX = dyn_cast<TagDecl>(Val: X)) {
    const auto *TagY = cast<TagDecl>(Val: Y);
    return (TagX->getTagKind() == TagY->getTagKind()) ||
           ((TagX->getTagKind() == TagTypeKind::Struct ||
             TagX->getTagKind() == TagTypeKind::Class ||
             TagX->getTagKind() == TagTypeKind::Interface) &&
            (TagY->getTagKind() == TagTypeKind::Struct ||
             TagY->getTagKind() == TagTypeKind::Class ||
             TagY->getTagKind() == TagTypeKind::Interface));
  }

  // Functions with the same type and linkage match.
  // FIXME: This needs to cope with merging of prototyped/non-prototyped
  // functions, etc.
  if (const auto *FuncX = dyn_cast<FunctionDecl>(Val: X)) {
    const auto *FuncY = cast<FunctionDecl>(Val: Y);
    if (const auto *CtorX = dyn_cast<CXXConstructorDecl>(Val: X)) {
      const auto *CtorY = cast<CXXConstructorDecl>(Val: Y);
      // Inheriting constructors must inherit from the same constructor.
      if (CtorX->getInheritedConstructor() &&
          !isSameEntity(X: CtorX->getInheritedConstructor().getConstructor(),
                        Y: CtorY->getInheritedConstructor().getConstructor()))
        return false;
    }

    if (FuncX->isMultiVersion() != FuncY->isMultiVersion())
      return false;

    // Multiversioned functions with different feature strings are represented
    // as separate declarations.
    if (FuncX->isMultiVersion()) {
      const auto *TAX = FuncX->getAttr<TargetAttr>();
      const auto *TAY = FuncY->getAttr<TargetAttr>();
      assert(TAX && TAY && "Multiversion Function without target attribute");

      if (TAX->getFeaturesStr() != TAY->getFeaturesStr())
        return false;
    }

    // Per C++20 [temp.over.link]/4, friends in different classes are sometimes
    // not the same entity if they are constrained.
    if ((FuncX->isMemberLikeConstrainedFriend() ||
         FuncY->isMemberLikeConstrainedFriend()) &&
        !FuncX->getLexicalDeclContext()->Equals(
            DC: FuncY->getLexicalDeclContext())) {
      return false;
    }

    if (!isSameAssociatedConstraint(ACX: FuncX->getTrailingRequiresClause(),
                                    ACY: FuncY->getTrailingRequiresClause()))
      return false;

    auto GetTypeAsWritten = [](const FunctionDecl *FD) {
      // Map to the first declaration that we've already merged into this one.
      // The TSI of redeclarations might not match (due to calling conventions
      // being inherited onto the type but not the TSI), but the TSI type of
      // the first declaration of the function should match across modules.
      FD = FD->getCanonicalDecl();
      return FD->getTypeSourceInfo() ? FD->getTypeSourceInfo()->getType()
                                     : FD->getType();
    };
    QualType XT = GetTypeAsWritten(FuncX), YT = GetTypeAsWritten(FuncY);
    if (!hasSameType(T1: XT, T2: YT)) {
      // We can get functions with different types on the redecl chain in C++17
      // if they have differing exception specifications and at least one of
      // the exception specs is unresolved.
      auto *XFPT = XT->getAs<FunctionProtoType>();
      auto *YFPT = YT->getAs<FunctionProtoType>();
      if (getLangOpts().CPlusPlus17 && XFPT && YFPT &&
          (isUnresolvedExceptionSpec(ESpecType: XFPT->getExceptionSpecType()) ||
           isUnresolvedExceptionSpec(ESpecType: YFPT->getExceptionSpecType())) &&
          hasSameFunctionTypeIgnoringExceptionSpec(T: XT, U: YT))
        return true;
      return false;
    }

    return FuncX->getLinkageInternal() == FuncY->getLinkageInternal() &&
           hasSameOverloadableAttrs(A: FuncX, B: FuncY);
  }

  // Variables with the same type and linkage match.
  if (const auto *VarX = dyn_cast<VarDecl>(Val: X)) {
    const auto *VarY = cast<VarDecl>(Val: Y);
    if (VarX->getLinkageInternal() == VarY->getLinkageInternal()) {
      // During deserialization, we might compare variables before we load
      // their types. Assume the types will end up being the same.
      if (VarX->getType().isNull() || VarY->getType().isNull())
        return true;

      if (hasSameType(T1: VarX->getType(), T2: VarY->getType()))
        return true;

      // We can get decls with different types on the redecl chain. Eg.
      // template <typename T> struct S { static T Var[]; }; // #1
      // template <typename T> T S<T>::Var[sizeof(T)]; // #2
      // Only? happens when completing an incomplete array type. In this case
      // when comparing #1 and #2 we should go through their element type.
      const ArrayType *VarXTy = getAsArrayType(T: VarX->getType());
      const ArrayType *VarYTy = getAsArrayType(T: VarY->getType());
      if (!VarXTy || !VarYTy)
        return false;
      if (VarXTy->isIncompleteArrayType() || VarYTy->isIncompleteArrayType())
        return hasSameType(T1: VarXTy->getElementType(), T2: VarYTy->getElementType());
    }
    return false;
  }

  // Namespaces with the same name and inlinedness match.
  if (const auto *NamespaceX = dyn_cast<NamespaceDecl>(Val: X)) {
    const auto *NamespaceY = cast<NamespaceDecl>(Val: Y);
    return NamespaceX->isInline() == NamespaceY->isInline();
  }

  // Identical template names and kinds match if their template parameter lists
  // and patterns match.
  if (const auto *TemplateX = dyn_cast<TemplateDecl>(Val: X)) {
    const auto *TemplateY = cast<TemplateDecl>(Val: Y);

    // ConceptDecl wouldn't be the same if their constraint expression differs.
    if (const auto *ConceptX = dyn_cast<ConceptDecl>(Val: X)) {
      const auto *ConceptY = cast<ConceptDecl>(Val: Y);
      if (!isSameConstraintExpr(XCE: ConceptX->getConstraintExpr(),
                                YCE: ConceptY->getConstraintExpr()))
        return false;
    }

    return isSameEntity(X: TemplateX->getTemplatedDecl(),
                        Y: TemplateY->getTemplatedDecl()) &&
           isSameTemplateParameterList(X: TemplateX->getTemplateParameters(),
                                       Y: TemplateY->getTemplateParameters());
  }

  // Fields with the same name and the same type match.
  if (const auto *FDX = dyn_cast<FieldDecl>(Val: X)) {
    const auto *FDY = cast<FieldDecl>(Val: Y);
    // FIXME: Also check the bitwidth is odr-equivalent, if any.
    return hasSameType(T1: FDX->getType(), T2: FDY->getType());
  }

  // Indirect fields with the same target field match.
  if (const auto *IFDX = dyn_cast<IndirectFieldDecl>(Val: X)) {
    const auto *IFDY = cast<IndirectFieldDecl>(Val: Y);
    return IFDX->getAnonField()->getCanonicalDecl() ==
           IFDY->getAnonField()->getCanonicalDecl();
  }

  // Enumerators with the same name match.
  if (isa<EnumConstantDecl>(Val: X))
    // FIXME: Also check the value is odr-equivalent.
    return true;

  // Using shadow declarations with the same target match.
  if (const auto *USX = dyn_cast<UsingShadowDecl>(Val: X)) {
    const auto *USY = cast<UsingShadowDecl>(Val: Y);
    return declaresSameEntity(D1: USX->getTargetDecl(), D2: USY->getTargetDecl());
  }

  // Using declarations with the same qualifier match. (We already know that
  // the name matches.)
  if (const auto *UX = dyn_cast<UsingDecl>(Val: X)) {
    const auto *UY = cast<UsingDecl>(Val: Y);
    return isSameQualifier(X: UX->getQualifier(), Y: UY->getQualifier()) &&
           UX->hasTypename() == UY->hasTypename() &&
           UX->isAccessDeclaration() == UY->isAccessDeclaration();
  }
  if (const auto *UX = dyn_cast<UnresolvedUsingValueDecl>(Val: X)) {
    const auto *UY = cast<UnresolvedUsingValueDecl>(Val: Y);
    return isSameQualifier(X: UX->getQualifier(), Y: UY->getQualifier()) &&
           UX->isAccessDeclaration() == UY->isAccessDeclaration();
  }
  if (const auto *UX = dyn_cast<UnresolvedUsingTypenameDecl>(Val: X)) {
    return isSameQualifier(
        X: UX->getQualifier(),
        Y: cast<UnresolvedUsingTypenameDecl>(Val: Y)->getQualifier());
  }

  // Using-pack declarations are only created by instantiation, and match if
  // they're instantiated from matching UnresolvedUsing...Decls.
  if (const auto *UX = dyn_cast<UsingPackDecl>(Val: X)) {
    return declaresSameEntity(
        D1: UX->getInstantiatedFromUsingDecl(),
        D2: cast<UsingPackDecl>(Val: Y)->getInstantiatedFromUsingDecl());
  }

  // Namespace alias definitions with the same target match.
  if (const auto *NAX = dyn_cast<NamespaceAliasDecl>(Val: X)) {
    const auto *NAY = cast<NamespaceAliasDecl>(Val: Y);
    return NAX->getNamespace()->Equals(DC: NAY->getNamespace());
  }

  return false;
}
7892
/// Produce the canonical form of the given template argument: canonicalize
/// the types, declarations, and template names it contains (recursively for
/// packs), preserving the argument kind and its is-defaulted bit.
TemplateArgument
ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
  switch (Arg.getKind()) {
  case TemplateArgument::Null:
    return Arg;

  case TemplateArgument::Expression:
    // Re-wrap the expression, marking the argument canonical.
    return TemplateArgument(Arg.getAsExpr(), /*IsCanonical=*/true,
                            Arg.getIsDefaulted());

  case TemplateArgument::Declaration: {
    auto *D = cast<ValueDecl>(Val: Arg.getAsDecl()->getCanonicalDecl());
    return TemplateArgument(D, getCanonicalType(T: Arg.getParamTypeForDecl()),
                            Arg.getIsDefaulted());
  }

  case TemplateArgument::NullPtr:
    return TemplateArgument(getCanonicalType(T: Arg.getNullPtrType()),
                            /*isNullPtr*/ true, Arg.getIsDefaulted());

  case TemplateArgument::Template:
    return TemplateArgument(getCanonicalTemplateName(Name: Arg.getAsTemplate()),
                            Arg.getIsDefaulted());

  case TemplateArgument::TemplateExpansion:
    return TemplateArgument(
        getCanonicalTemplateName(Name: Arg.getAsTemplateOrTemplatePattern()),
        Arg.getNumTemplateExpansions(), Arg.getIsDefaulted());

  case TemplateArgument::Integral:
    // Keep the value; canonicalize only its type.
    return TemplateArgument(Arg, getCanonicalType(T: Arg.getIntegralType()));

  case TemplateArgument::StructuralValue:
    return TemplateArgument(*this,
                            getCanonicalType(T: Arg.getStructuralValueType()),
                            Arg.getAsStructuralValue(), Arg.getIsDefaulted());

  case TemplateArgument::Type:
    return TemplateArgument(getCanonicalType(T: Arg.getAsType()),
                            /*isNullPtr*/ false, Arg.getIsDefaulted());

  case TemplateArgument::Pack: {
    // Canonicalize the elements; only build a new pack if something changed.
    bool AnyNonCanonArgs = false;
    auto CanonArgs = ::getCanonicalTemplateArguments(
        C: *this, Args: Arg.pack_elements(), AnyNonCanonArgs);
    if (!AnyNonCanonArgs)
      return Arg;
    auto NewArg = TemplateArgument::CreatePackCopy(
        Context&: const_cast<ASTContext &>(*this), Args: CanonArgs);
    NewArg.setIsDefaulted(Arg.getIsDefaulted());
    return NewArg;
  }
  }

  // Silence GCC warning
  llvm_unreachable("Unhandled template argument kind");
}
7950
7951bool ASTContext::isSameTemplateArgument(const TemplateArgument &Arg1,
7952 const TemplateArgument &Arg2) const {
7953 if (Arg1.getKind() != Arg2.getKind())
7954 return false;
7955
7956 switch (Arg1.getKind()) {
7957 case TemplateArgument::Null:
7958 llvm_unreachable("Comparing NULL template argument");
7959
7960 case TemplateArgument::Type:
7961 return hasSameType(T1: Arg1.getAsType(), T2: Arg2.getAsType());
7962
7963 case TemplateArgument::Declaration:
7964 return Arg1.getAsDecl()->getUnderlyingDecl()->getCanonicalDecl() ==
7965 Arg2.getAsDecl()->getUnderlyingDecl()->getCanonicalDecl();
7966
7967 case TemplateArgument::NullPtr:
7968 return hasSameType(T1: Arg1.getNullPtrType(), T2: Arg2.getNullPtrType());
7969
7970 case TemplateArgument::Template:
7971 case TemplateArgument::TemplateExpansion:
7972 return getCanonicalTemplateName(Name: Arg1.getAsTemplateOrTemplatePattern()) ==
7973 getCanonicalTemplateName(Name: Arg2.getAsTemplateOrTemplatePattern());
7974
7975 case TemplateArgument::Integral:
7976 return llvm::APSInt::isSameValue(I1: Arg1.getAsIntegral(),
7977 I2: Arg2.getAsIntegral());
7978
7979 case TemplateArgument::StructuralValue:
7980 return Arg1.structurallyEquals(Other: Arg2);
7981
7982 case TemplateArgument::Expression: {
7983 llvm::FoldingSetNodeID ID1, ID2;
7984 Arg1.getAsExpr()->Profile(ID&: ID1, Context: *this, /*Canonical=*/true);
7985 Arg2.getAsExpr()->Profile(ID&: ID2, Context: *this, /*Canonical=*/true);
7986 return ID1 == ID2;
7987 }
7988
7989 case TemplateArgument::Pack:
7990 return llvm::equal(
7991 LRange: Arg1.getPackAsArray(), RRange: Arg2.getPackAsArray(),
7992 P: [&](const TemplateArgument &Arg1, const TemplateArgument &Arg2) {
7993 return isSameTemplateArgument(Arg1, Arg2);
7994 });
7995 }
7996
7997 llvm_unreachable("Unhandled template argument kind");
7998}
7999
/// Return \p T as an ArrayType, looking through sugar (typedefs etc.) and
/// propagating any top-level qualifiers on the array into its element type
/// (C99 6.7.3p8). Returns null if T's canonical type is not an array.
const ArrayType *ASTContext::getAsArrayType(QualType T) const {
  // Handle the non-qualified case efficiently.
  if (!T.hasLocalQualifiers()) {
    // Handle the common positive case fast.
    if (const auto *AT = dyn_cast<ArrayType>(Val&: T))
      return AT;
  }

  // Handle the common negative case fast.
  if (!isa<ArrayType>(Val: T.getCanonicalType()))
    return nullptr;

  // Apply any qualifiers from the array type to the element type. This
  // implements C99 6.7.3p8: "If the specification of an array type includes
  // any type qualifiers, the element type is so qualified, not the array type."

  // If we get here, we either have type qualifiers on the type, or we have
  // sugar such as a typedef in the way. If we have type qualifiers on the type
  // we must propagate them down into the element type.

  SplitQualType split = T.getSplitDesugaredType();
  Qualifiers qs = split.Quals;

  // If we have a simple case, just return now.
  const auto *ATy = dyn_cast<ArrayType>(Val: split.Ty);
  if (!ATy || qs.empty())
    return ATy;

  // Otherwise, we have an array and we have qualifiers on it. Push the
  // qualifiers into the array element type and return a new array type.
  QualType NewEltTy = getQualifiedType(T: ATy->getElementType(), Qs: qs);

  // Rebuild the array with the qualified element type, preserving the
  // original array flavor (constant/incomplete/dependent/variable).
  if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: ATy))
    return cast<ArrayType>(Val: getConstantArrayType(EltTy: NewEltTy, ArySizeIn: CAT->getSize(),
                                                SizeExpr: CAT->getSizeExpr(),
                                                ASM: CAT->getSizeModifier(),
                                                IndexTypeQuals: CAT->getIndexTypeCVRQualifiers()));
  if (const auto *IAT = dyn_cast<IncompleteArrayType>(Val: ATy))
    return cast<ArrayType>(Val: getIncompleteArrayType(elementType: NewEltTy,
                                                  ASM: IAT->getSizeModifier(),
                                                  elementTypeQuals: IAT->getIndexTypeCVRQualifiers()));

  if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(Val: ATy))
    return cast<ArrayType>(Val: getDependentSizedArrayType(
        elementType: NewEltTy, numElements: DSAT->getSizeExpr(), ASM: DSAT->getSizeModifier(),
        elementTypeQuals: DSAT->getIndexTypeCVRQualifiers()));

  const auto *VAT = cast<VariableArrayType>(Val: ATy);
  return cast<ArrayType>(
      Val: getVariableArrayType(EltTy: NewEltTy, NumElts: VAT->getSizeExpr(), ASM: VAT->getSizeModifier(),
                            IndexTypeQuals: VAT->getIndexTypeCVRQualifiers()));
}
8052
8053QualType ASTContext::getAdjustedParameterType(QualType T) const {
8054 if (getLangOpts().HLSL && T->isConstantArrayType())
8055 return getArrayParameterType(Ty: T);
8056 if (T->isArrayType() || T->isFunctionType())
8057 return getDecayedType(T);
8058 return T;
8059}
8060
8061QualType ASTContext::getSignatureParameterType(QualType T) const {
8062 T = getVariableArrayDecayedType(type: T);
8063 T = getAdjustedParameterType(T);
8064 return T.getUnqualifiedType();
8065}
8066
8067QualType ASTContext::getExceptionObjectType(QualType T) const {
8068 // C++ [except.throw]p3:
8069 // A throw-expression initializes a temporary object, called the exception
8070 // object, the type of which is determined by removing any top-level
8071 // cv-qualifiers from the static type of the operand of throw and adjusting
8072 // the type from "array of T" or "function returning T" to "pointer to T"
8073 // or "pointer to function returning T", [...]
8074 T = getVariableArrayDecayedType(type: T);
8075 if (T->isArrayType() || T->isFunctionType())
8076 T = getDecayedType(T);
8077 return T.getUnqualifiedType();
8078}
8079
8080/// getArrayDecayedType - Return the properly qualified result of decaying the
8081/// specified array type to a pointer. This operation is non-trivial when
8082/// handling typedefs etc. The canonical type of "T" must be an array type,
8083/// this returns a pointer to a properly qualified element of the array.
8084///
8085/// See C99 6.7.5.3p7 and C99 6.3.2.1p3.
8086QualType ASTContext::getArrayDecayedType(QualType Ty) const {
8087 // Get the element type with 'getAsArrayType' so that we don't lose any
8088 // typedefs in the element type of the array. This also handles propagation
8089 // of type qualifiers from the array type into the element type if present
8090 // (C99 6.7.3p8).
8091 const ArrayType *PrettyArrayType = getAsArrayType(T: Ty);
8092 assert(PrettyArrayType && "Not an array type!");
8093
8094 QualType PtrTy = getPointerType(T: PrettyArrayType->getElementType());
8095
8096 // int x[restrict 4] -> int *restrict
8097 QualType Result = getQualifiedType(T: PtrTy,
8098 Qs: PrettyArrayType->getIndexTypeQualifiers());
8099
8100 // int x[_Nullable] -> int * _Nullable
8101 if (auto Nullability = Ty->getNullability()) {
8102 Result = const_cast<ASTContext *>(this)->getAttributedType(nullability: *Nullability,
8103 modifiedType: Result, equivalentType: Result);
8104 }
8105 return Result;
8106}
8107
8108QualType ASTContext::getBaseElementType(const ArrayType *array) const {
8109 return getBaseElementType(QT: array->getElementType());
8110}
8111
8112QualType ASTContext::getBaseElementType(QualType type) const {
8113 Qualifiers qs;
8114 while (true) {
8115 SplitQualType split = type.getSplitDesugaredType();
8116 const ArrayType *array = split.Ty->getAsArrayTypeUnsafe();
8117 if (!array) break;
8118
8119 type = array->getElementType();
8120 qs.addConsistentQualifiers(qs: split.Quals);
8121 }
8122
8123 return getQualifiedType(T: type, Qs: qs);
8124}
8125
8126/// getConstantArrayElementCount - Returns number of constant array elements.
8127uint64_t
8128ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const {
8129 uint64_t ElementCount = 1;
8130 do {
8131 ElementCount *= CA->getZExtSize();
8132 CA = dyn_cast_or_null<ConstantArrayType>(
8133 Val: CA->getElementType()->getAsArrayTypeUnsafe());
8134 } while (CA);
8135 return ElementCount;
8136}
8137
8138uint64_t ASTContext::getArrayInitLoopExprElementCount(
8139 const ArrayInitLoopExpr *AILE) const {
8140 if (!AILE)
8141 return 0;
8142
8143 uint64_t ElementCount = 1;
8144
8145 do {
8146 ElementCount *= AILE->getArraySize().getZExtValue();
8147 AILE = dyn_cast<ArrayInitLoopExpr>(Val: AILE->getSubExpr());
8148 } while (AILE);
8149
8150 return ElementCount;
8151}
8152
/// getFloatingRank - Return a relative rank for floating point types.
/// This routine will assert if passed a built-in type that isn't a float.
static FloatingRank getFloatingRank(QualType T) {
  // A complex type ranks the same as its element type, so '_Complex double'
  // ranks as 'double'.
  if (const auto *CT = T->getAs<ComplexType>())
    return getFloatingRank(T: CT->getElementType());

  switch (T->castAs<BuiltinType>()->getKind()) {
  default: llvm_unreachable("getFloatingRank(): not a floating type");
  case BuiltinType::Float16: return Float16Rank;
  case BuiltinType::Half: return HalfRank;
  case BuiltinType::Float: return FloatRank;
  case BuiltinType::Double: return DoubleRank;
  case BuiltinType::LongDouble: return LongDoubleRank;
  case BuiltinType::Float128: return Float128Rank;
  case BuiltinType::BFloat16: return BFloat16Rank;
  case BuiltinType::Ibm128: return Ibm128Rank;
  }
}
8171
8172/// getFloatingTypeOrder - Compare the rank of the two specified floating
8173/// point types, ignoring the domain of the type (i.e. 'double' ==
8174/// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If
8175/// LHS < RHS, return -1.
8176int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const {
8177 FloatingRank LHSR = getFloatingRank(T: LHS);
8178 FloatingRank RHSR = getFloatingRank(T: RHS);
8179
8180 if (LHSR == RHSR)
8181 return 0;
8182 if (LHSR > RHSR)
8183 return 1;
8184 return -1;
8185}
8186
8187int ASTContext::getFloatingTypeSemanticOrder(QualType LHS, QualType RHS) const {
8188 if (&getFloatTypeSemantics(T: LHS) == &getFloatTypeSemantics(T: RHS))
8189 return 0;
8190 return getFloatingTypeOrder(LHS, RHS);
8191}
8192
/// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This
/// routine will assert if passed a built-in type that isn't an integer or enum,
/// or if it is not canonicalized.
unsigned ASTContext::getIntegerRank(const Type *T) const {
  assert(T->isCanonicalUnqualified() && "T should be canonicalized");

  // The returned value packs the type's bit-width into the high bits
  // (width << 3) with a small per-kind tie-breaker in the low 3 bits, so a
  // wider type always outranks a narrower one and the standard ordering
  // breaks ties at equal width.

  // Results in this 'losing' to any type of the same size, but winning if
  // larger.
  if (const auto *EIT = dyn_cast<BitIntType>(Val: T))
    return 0 + (EIT->getNumBits() << 3);

  // An overflow-behavior type ranks the same as its underlying integer type.
  if (const auto *OBT = dyn_cast<OverflowBehaviorType>(Val: T))
    return getIntegerRank(T: OBT->getUnderlyingType().getTypePtr());

  switch (cast<BuiltinType>(Val: T)->getKind()) {
  default: llvm_unreachable("getIntegerRank(): not a built-in integer");
  case BuiltinType::Bool:
    return 1 + (getIntWidth(T: BoolTy) << 3);
  case BuiltinType::Char_S:
  case BuiltinType::Char_U:
  case BuiltinType::SChar:
  case BuiltinType::UChar:
    return 2 + (getIntWidth(T: CharTy) << 3);
  case BuiltinType::Short:
  case BuiltinType::UShort:
    return 3 + (getIntWidth(T: ShortTy) << 3);
  case BuiltinType::Int:
  case BuiltinType::UInt:
    return 4 + (getIntWidth(T: IntTy) << 3);
  case BuiltinType::Long:
  case BuiltinType::ULong:
    return 5 + (getIntWidth(T: LongTy) << 3);
  case BuiltinType::LongLong:
  case BuiltinType::ULongLong:
    return 6 + (getIntWidth(T: LongLongTy) << 3);
  case BuiltinType::Int128:
  case BuiltinType::UInt128:
    return 7 + (getIntWidth(T: Int128Ty) << 3);

  // "The ranks of char8_t, char16_t, char32_t, and wchar_t equal the ranks of
  // their underlying types" [c++20 conv.rank]
  case BuiltinType::Char8:
    return getIntegerRank(T: UnsignedCharTy.getTypePtr());
  case BuiltinType::Char16:
    return getIntegerRank(
        T: getFromTargetType(Type: Target->getChar16Type()).getTypePtr());
  case BuiltinType::Char32:
    return getIntegerRank(
        T: getFromTargetType(Type: Target->getChar32Type()).getTypePtr());
  case BuiltinType::WChar_S:
  case BuiltinType::WChar_U:
    return getIntegerRank(
        T: getFromTargetType(Type: Target->getWCharType()).getTypePtr());
  }
}
8248
/// Whether this is a promotable bitfield reference according
/// to C99 6.3.1.1p2, bullet 2 (and GCC extensions).
///
/// \returns the type this bit-field will promote to, or NULL if no
/// promotion occurs.
QualType ASTContext::isPromotableBitField(Expr *E) const {
  // Dependent expressions cannot be classified yet.
  if (E->isTypeDependent() || E->isValueDependent())
    return {};

  // C++ [conv.prom]p5:
  //   If the bit-field has an enumerated type, it is treated as any other
  //   value of that type for promotion purposes.
  if (getLangOpts().CPlusPlus && E->getType()->isEnumeralType())
    return {};

  // FIXME: We should not do this unless E->refersToBitField() is true. This
  // matters in C where getSourceBitField() will find bit-fields for various
  // cases where the source expression is not a bit-field designator.

  FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields?
  if (!Field)
    return {};

  QualType FT = Field->getType();

  // Compare the declared bit-field width against the width of 'int'.
  uint64_t BitWidth = Field->getBitWidthValue();
  uint64_t IntSize = getTypeSize(T: IntTy);
  // C++ [conv.prom]p5:
  //   A prvalue for an integral bit-field can be converted to a prvalue of type
  //   int if int can represent all the values of the bit-field; otherwise, it
  //   can be converted to unsigned int if unsigned int can represent all the
  //   values of the bit-field. If the bit-field is larger yet, no integral
  //   promotion applies to it.
  // C11 6.3.1.1/2:
  //   [For a bit-field of type _Bool, int, signed int, or unsigned int:]
  //   If an int can represent all values of the original type (as restricted by
  //   the width, for a bit-field), the value is converted to an int; otherwise,
  //   it is converted to an unsigned int.
  //
  // FIXME: C does not permit promotion of a 'long : 3' bitfield to int.
  // We perform that promotion here to match GCC and C++.
  // FIXME: C does not permit promotion of an enum bit-field whose rank is
  // greater than that of 'int'. We perform that promotion to match GCC.
  //
  // C23 6.3.1.1p2:
  //   The value from a bit-field of a bit-precise integer type is converted to
  //   the corresponding bit-precise integer type. (The rest is the same as in
  //   C11.)
  if (QualType QT = Field->getType(); QT->isBitIntType())
    return QT;

  // Narrower than int: promotes to int regardless of signedness.
  if (BitWidth < IntSize)
    return IntTy;

  // Exactly int-sized: signedness of the field's type decides.
  if (BitWidth == IntSize)
    return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy;

  // Bit-fields wider than int are not subject to promotions, and therefore act
  // like the base type. GCC has some weird bugs in this area that we
  // deliberately do not follow (GCC follows a pre-standard resolution to
  // C's DR315 which treats bit-width as being part of the type, and this leaks
  // into their semantics in some cases).
  return {};
}
8313
/// getPromotedIntegerType - Returns the type that Promotable will
/// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable
/// integer type.
QualType ASTContext::getPromotedIntegerType(QualType Promotable) const {
  assert(!Promotable.isNull());
  assert(isPromotableIntegerType(Promotable));
  // Enums promote to their precomputed promotion type.
  if (const auto *ED = Promotable->getAsEnumDecl())
    return ED->getPromotionType();

  // OverflowBehaviorTypes promote their underlying type and preserve OBT
  // qualifier.
  if (const auto *OBT = Promotable->getAs<OverflowBehaviorType>()) {
    QualType PromotedUnderlying =
        getPromotedIntegerType(Promotable: OBT->getUnderlyingType());
    return getOverflowBehaviorType(Kind: OBT->getBehaviorKind(), Underlying: PromotedUnderlying);
  }

  if (const auto *BT = Promotable->getAs<BuiltinType>()) {
    // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t
    // (3.9.1) can be converted to a prvalue of the first of the following
    // types that can represent all the values of its underlying type:
    // int, unsigned int, long int, unsigned long int, long long int, or
    // unsigned long long int [...]
    // FIXME: Is there some better way to compute this?
    if (BT->getKind() == BuiltinType::WChar_S ||
        BT->getKind() == BuiltinType::WChar_U ||
        BT->getKind() == BuiltinType::Char8 ||
        BT->getKind() == BuiltinType::Char16 ||
        BT->getKind() == BuiltinType::Char32) {
      bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S;
      uint64_t FromSize = getTypeSize(T: BT);
      // Candidates in the order mandated by [conv.prom]; pick the first one
      // that is wider, or equally wide with matching signedness.
      QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy,
                                  LongLongTy, UnsignedLongLongTy };
      for (const auto &PT : PromoteTypes) {
        uint64_t ToSize = getTypeSize(T: PT);
        if (FromSize < ToSize ||
            (FromSize == ToSize && FromIsSigned == PT->isSignedIntegerType()))
          return PT;
      }
      llvm_unreachable("char type should fit into long long");
    }
  }

  // At this point, we should have a signed or unsigned integer type.
  if (Promotable->isSignedIntegerType())
    return IntTy;
  uint64_t PromotableSize = getIntWidth(T: Promotable);
  uint64_t IntSize = getIntWidth(T: IntTy);
  assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize);
  // An unsigned type narrower than int promotes to int; an exactly int-sized
  // unsigned type promotes to unsigned int.
  return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy;
}
8365
8366/// Recurses in pointer/array types until it finds an objc retainable
8367/// type and returns its ownership.
8368Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const {
8369 while (!T.isNull()) {
8370 if (T.getObjCLifetime() != Qualifiers::OCL_None)
8371 return T.getObjCLifetime();
8372 if (T->isArrayType())
8373 T = getBaseElementType(type: T);
8374 else if (const auto *PT = T->getAs<PointerType>())
8375 T = PT->getPointeeType();
8376 else if (const auto *RT = T->getAs<ReferenceType>())
8377 T = RT->getPointeeType();
8378 else
8379 break;
8380 }
8381
8382 return Qualifiers::OCL_None;
8383}
8384
8385static const Type *getIntegerTypeForEnum(const EnumType *ET) {
8386 // Incomplete enum types are not treated as integer types.
8387 // FIXME: In C++, enum types are never integer types.
8388 const EnumDecl *ED = ET->getDecl()->getDefinitionOrSelf();
8389 if (ED->isComplete() && !ED->isScoped())
8390 return ED->getIntegerType().getTypePtr();
8391 return nullptr;
8392}
8393
/// getIntegerTypeOrder - Returns the highest ranked integer type:
/// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If
/// LHS < RHS, return -1.
int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const {
  const Type *LHSC = getCanonicalType(T: LHS).getTypePtr();
  const Type *RHSC = getCanonicalType(T: RHS).getTypePtr();

  // Unwrap enums to their underlying type.
  // NOTE(review): getIntegerTypeForEnum returns null for scoped or incomplete
  // enums; the code below assumes non-null operands when the two differ —
  // confirm callers only pass integer-ranked types here.
  if (const auto *ET = dyn_cast<EnumType>(Val: LHSC))
    LHSC = getIntegerTypeForEnum(ET);
  if (const auto *ET = dyn_cast<EnumType>(Val: RHSC))
    RHSC = getIntegerTypeForEnum(ET);

  if (LHSC == RHSC) return 0;

  bool LHSUnsigned = LHSC->isUnsignedIntegerType();
  bool RHSUnsigned = RHSC->isUnsignedIntegerType();

  unsigned LHSRank = getIntegerRank(T: LHSC);
  unsigned RHSRank = getIntegerRank(T: RHSC);

  if (LHSUnsigned == RHSUnsigned) {  // Both signed or both unsigned.
    if (LHSRank == RHSRank) return 0;
    return LHSRank > RHSRank ? 1 : -1;
  }

  // Otherwise, the LHS is signed and the RHS is unsigned or visa versa.
  if (LHSUnsigned) {
    // If the unsigned [LHS] type is larger, return it.
    if (LHSRank >= RHSRank)
      return 1;

    // If the signed type can represent all values of the unsigned type, it
    // wins.  Because we are dealing with 2's complement and types that are
    // powers of two larger than each other, this is always safe.
    return -1;
  }

  // If the unsigned [RHS] type is larger, return it.
  if (RHSRank >= LHSRank)
    return -1;

  // If the signed type can represent all values of the unsigned type, it
  // wins.  Because we are dealing with 2's complement and types that are
  // powers of two larger than each other, this is always safe.
  return 1;
}
8441
8442TypedefDecl *ASTContext::getCFConstantStringDecl() const {
8443 if (CFConstantStringTypeDecl)
8444 return CFConstantStringTypeDecl;
8445
8446 assert(!CFConstantStringTagDecl &&
8447 "tag and typedef should be initialized together");
8448 CFConstantStringTagDecl = buildImplicitRecord(Name: "__NSConstantString_tag");
8449 CFConstantStringTagDecl->startDefinition();
8450
8451 struct {
8452 QualType Type;
8453 const char *Name;
8454 } Fields[5];
8455 unsigned Count = 0;
8456
8457 /// Objective-C ABI
8458 ///
8459 /// typedef struct __NSConstantString_tag {
8460 /// const int *isa;
8461 /// int flags;
8462 /// const char *str;
8463 /// long length;
8464 /// } __NSConstantString;
8465 ///
8466 /// Swift ABI (4.1, 4.2)
8467 ///
8468 /// typedef struct __NSConstantString_tag {
8469 /// uintptr_t _cfisa;
8470 /// uintptr_t _swift_rc;
8471 /// _Atomic(uint64_t) _cfinfoa;
8472 /// const char *_ptr;
8473 /// uint32_t _length;
8474 /// } __NSConstantString;
8475 ///
8476 /// Swift ABI (5.0)
8477 ///
8478 /// typedef struct __NSConstantString_tag {
8479 /// uintptr_t _cfisa;
8480 /// uintptr_t _swift_rc;
8481 /// _Atomic(uint64_t) _cfinfoa;
8482 /// const char *_ptr;
8483 /// uintptr_t _length;
8484 /// } __NSConstantString;
8485
8486 const auto CFRuntime = getLangOpts().CFRuntime;
8487 if (static_cast<unsigned>(CFRuntime) <
8488 static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift)) {
8489 Fields[Count++] = { .Type: getPointerType(T: IntTy.withConst()), .Name: "isa" };
8490 Fields[Count++] = { .Type: IntTy, .Name: "flags" };
8491 Fields[Count++] = { .Type: getPointerType(T: CharTy.withConst()), .Name: "str" };
8492 Fields[Count++] = { .Type: LongTy, .Name: "length" };
8493 } else {
8494 Fields[Count++] = { .Type: getUIntPtrType(), .Name: "_cfisa" };
8495 Fields[Count++] = { .Type: getUIntPtrType(), .Name: "_swift_rc" };
8496 Fields[Count++] = { .Type: getFromTargetType(Type: Target->getUInt64Type()), .Name: "_swift_rc" };
8497 Fields[Count++] = { .Type: getPointerType(T: CharTy.withConst()), .Name: "_ptr" };
8498 if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 ||
8499 CFRuntime == LangOptions::CoreFoundationABI::Swift4_2)
8500 Fields[Count++] = { .Type: IntTy, .Name: "_ptr" };
8501 else
8502 Fields[Count++] = { .Type: getUIntPtrType(), .Name: "_ptr" };
8503 }
8504
8505 // Create fields
8506 for (unsigned i = 0; i < Count; ++i) {
8507 FieldDecl *Field =
8508 FieldDecl::Create(C: *this, DC: CFConstantStringTagDecl, StartLoc: SourceLocation(),
8509 IdLoc: SourceLocation(), Id: &Idents.get(Name: Fields[i].Name),
8510 T: Fields[i].Type, /*TInfo=*/nullptr,
8511 /*BitWidth=*/BW: nullptr, /*Mutable=*/false, InitStyle: ICIS_NoInit);
8512 Field->setAccess(AS_public);
8513 CFConstantStringTagDecl->addDecl(D: Field);
8514 }
8515
8516 CFConstantStringTagDecl->completeDefinition();
8517 // This type is designed to be compatible with NSConstantString, but cannot
8518 // use the same name, since NSConstantString is an interface.
8519 CanQualType tagType = getCanonicalTagType(TD: CFConstantStringTagDecl);
8520 CFConstantStringTypeDecl =
8521 buildImplicitTypedef(T: tagType, Name: "__NSConstantString");
8522
8523 return CFConstantStringTypeDecl;
8524}
8525
8526RecordDecl *ASTContext::getCFConstantStringTagDecl() const {
8527 if (!CFConstantStringTagDecl)
8528 getCFConstantStringDecl(); // Build the tag and the typedef.
8529 return CFConstantStringTagDecl;
8530}
8531
// getCFConstantStringType - Return the type used for constant CFStrings.
// This is the __NSConstantString typedef of the implicit tag record.
QualType ASTContext::getCFConstantStringType() const {
  return getTypedefType(Keyword: ElaboratedTypeKeyword::None, /*Qualifier=*/std::nullopt,
                        Decl: getCFConstantStringDecl());
}
8537
8538QualType ASTContext::getObjCSuperType() const {
8539 if (ObjCSuperType.isNull()) {
8540 RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord(Name: "objc_super");
8541 getTranslationUnitDecl()->addDecl(D: ObjCSuperTypeDecl);
8542 ObjCSuperType = getCanonicalTagType(TD: ObjCSuperTypeDecl);
8543 }
8544 return ObjCSuperType;
8545}
8546
/// Override the lazily-built CFConstantString declarations with the given
/// type. T must be a TypedefType (of a TypedefDecl) whose underlying type is
/// the tag record; both cached declarations are updated together.
void ASTContext::setCFConstantStringType(QualType T) {
  const auto *TT = T->castAs<TypedefType>();
  CFConstantStringTypeDecl = cast<TypedefDecl>(Val: TT->getDecl());
  CFConstantStringTagDecl = TT->castAsRecordDecl();
}
8552
8553QualType ASTContext::getBlockDescriptorType() const {
8554 if (BlockDescriptorType)
8555 return getCanonicalTagType(TD: BlockDescriptorType);
8556
8557 RecordDecl *RD;
8558 // FIXME: Needs the FlagAppleBlock bit.
8559 RD = buildImplicitRecord(Name: "__block_descriptor");
8560 RD->startDefinition();
8561
8562 QualType FieldTypes[] = {
8563 UnsignedLongTy,
8564 UnsignedLongTy,
8565 };
8566
8567 static const char *const FieldNames[] = {
8568 "reserved",
8569 "Size"
8570 };
8571
8572 for (size_t i = 0; i < 2; ++i) {
8573 FieldDecl *Field = FieldDecl::Create(
8574 C: *this, DC: RD, StartLoc: SourceLocation(), IdLoc: SourceLocation(),
8575 Id: &Idents.get(Name: FieldNames[i]), T: FieldTypes[i], /*TInfo=*/nullptr,
8576 /*BitWidth=*/BW: nullptr, /*Mutable=*/false, InitStyle: ICIS_NoInit);
8577 Field->setAccess(AS_public);
8578 RD->addDecl(D: Field);
8579 }
8580
8581 RD->completeDefinition();
8582
8583 BlockDescriptorType = RD;
8584
8585 return getCanonicalTagType(TD: BlockDescriptorType);
8586}
8587
8588QualType ASTContext::getBlockDescriptorExtendedType() const {
8589 if (BlockDescriptorExtendedType)
8590 return getCanonicalTagType(TD: BlockDescriptorExtendedType);
8591
8592 RecordDecl *RD;
8593 // FIXME: Needs the FlagAppleBlock bit.
8594 RD = buildImplicitRecord(Name: "__block_descriptor_withcopydispose");
8595 RD->startDefinition();
8596
8597 QualType FieldTypes[] = {
8598 UnsignedLongTy,
8599 UnsignedLongTy,
8600 getPointerType(T: VoidPtrTy),
8601 getPointerType(T: VoidPtrTy)
8602 };
8603
8604 static const char *const FieldNames[] = {
8605 "reserved",
8606 "Size",
8607 "CopyFuncPtr",
8608 "DestroyFuncPtr"
8609 };
8610
8611 for (size_t i = 0; i < 4; ++i) {
8612 FieldDecl *Field = FieldDecl::Create(
8613 C: *this, DC: RD, StartLoc: SourceLocation(), IdLoc: SourceLocation(),
8614 Id: &Idents.get(Name: FieldNames[i]), T: FieldTypes[i], /*TInfo=*/nullptr,
8615 /*BitWidth=*/BW: nullptr,
8616 /*Mutable=*/false, InitStyle: ICIS_NoInit);
8617 Field->setAccess(AS_public);
8618 RD->addDecl(D: Field);
8619 }
8620
8621 RD->completeDefinition();
8622
8623 BlockDescriptorExtendedType = RD;
8624 return getCanonicalTagType(TD: BlockDescriptorExtendedType);
8625}
8626
/// Classify a type into the OpenCL type-kind categories used for
/// target-specific handling (e.g. address-space selection).
OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const {
  const auto *BT = dyn_cast<BuiltinType>(Val: T);

  // Among non-builtin types, only pipes get a dedicated kind.
  if (!BT) {
    if (isa<PipeType>(Val: T))
      return OCLTK_Pipe;

    return OCLTK_Default;
  }

  switch (BT->getKind()) {
// All OpenCL image builtins map to the image kind.
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
  case BuiltinType::Id: \
    return OCLTK_Image;
#include "clang/Basic/OpenCLImageTypes.def"

  case BuiltinType::OCLClkEvent:
    return OCLTK_ClkEvent;

  case BuiltinType::OCLEvent:
    return OCLTK_Event;

  case BuiltinType::OCLQueue:
    return OCLTK_Queue;

  case BuiltinType::OCLReserveID:
    return OCLTK_ReserveID;

  case BuiltinType::OCLSampler:
    return OCLTK_Sampler;

  default:
    return OCLTK_Default;
  }
}
8662
8663LangAS ASTContext::getOpenCLTypeAddrSpace(const Type *T) const {
8664 return Target->getOpenCLTypeAddrSpace(TK: getOpenCLTypeKind(T));
8665}
8666
/// BlockRequiresCopying - Returns true if byref variable "D" of type "Ty"
/// requires copy/dispose. Note that this must match the logic
/// in buildByrefHelpers.
bool ASTContext::BlockRequiresCopying(QualType Ty,
                                      const VarDecl *D) {
  // C++ records need helpers unless both the copy expression is absent and
  // the destructor is trivial.
  if (const CXXRecordDecl *record = Ty->getAsCXXRecordDecl()) {
    const Expr *copyExpr = getBlockVarCopyInit(VD: D).getCopyExpr();
    if (!copyExpr && record->hasTrivialDestructor()) return false;

    return true;
  }

  // Types carrying address-discriminated pointer authentication always
  // require helpers.
  if (Ty.hasAddressDiscriminatedPointerAuth())
    return true;

  // The block needs copy/destroy helpers if Ty is non-trivial to destructively
  // move or destroy.
  if (Ty.isNonTrivialToPrimitiveDestructiveMove() || Ty.isDestructedType())
    return true;

  if (!Ty->isObjCRetainableType()) return false;

  Qualifiers qs = Ty.getQualifiers();

  // If we have lifetime, that dominates.
  if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) {
    switch (lifetime) {
    case Qualifiers::OCL_None: llvm_unreachable("impossible");

    // These are just bits as far as the runtime is concerned.
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      return false;

    // These cases should have been taken care of when checking the type's
    // non-triviality.
    case Qualifiers::OCL_Weak:
    case Qualifiers::OCL_Strong:
      llvm_unreachable("impossible");
    }
    llvm_unreachable("fell out of lifetime switch!");
  }
  // No lifetime qualifier: copying is needed for the retainable pointer kinds.
  return (Ty->isBlockPointerType() || isObjCNSObjectType(Ty) ||
          Ty->isObjCObjectPointerType());
}
8712
/// Compute the ObjC lifetime (and whether an extended byref layout is used)
/// for a variable of type Ty. Returns false — leaving the outputs untouched —
/// when not compiling Objective-C or when a GC mode is active.
bool ASTContext::getByrefLifetime(QualType Ty,
                                  Qualifiers::ObjCLifetime &LifeTime,
                                  bool &HasByrefExtendedLayout) const {
  if (!getLangOpts().ObjC ||
      getLangOpts().getGC() != LangOptions::NonGC)
    return false;

  HasByrefExtendedLayout = false;
  if (Ty->isRecordType()) {
    // Records get the extended layout and no lifetime.
    HasByrefExtendedLayout = true;
    LifeTime = Qualifiers::OCL_None;
  } else if ((LifeTime = Ty.getObjCLifetime())) {
    // Honor the ARC qualifiers.
  } else if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) {
    // The MRR rule.
    LifeTime = Qualifiers::OCL_ExplicitNone;
  } else {
    LifeTime = Qualifiers::OCL_None;
  }
  return true;
}
8734
8735CanQualType ASTContext::getNSUIntegerType() const {
8736 assert(Target && "Expected target to be initialized");
8737 const llvm::Triple &T = Target->getTriple();
8738 // Windows is LLP64 rather than LP64
8739 if (T.isOSWindows() && T.isArch64Bit())
8740 return UnsignedLongLongTy;
8741 return UnsignedLongTy;
8742}
8743
8744CanQualType ASTContext::getNSIntegerType() const {
8745 assert(Target && "Expected target to be initialized");
8746 const llvm::Triple &T = Target->getTriple();
8747 // Windows is LLP64 rather than LP64
8748 if (T.isOSWindows() && T.isArch64Bit())
8749 return LongLongTy;
8750 return LongTy;
8751}
8752
8753TypedefDecl *ASTContext::getObjCInstanceTypeDecl() {
8754 if (!ObjCInstanceTypeDecl)
8755 ObjCInstanceTypeDecl =
8756 buildImplicitTypedef(T: getObjCIdType(), Name: "instancetype");
8757 return ObjCInstanceTypeDecl;
8758}
8759
8760// This returns true if a type has been typedefed to BOOL:
8761// typedef <type> BOOL;
8762static bool isTypeTypedefedAsBOOL(QualType T) {
8763 if (const auto *TT = dyn_cast<TypedefType>(Val&: T))
8764 if (IdentifierInfo *II = TT->getDecl()->getIdentifier())
8765 return II->isStr(Str: "BOOL");
8766
8767 return false;
8768}
8769
/// getObjCEncodingTypeSize returns size of type for objective-c encoding
/// purpose.
CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const {
  // Incomplete types contribute no size; incomplete arrays are handled below
  // by the array-to-pointer rule.
  if (!type->isIncompleteArrayType() && type->isIncompleteType())
    return CharUnits::Zero();

  CharUnits sz = getTypeSizeInChars(T: type);

  // Make all integer and enum types at least as large as an int
  if (sz.isPositive() && type->isIntegralOrEnumerationType())
    sz = std::max(a: sz, b: getTypeSizeInChars(T: IntTy));
  // Treat arrays as pointers, since that's how they're passed in.
  else if (type->isArrayType())
    sz = getTypeSizeInChars(T: VoidPtrTy);
  return sz;
}
8786
8787bool ASTContext::isMSStaticDataMemberInlineDefinition(const VarDecl *VD) const {
8788 return getTargetInfo().getCXXABI().isMicrosoft() &&
8789 VD->isStaticDataMember() &&
8790 VD->getType()->isIntegralOrEnumerationType() &&
8791 !VD->getFirstDecl()->isOutOfLine() && VD->getFirstDecl()->hasInit();
8792}
8793
/// Classify how a definition of the inline variable VD behaves: not inline at
/// all, a weak (discardable) definition, a strong one, or weak-so-far.
ASTContext::InlineVariableDefinitionKind
ASTContext::getInlineVariableDefinitionKind(const VarDecl *VD) const {
  if (!VD->isInline())
    return InlineVariableDefinitionKind::None;

  // In almost all cases, it's a weak definition.
  auto *First = VD->getFirstDecl();
  if (First->isInlineSpecified() || !First->isStaticDataMember())
    return InlineVariableDefinitionKind::Weak;

  // If there's a file-context declaration in this translation unit, it's a
  // non-discardable definition.
  for (auto *D : VD->redecls())
    if (D->getLexicalDeclContext()->isFileContext() &&
        !D->isInlineSpecified() && (D->isConstexpr() || First->isConstexpr()))
      return InlineVariableDefinitionKind::Strong;

  // If we've not seen one yet, we don't know.
  return InlineVariableDefinitionKind::WeakUnknown;
}
8814
8815static std::string charUnitsToString(const CharUnits &CU) {
8816 return llvm::itostr(X: CU.getQuantity());
8817}
8818
/// getObjCEncodingForBlock - Return the encoded type for this block
/// declaration.
///
/// The string has the shape: <return-type><frame-size>@?0<param><offset>...,
/// where "@?0" is the block pointer itself at offset 0 and each parameter's
/// encoding is followed by its byte offset in the argument frame.
std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const {
  std::string S;

  const BlockDecl *Decl = Expr->getBlockDecl();
  QualType BlockTy =
      Expr->getType()->castAs<BlockPointerType>()->getPointeeType();
  QualType BlockReturnTy = BlockTy->castAs<FunctionType>()->getReturnType();
  // Encode result type.
  if (getLangOpts().EncodeExtendedBlockSig)
    getObjCEncodingForMethodParameter(QT: Decl::OBJC_TQ_None, T: BlockReturnTy, S,
                                      Extended: true /*Extended*/);
  else
    getObjCEncodingForType(T: BlockReturnTy, S);
  // Compute size of all parameters.
  // Start with computing size of a pointer in number of bytes.
  // FIXME: There might(should) be a better way of doing this computation!
  CharUnits PtrSize = getTypeSizeInChars(T: VoidPtrTy);
  CharUnits ParmOffset = PtrSize;
  // Zero-sized (incomplete) parameters are skipped here and below.
  for (auto *PI : Decl->parameters()) {
    QualType PType = PI->getType();
    CharUnits sz = getObjCEncodingTypeSize(type: PType);
    if (sz.isZero())
      continue;
    assert(sz.isPositive() && "BlockExpr - Incomplete param type");
    ParmOffset += sz;
  }
  // Size of the argument frame
  S += charUnitsToString(CU: ParmOffset);
  // Block pointer and offset.
  S += "@?0";

  // Argument types.
  ParmOffset = PtrSize;
  for (auto *PVDecl : Decl->parameters()) {
    QualType PType = PVDecl->getOriginalType();
    if (const auto *AT =
            dyn_cast<ArrayType>(Val: PType->getCanonicalTypeInternal())) {
      // Use array's original type only if it has known number of
      // elements.
      if (!isa<ConstantArrayType>(Val: AT))
        PType = PVDecl->getType();
    } else if (PType->isFunctionType())
      PType = PVDecl->getType();
    if (getLangOpts().EncodeExtendedBlockSig)
      getObjCEncodingForMethodParameter(QT: Decl::OBJC_TQ_None, T: PType,
                                      S, Extended: true /*Extended*/);
    else
      getObjCEncodingForType(T: PType, S);
    S += charUnitsToString(CU: ParmOffset);
    ParmOffset += getObjCEncodingTypeSize(type: PType);
  }

  return S;
}
8875
/// Return the ObjC type encoding for a function declaration: the encoded
/// return type, the total parameter frame size, then each parameter's
/// encoding followed by its byte offset.
std::string
ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const {
  std::string S;
  // Encode result type.
  getObjCEncodingForType(T: Decl->getReturnType(), S);
  CharUnits ParmOffset;
  // Compute size of all parameters.
  for (auto *PI : Decl->parameters()) {
    QualType PType = PI->getType();
    CharUnits sz = getObjCEncodingTypeSize(type: PType);
    if (sz.isZero())
      continue;

    assert(sz.isPositive() &&
           "getObjCEncodingForFunctionDecl - Incomplete param type");
    ParmOffset += sz;
  }
  S += charUnitsToString(CU: ParmOffset);
  ParmOffset = CharUnits::Zero();

  // Argument types.
  for (auto *PVDecl : Decl->parameters()) {
    QualType PType = PVDecl->getOriginalType();
    if (const auto *AT =
            dyn_cast<ArrayType>(Val: PType->getCanonicalTypeInternal())) {
      // Use array's original type only if it has known number of
      // elements.
      if (!isa<ConstantArrayType>(Val: AT))
        PType = PVDecl->getType();
    } else if (PType->isFunctionType())
      PType = PVDecl->getType();
    getObjCEncodingForType(T: PType, S);
    S += charUnitsToString(CU: ParmOffset);
    ParmOffset += getObjCEncodingTypeSize(type: PType);
  }

  return S;
}
8914
8915/// getObjCEncodingForMethodParameter - Return the encoded type for a single
8916/// method parameter or return type. If Extended, include class names and
8917/// block object types.
8918void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT,
8919 QualType T, std::string& S,
8920 bool Extended) const {
8921 // Encode type qualifier, 'in', 'inout', etc. for the parameter.
8922 getObjCEncodingForTypeQualifier(QT, S);
8923 // Encode parameter type.
8924 ObjCEncOptions Options = ObjCEncOptions()
8925 .setExpandPointedToStructures()
8926 .setExpandStructures()
8927 .setIsOutermostType();
8928 if (Extended)
8929 Options.setEncodeBlockParameters().setEncodeClassNames();
8930 getObjCEncodingForTypeImpl(t: T, S, Options, /*Field=*/nullptr);
8931}
8932
/// getObjCEncodingForMethodDecl - Return the encoded type for this method
/// declaration.
///
/// The string has the shape: <return><frame-size>@0:<ptrsize><param><offset>...
/// where "@0" is the implicit 'self' at offset 0 and ":" is the implicit
/// '_cmd' selector at offset <ptrsize>.
std::string ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl,
                                                     bool Extended) const {
  // FIXME: This is not very efficient.
  // Encode return type.
  std::string S;
  getObjCEncodingForMethodParameter(QT: Decl->getObjCDeclQualifier(),
                                    T: Decl->getReturnType(), S, Extended);
  // Compute size of all parameters.
  // Start with computing size of a pointer in number of bytes.
  // FIXME: There might(should) be a better way of doing this computation!
  CharUnits PtrSize = getTypeSizeInChars(T: VoidPtrTy);
  // The first two arguments (self and _cmd) are pointers; account for
  // their size.
  CharUnits ParmOffset = 2 * PtrSize;
  for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(),
       E = Decl->sel_param_end(); PI != E; ++PI) {
    QualType PType = (*PI)->getType();
    CharUnits sz = getObjCEncodingTypeSize(type: PType);
    if (sz.isZero())
      continue;

    assert(sz.isPositive() &&
           "getObjCEncodingForMethodDecl - Incomplete param type");
    ParmOffset += sz;
  }
  S += charUnitsToString(CU: ParmOffset);
  S += "@0:";
  S += charUnitsToString(CU: PtrSize);

  // Argument types.
  ParmOffset = 2 * PtrSize;
  for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(),
       E = Decl->sel_param_end(); PI != E; ++PI) {
    const ParmVarDecl *PVDecl = *PI;
    QualType PType = PVDecl->getOriginalType();
    if (const auto *AT =
            dyn_cast<ArrayType>(Val: PType->getCanonicalTypeInternal())) {
      // Use array's original type only if it has known number of
      // elements.
      if (!isa<ConstantArrayType>(Val: AT))
        PType = PVDecl->getType();
    } else if (PType->isFunctionType())
      PType = PVDecl->getType();
    getObjCEncodingForMethodParameter(QT: PVDecl->getObjCDeclQualifier(),
                                      T: PType, S, Extended);
    S += charUnitsToString(CU: ParmOffset);
    ParmOffset += getObjCEncodingTypeSize(type: PType);
  }

  return S;
}
8986
8987ObjCPropertyImplDecl *
8988ASTContext::getObjCPropertyImplDeclForPropertyDecl(
8989 const ObjCPropertyDecl *PD,
8990 const Decl *Container) const {
8991 if (!Container)
8992 return nullptr;
8993 if (const auto *CID = dyn_cast<ObjCCategoryImplDecl>(Val: Container)) {
8994 for (auto *PID : CID->property_impls())
8995 if (PID->getPropertyDecl() == PD)
8996 return PID;
8997 } else {
8998 const auto *OID = cast<ObjCImplementationDecl>(Val: Container);
8999 for (auto *PID : OID->property_impls())
9000 if (PID->getPropertyDecl() == PD)
9001 return PID;
9002 }
9003 return nullptr;
9004}
9005
9006/// getObjCEncodingForPropertyDecl - Return the encoded type for this
9007/// property declaration. If non-NULL, Container must be either an
9008/// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be
9009/// NULL when getting encodings for protocol properties.
9010/// Property attributes are stored as a comma-delimited C string. The simple
9011/// attributes readonly and bycopy are encoded as single characters. The
9012/// parametrized attributes, getter=name, setter=name, and ivar=name, are
9013/// encoded as single characters, followed by an identifier. Property types
9014/// are also encoded as a parametrized attribute. The characters used to encode
9015/// these attributes are defined by the following enumeration:
9016/// @code
9017/// enum PropertyAttributes {
9018/// kPropertyReadOnly = 'R', // property is read-only.
9019/// kPropertyBycopy = 'C', // property is a copy of the value last assigned
9020/// kPropertyByref = '&', // property is a reference to the value last assigned
9021/// kPropertyDynamic = 'D', // property is dynamic
9022/// kPropertyGetter = 'G', // followed by getter selector name
9023/// kPropertySetter = 'S', // followed by setter selector name
9024/// kPropertyInstanceVariable = 'V' // followed by instance variable name
9025/// kPropertyType = 'T' // followed by old-style type encoding.
9026/// kPropertyWeak = 'W' // 'weak' property
9027/// kPropertyStrong = 'P' // property GC'able
9028/// kPropertyNonAtomic = 'N' // property non-atomic
9029/// kPropertyOptional = '?' // property optional
9030/// };
9031/// @endcode
std::string
ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
                                           const Decl *Container) const {
  // Collect information from the property implementation decl(s).
  bool Dynamic = false;
  ObjCPropertyImplDecl *SynthesizePID = nullptr;

  if (ObjCPropertyImplDecl *PropertyImpDecl =
          getObjCPropertyImplDeclForPropertyDecl(PD, Container)) {
    if (PropertyImpDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
      Dynamic = true;
    else
      SynthesizePID = PropertyImpDecl;
  }

  // FIXME: This is not very efficient.
  // The encoding always begins with the 'T' (type) attribute.
  std::string S = "T";

  // Encode result type.
  // GCC has some special rules regarding encoding of properties which
  // closely resembles encoding of ivars.
  getObjCEncodingForPropertyType(T: PD->getType(), S);

  if (PD->isOptional())
    S += ",?";

  if (PD->isReadOnly()) {
    S += ",R";
    // Ownership attributes are still emitted for read-only properties so
    // the runtime knows how a synthesized backing ivar is managed.
    if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_copy)
      S += ",C";
    if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_retain)
      S += ",&";
    if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak)
      S += ",W";
  } else {
    // For writable properties the setter kind determines the ownership
    // attribute (assign emits nothing).
    switch (PD->getSetterKind()) {
    case ObjCPropertyDecl::Assign: break;
    case ObjCPropertyDecl::Copy: S += ",C"; break;
    case ObjCPropertyDecl::Retain: S += ",&"; break;
    case ObjCPropertyDecl::Weak: S += ",W"; break;
    }
  }

  // It really isn't clear at all what this means, since properties
  // are "dynamic by default".
  if (Dynamic)
    S += ",D";

  if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_nonatomic)
    S += ",N";

  // 'G' and 'S' are only emitted for explicitly named accessors.
  if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_getter) {
    S += ",G";
    S += PD->getGetterName().getAsString();
  }

  if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_setter) {
    S += ",S";
    S += PD->getSetterName().getAsString();
  }

  // Synthesized properties record the name of their backing ivar.
  if (SynthesizePID) {
    const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl();
    S += ",V";
    S += OID->getNameAsString();
  }

  // FIXME: OBJCGC: weak & strong
  return S;
}
9102
9103/// getLegacyIntegralTypeEncoding -
9104/// Another legacy compatibility encoding: 32-bit longs are encoded as
9105/// 'l' or 'L' , but not always. For typedefs, we need to use
9106/// 'i' or 'I' instead if encoding a struct field, or a pointer!
9107void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const {
9108 if (PointeeTy->getAs<TypedefType>()) {
9109 if (const auto *BT = PointeeTy->getAs<BuiltinType>()) {
9110 if (BT->getKind() == BuiltinType::ULong && getIntWidth(T: PointeeTy) == 32)
9111 PointeeTy = UnsignedIntTy;
9112 else
9113 if (BT->getKind() == BuiltinType::Long && getIntWidth(T: PointeeTy) == 32)
9114 PointeeTy = IntTy;
9115 }
9116 }
9117}
9118
9119void ASTContext::getObjCEncodingForType(QualType T, std::string& S,
9120 const FieldDecl *Field,
9121 QualType *NotEncodedT) const {
9122 // We follow the behavior of gcc, expanding structures which are
9123 // directly pointed to, and expanding embedded structures. Note that
9124 // these rules are sufficient to prevent recursive encoding of the
9125 // same type.
9126 getObjCEncodingForTypeImpl(t: T, S,
9127 Options: ObjCEncOptions()
9128 .setExpandPointedToStructures()
9129 .setExpandStructures()
9130 .setIsOutermostType(),
9131 Field, NotEncodedT);
9132}
9133
9134void ASTContext::getObjCEncodingForPropertyType(QualType T,
9135 std::string& S) const {
9136 // Encode result type.
9137 // GCC has some special rules regarding encoding of properties which
9138 // closely resembles encoding of ivars.
9139 getObjCEncodingForTypeImpl(t: T, S,
9140 Options: ObjCEncOptions()
9141 .setExpandPointedToStructures()
9142 .setExpandStructures()
9143 .setIsOutermostType()
9144 .setEncodingProperty(),
9145 /*Field=*/nullptr);
9146}
9147
/// Map a builtin (primitive) type to its single-character @encode code.
/// Returns ' ' for types that have no encoding yet; diagnoses and returns
/// ' ' for target-specific vector/reference builtins.
static char getObjCEncodingForPrimitiveType(const ASTContext *C,
                                            const BuiltinType *BT) {
    BuiltinType::Kind kind = BT->getKind();
    switch (kind) {
    case BuiltinType::Void:       return 'v';
    case BuiltinType::Bool:       return 'B';
    case BuiltinType::Char8:
    case BuiltinType::Char_U:
    case BuiltinType::UChar:      return 'C';
    case BuiltinType::Char16:
    case BuiltinType::UShort:     return 'S';
    case BuiltinType::Char32:
    case BuiltinType::UInt:       return 'I';
    // 'long' encodes as 'l'/'L' only when it is 32 bits wide; otherwise it
    // is indistinguishable from 'long long'.
    case BuiltinType::ULong:
      return C->getTargetInfo().getLongWidth() == 32 ? 'L' : 'Q';
    case BuiltinType::UInt128:    return 'T';
    case BuiltinType::ULongLong:  return 'Q';
    case BuiltinType::Char_S:
    case BuiltinType::SChar:      return 'c';
    case BuiltinType::Short:      return 's';
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Int:        return 'i';
    case BuiltinType::Long:
      return C->getTargetInfo().getLongWidth() == 32 ? 'l' : 'q';
    case BuiltinType::LongLong:   return 'q';
    case BuiltinType::Int128:     return 't';
    case BuiltinType::Float:      return 'f';
    case BuiltinType::Double:     return 'd';
    case BuiltinType::LongDouble: return 'D';
    case BuiltinType::NullPtr:    return '*'; // like char*

    case BuiltinType::BFloat16:
    case BuiltinType::Float16:
    case BuiltinType::Float128:
    case BuiltinType::Ibm128:
    case BuiltinType::Half:
    case BuiltinType::ShortAccum:
    case BuiltinType::Accum:
    case BuiltinType::LongAccum:
    case BuiltinType::UShortAccum:
    case BuiltinType::UAccum:
    case BuiltinType::ULongAccum:
    case BuiltinType::ShortFract:
    case BuiltinType::Fract:
    case BuiltinType::LongFract:
    case BuiltinType::UShortFract:
    case BuiltinType::UFract:
    case BuiltinType::ULongFract:
    case BuiltinType::SatShortAccum:
    case BuiltinType::SatAccum:
    case BuiltinType::SatLongAccum:
    case BuiltinType::SatUShortAccum:
    case BuiltinType::SatUAccum:
    case BuiltinType::SatULongAccum:
    case BuiltinType::SatShortFract:
    case BuiltinType::SatFract:
    case BuiltinType::SatLongFract:
    case BuiltinType::SatUShortFract:
    case BuiltinType::SatUFract:
    case BuiltinType::SatULongFract:
      // FIXME: potentially need @encodes for these!
      return ' ';

// Target-specific builtin vector/reference types: no encoding exists, so
// emit a hard error rather than silently producing a bogus string.
#define SVE_TYPE(Name, Id, SingletonId) \
  case BuiltinType::Id:
#include "clang/Basic/AArch64ACLETypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) case BuiltinType::Id:
#include "clang/Basic/AMDGPUTypes.def"
      {
        DiagnosticsEngine &Diags = C->getDiagnostics();
        unsigned DiagID = Diags.getCustomDiagID(L: DiagnosticsEngine::Error,
                                                FormatString: "cannot yet @encode type %0");
        Diags.Report(DiagID) << BT->getName(Policy: C->getPrintingPolicy());
        return ' ';
      }

    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      llvm_unreachable("@encoding ObjC primitive type");

    // OpenCL and placeholder types don't need @encodings.
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
    case BuiltinType::OCLSampler:
    case BuiltinType::Dependent:
#define PPC_VECTOR_TYPE(Name, Id, Size) \
    case BuiltinType::Id:
#include "clang/Basic/PPCTypes.def"
#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/HLSLIntangibleTypes.def"
#define BUILTIN_TYPE(KIND, ID)
#define PLACEHOLDER_TYPE(KIND, ID) \
    case BuiltinType::KIND:
#include "clang/AST/BuiltinTypes.def"
      llvm_unreachable("invalid builtin type for @encode");
    }
    llvm_unreachable("invalid BuiltinType::Kind value");
}
9260
9261static char ObjCEncodingForEnumDecl(const ASTContext *C, const EnumDecl *ED) {
9262 EnumDecl *Enum = ED->getDefinitionOrSelf();
9263
9264 // The encoding of an non-fixed enum type is always 'i', regardless of size.
9265 if (!Enum->isFixed())
9266 return 'i';
9267
9268 // The encoding of a fixed enum type matches its fixed underlying type.
9269 const auto *BT = Enum->getIntegerType()->castAs<BuiltinType>();
9270 return getObjCEncodingForPrimitiveType(C, BT);
9271}
9272
/// Append the @encode string for a bit-field to S. The layout of the
/// encoding depends on which Objective-C runtime is targeted (see below).
static void EncodeBitField(const ASTContext *Ctx, std::string& S,
                           QualType T, const FieldDecl *FD) {
  assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl");
  S += 'b';
  // The NeXT runtime encodes bit fields as b followed by the number of bits.
  // The GNU runtime requires more information; bitfields are encoded as b,
  // then the offset (in bits) of the first element, then the type of the
  // bitfield, then the size in bits. For example, in this structure:
  //
  // struct
  // {
  //    int integer;
  //    int flags:2;
  // };
  // On a 32-bit system, the encoding for flags would be b2 for the NeXT
  // runtime, but b32i2 for the GNU runtime. The reason for this extra
  // information is not especially sensible, but we're stuck with it for
  // compatibility with GCC, although providing it breaks anything that
  // actually uses runtime introspection and wants to work on both runtimes...
  if (Ctx->getLangOpts().ObjCRuntime.isGNUFamily()) {
    uint64_t Offset;

    // Bit offset of the field: ivars are looked up through their interface,
    // ordinary fields through their record's layout.
    if (const auto *IVD = dyn_cast<ObjCIvarDecl>(Val: FD)) {
      Offset = Ctx->lookupFieldBitOffset(OID: IVD->getContainingInterface(), Ivar: IVD);
    } else {
      const RecordDecl *RD = FD->getParent();
      const ASTRecordLayout &RL = Ctx->getASTRecordLayout(D: RD);
      Offset = RL.getFieldOffset(FieldNo: FD->getFieldIndex());
    }

    S += llvm::utostr(X: Offset);

    // The underlying type: either the enum's encoding or the primitive's.
    if (const auto *ET = T->getAsCanonical<EnumType>())
      S += ObjCEncodingForEnumDecl(C: Ctx, ED: ET->getDecl());
    else {
      const auto *BT = T->castAs<BuiltinType>();
      S += getObjCEncodingForPrimitiveType(C: Ctx, BT);
    }
  }
  // Both runtimes end with the width of the bit-field in bits.
  S += llvm::utostr(X: FD->getBitWidthValue());
}
9314
9315// Helper function for determining whether the encoded type string would include
9316// a template specialization type.
9317static bool hasTemplateSpecializationInEncodedString(const Type *T,
9318 bool VisitBasesAndFields) {
9319 T = T->getBaseElementTypeUnsafe();
9320
9321 if (auto *PT = T->getAs<PointerType>())
9322 return hasTemplateSpecializationInEncodedString(
9323 T: PT->getPointeeType().getTypePtr(), VisitBasesAndFields: false);
9324
9325 auto *CXXRD = T->getAsCXXRecordDecl();
9326
9327 if (!CXXRD)
9328 return false;
9329
9330 if (isa<ClassTemplateSpecializationDecl>(Val: CXXRD))
9331 return true;
9332
9333 if (!CXXRD->hasDefinition() || !VisitBasesAndFields)
9334 return false;
9335
9336 for (const auto &B : CXXRD->bases())
9337 if (hasTemplateSpecializationInEncodedString(T: B.getType().getTypePtr(),
9338 VisitBasesAndFields: true))
9339 return true;
9340
9341 for (auto *FD : CXXRD->fields())
9342 if (hasTemplateSpecializationInEncodedString(T: FD->getType().getTypePtr(),
9343 VisitBasesAndFields: true))
9344 return true;
9345
9346 return false;
9347}
9348
// FIXME: Use SmallString for accumulating string.
/// Core @encode worker: appends the encoding of T to S, dispatching on the
/// canonical type class. Options controls structure expansion, outermost-type
/// qualifier handling, and block/class-name extras; NotEncodedT (if non-null)
/// receives a type for which no coherent encoding exists yet.
void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
                                            const ObjCEncOptions Options,
                                            const FieldDecl *FD,
                                            QualType *NotEncodedT) const {
  CanQualType CT = getCanonicalType(T);
  switch (CT->getTypeClass()) {
  case Type::Builtin:
  case Type::Enum:
    // Bit-fields have their own encoding irrespective of the element type.
    if (FD && FD->isBitField())
      return EncodeBitField(Ctx: this, S, T, FD);
    if (const auto *BT = dyn_cast<BuiltinType>(Val&: CT))
      S += getObjCEncodingForPrimitiveType(C: this, BT);
    else
      S += ObjCEncodingForEnumDecl(C: this, ED: cast<EnumType>(Val&: CT)->getDecl());
    return;

  case Type::Complex:
    // 'j' prefix followed by the element type's encoding.
    S += 'j';
    getObjCEncodingForTypeImpl(T: T->castAs<ComplexType>()->getElementType(), S,
                               Options: ObjCEncOptions(),
                               /*Field=*/FD: nullptr);
    return;

  case Type::Atomic:
    // 'A' prefix followed by the value type's encoding.
    S += 'A';
    getObjCEncodingForTypeImpl(T: T->castAs<AtomicType>()->getValueType(), S,
                               Options: ObjCEncOptions(),
                               /*Field=*/FD: nullptr);
    return;

  // encoding for pointer or reference types.
  case Type::Pointer:
  case Type::LValueReference:
  case Type::RValueReference: {
    QualType PointeeTy;
    if (isa<PointerType>(Val: CT)) {
      const auto *PT = T->castAs<PointerType>();
      // SEL is encoded as ':' rather than as a generic pointer.
      if (PT->isObjCSelType()) {
        S += ':';
        return;
      }
      PointeeTy = PT->getPointeeType();
    } else {
      PointeeTy = T->castAs<ReferenceType>()->getPointeeType();
    }

    bool isReadOnly = false;
    // For historical/compatibility reasons, the read-only qualifier of the
    // pointee gets emitted _before_ the '^'.  The read-only qualifier of
    // the pointer itself gets ignored, _unless_ we are looking at a typedef!
    // Also, do not emit the 'r' for anything but the outermost type!
    if (T->getAs<TypedefType>()) {
      if (Options.IsOutermostType() && T.isConstQualified()) {
        isReadOnly = true;
        S += 'r';
      }
    } else if (Options.IsOutermostType()) {
      // Look through any chain of pointers to find the innermost pointee's
      // const-ness.
      QualType P = PointeeTy;
      while (auto PT = P->getAs<PointerType>())
        P = PT->getPointeeType();
      if (P.isConstQualified()) {
        isReadOnly = true;
        S += 'r';
      }
    }
    if (isReadOnly) {
      // Another legacy compatibility encoding. Some ObjC qualifier and type
      // combinations need to be rearranged.
      // Rewrite "in const" from "nr" to "rn"
      if (StringRef(S).ends_with(Suffix: "nr"))
        S.replace(i1: S.end()-2, i2: S.end(), s: "rn");
    }

    if (PointeeTy->isCharType()) {
      // char pointer types should be encoded as '*' unless it is a
      // type that has been typedef'd to 'BOOL'.
      if (!isTypeTypedefedAsBOOL(T: PointeeTy)) {
        S += '*';
        return;
      }
    } else if (const auto *RTy = PointeeTy->getAsCanonical<RecordType>()) {
      const IdentifierInfo *II = RTy->getDecl()->getIdentifier();
      // GCC binary compat: Need to convert "struct objc_class *" to "#".
      if (II == &Idents.get(Name: "objc_class")) {
        S += '#';
        return;
      }
      // GCC binary compat: Need to convert "struct objc_object *" to "@".
      if (II == &Idents.get(Name: "objc_object")) {
        S += '@';
        return;
      }
      // If the encoded string for the class includes template names, just emit
      // "^v" for pointers to the class.
      if (getLangOpts().CPlusPlus &&
          (!getLangOpts().EncodeCXXClassTemplateSpec &&
           hasTemplateSpecializationInEncodedString(
               T: RTy, VisitBasesAndFields: Options.ExpandPointedToStructures()))) {
        S += "^v";
        return;
      }
      // fall through...
    }
    S += '^';
    getLegacyIntegralTypeEncoding(PointeeTy);

    // The pointee is encoded with structure expansion only if this pointer
    // was asked to expand pointed-to structures.
    ObjCEncOptions NewOptions;
    if (Options.ExpandPointedToStructures())
      NewOptions.setExpandStructures();
    getObjCEncodingForTypeImpl(T: PointeeTy, S, Options: NewOptions,
                               /*Field=*/FD: nullptr, NotEncodedT);
    return;
  }

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray: {
    const auto *AT = cast<ArrayType>(Val&: CT);

    if (isa<IncompleteArrayType>(Val: AT) && !Options.IsStructField()) {
      // Incomplete arrays are encoded as a pointer to the array element.
      S += '^';

      getObjCEncodingForTypeImpl(
          T: AT->getElementType(), S,
          Options: Options.keepingOnly(Mask: ObjCEncOptions().setExpandStructures()), FD);
    } else {
      S += '[';

      if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: AT))
        S += llvm::utostr(X: CAT->getZExtSize());
      else {
        //Variable length arrays are encoded as a regular array with 0 elements.
        assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) &&
               "Unknown array type!");
        S += '0';
      }

      getObjCEncodingForTypeImpl(
          T: AT->getElementType(), S,
          Options: Options.keepingOnly(Mask: ObjCEncOptions().setExpandStructures()), FD,
          NotEncodedT);
      S += ']';
    }
    return;
  }

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // Function types have no meaningful encoding; '?' is the placeholder.
    S += '?';
    return;

  case Type::Record: {
    RecordDecl *RDecl = cast<RecordType>(Val&: CT)->getDecl();
    // Unions use '(...)', structs use '{...}'.
    S += RDecl->isUnion() ? '(' : '{';
    // Anonymous structures print as '?'
    if (const IdentifierInfo *II = RDecl->getIdentifier()) {
      S += II->getName();
      if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(Val: RDecl)) {
        const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
        llvm::raw_string_ostream OS(S);
        printTemplateArgumentList(OS, Args: TemplateArgs.asArray(),
                                  Policy: getPrintingPolicy());
      }
    } else {
      S += '?';
    }
    if (Options.ExpandStructures()) {
      S += '=';
      if (!RDecl->isUnion()) {
        getObjCEncodingForStructureImpl(RD: RDecl, S, Field: FD, includeVBases: true, NotEncodedT);
      } else {
        // Unions are expanded inline: every member at offset 0, with field
        // names quoted only when encoding for a field/ivar.
        for (const auto *Field : RDecl->fields()) {
          if (FD) {
            S += '"';
            S += Field->getNameAsString();
            S += '"';
          }

          // Special case bit-fields.
          if (Field->isBitField()) {
            getObjCEncodingForTypeImpl(T: Field->getType(), S,
                                       Options: ObjCEncOptions().setExpandStructures(),
                                       FD: Field);
          } else {
            QualType qt = Field->getType();
            getLegacyIntegralTypeEncoding(PointeeTy&: qt);
            getObjCEncodingForTypeImpl(
                T: qt, S,
                Options: ObjCEncOptions().setExpandStructures().setIsStructField(), FD,
                NotEncodedT);
          }
        }
      }
    }
    S += RDecl->isUnion() ? ')' : '}';
    return;
  }

  case Type::BlockPointer: {
    const auto *BT = T->castAs<BlockPointerType>();
    S += "@?"; // Unlike a pointer-to-function, which is "^?".
    if (Options.EncodeBlockParameters()) {
      const auto *FT = BT->getPointeeType()->castAs<FunctionType>();

      // Extended encoding: "<ret @? params>".
      S += '<';
      // Block return type
      getObjCEncodingForTypeImpl(T: FT->getReturnType(), S,
                                 Options: Options.forComponentType(), FD, NotEncodedT);
      // Block self
      S += "@?";
      // Block parameters
      if (const auto *FPT = dyn_cast<FunctionProtoType>(Val: FT)) {
        for (const auto &I : FPT->param_types())
          getObjCEncodingForTypeImpl(T: I, S, Options: Options.forComponentType(), FD,
                                     NotEncodedT);
      }
      S += '>';
    }
    return;
  }

  case Type::ObjCObject: {
    // hack to match legacy encoding of *id and *Class
    QualType Ty = getObjCObjectPointerType(ObjectT: CT);
    if (Ty->isObjCIdType()) {
      S += "{objc_object=}";
      return;
    }
    else if (Ty->isObjCClassType()) {
      S += "{objc_class=}";
      return;
    }
    // TODO: Double check to make sure this intentionally falls through.
    [[fallthrough]];
  }

  case Type::ObjCInterface: {
    // Ignore protocol qualifiers when mangling at this level.
    // @encode(class_name)
    ObjCInterfaceDecl *OI = T->castAs<ObjCObjectType>()->getInterface();
    S += '{';
    S += OI->getObjCRuntimeNameAsString();
    if (Options.ExpandStructures()) {
      S += '=';
      // Collect all ivars, including those from superclasses, and encode
      // each in declaration order.
      SmallVector<const ObjCIvarDecl*, 32> Ivars;
      DeepCollectObjCIvars(OI, leafClass: true, Ivars);
      for (unsigned i = 0, e = Ivars.size(); i != e; ++i) {
        const FieldDecl *Field = Ivars[i];
        if (Field->isBitField())
          getObjCEncodingForTypeImpl(T: Field->getType(), S,
                                     Options: ObjCEncOptions().setExpandStructures(),
                                     FD: Field);
        else
          getObjCEncodingForTypeImpl(T: Field->getType(), S,
                                     Options: ObjCEncOptions().setExpandStructures(), FD,
                                     NotEncodedT);
      }
    }
    S += '}';
    return;
  }

  case Type::ObjCObjectPointer: {
    const auto *OPT = T->castAs<ObjCObjectPointerType>();
    if (OPT->isObjCIdType()) {
      S += '@';
      return;
    }

    if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) {
      // FIXME: Consider if we need to output qualifiers for 'Class<p>'.
      // Since this is a binary compatibility issue, need to consult with
      // runtime folks. Fortunately, this is a *very* obscure construct.
      S += '#';
      return;
    }

    if (OPT->isObjCQualifiedIdType()) {
      getObjCEncodingForTypeImpl(
          T: getObjCIdType(), S,
          Options: Options.keepingOnly(Mask: ObjCEncOptions()
                                  .setExpandPointedToStructures()
                                  .setExpandStructures()),
          FD);
      if (FD || Options.EncodingProperty() || Options.EncodeClassNames()) {
        // Note that we do extended encoding of protocol qualifier list
        // Only when doing ivar or property encoding.
        S += '"';
        for (const auto *I : OPT->quals()) {
          S += '<';
          S += I->getObjCRuntimeNameAsString();
          S += '>';
        }
        S += '"';
      }
      return;
    }

    // Plain object pointer: '@', optionally followed by the quoted class
    // name and protocol list when encoding an ivar, property, or class name.
    S += '@';
    if (OPT->getInterfaceDecl() &&
        (FD || Options.EncodingProperty() || Options.EncodeClassNames())) {
      S += '"';
      S += OPT->getInterfaceDecl()->getObjCRuntimeNameAsString();
      for (const auto *I : OPT->quals()) {
        S += '<';
        S += I->getObjCRuntimeNameAsString();
        S += '>';
      }
      S += '"';
    }
    return;
  }

  // gcc just blithely ignores member pointers.
  // FIXME: we should do better than that.  'M' is available.
  case Type::MemberPointer:
  // This matches gcc's encoding, even though technically it is insufficient.
  //FIXME. We should do a better job than gcc.
  case Type::Vector:
  case Type::ExtVector:
    // Until we have a coherent encoding of these three types, issue warning.
    if (NotEncodedT)
      *NotEncodedT = T;
    return;

  case Type::ConstantMatrix:
    if (NotEncodedT)
      *NotEncodedT = T;
    return;

  case Type::BitInt:
    if (NotEncodedT)
      *NotEncodedT = T;
    return;

  // We could see an undeduced auto type here during error recovery.
  // Just ignore it.
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    return;

  case Type::HLSLAttributedResource:
  case Type::HLSLInlineSpirv:
  case Type::OverflowBehavior:
    llvm_unreachable("unexpected type");

  case Type::ArrayParameter:
  case Type::Pipe:
#define ABSTRACT_TYPE(KIND, BASE)
#define TYPE(KIND, BASE)
#define DEPENDENT_TYPE(KIND, BASE) \
  case Type::KIND:
#define NON_CANONICAL_TYPE(KIND, BASE) \
  case Type::KIND:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(KIND, BASE) \
  case Type::KIND:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("@encode for dependent type!");
  }
  llvm_unreachable("bad type kind!");
}
9712
/// Append the expanded encoding of a struct/class body to S, walking bases
/// and fields in layout-offset order. When includeVBases is false only the
/// non-virtual part is encoded (used when a base is embedded in a derived
/// class's encoding).
void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl,
                                                 std::string &S,
                                                 const FieldDecl *FD,
                                                 bool includeVBases,
                                                 QualType *NotEncodedT) const {
  assert(RDecl && "Expected non-null RecordDecl");
  assert(!RDecl->isUnion() && "Should not be called for unions");
  // Nothing to encode without a (valid) definition.
  if (!RDecl->getDefinition() || RDecl->getDefinition()->isInvalidDecl())
    return;

  const auto *CXXRec = dyn_cast<CXXRecordDecl>(Val: RDecl);
  // Bases and fields keyed by bit offset; a multimap keeps insertion order
  // stable for entries that share an offset (e.g. empty members).
  std::multimap<uint64_t, NamedDecl *> FieldOrBaseOffsets;
  const ASTRecordLayout &layout = getASTRecordLayout(D: RDecl);

  if (CXXRec) {
    // Collect non-virtual, non-empty bases at their layout offsets.
    for (const auto &BI : CXXRec->bases()) {
      if (!BI.isVirtual()) {
        CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl();
        if (base->isEmpty())
          continue;
        uint64_t offs = toBits(CharSize: layout.getBaseClassOffset(Base: base));
        FieldOrBaseOffsets.insert(position: FieldOrBaseOffsets.upper_bound(x: offs),
                                  x: std::make_pair(x&: offs, y&: base));
      }
    }
  }

  // Collect the record's own fields, skipping zero-size non-bit-fields.
  for (FieldDecl *Field : RDecl->fields()) {
    if (!Field->isZeroLengthBitField() && Field->isZeroSize(Ctx: *this))
      continue;
    uint64_t offs = layout.getFieldOffset(FieldNo: Field->getFieldIndex());
    FieldOrBaseOffsets.insert(position: FieldOrBaseOffsets.upper_bound(x: offs),
                              x: std::make_pair(x&: offs, y&: Field));
  }

  if (CXXRec && includeVBases) {
    // Virtual bases live after the non-virtual part; only add ones that do
    // not collide with an already-recorded offset.
    for (const auto &BI : CXXRec->vbases()) {
      CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl();
      if (base->isEmpty())
        continue;
      uint64_t offs = toBits(CharSize: layout.getVBaseClassOffset(VBase: base));
      if (offs >= uint64_t(toBits(CharSize: layout.getNonVirtualSize())) &&
          FieldOrBaseOffsets.find(x: offs) == FieldOrBaseOffsets.end())
        FieldOrBaseOffsets.insert(position: FieldOrBaseOffsets.end(),
                                  x: std::make_pair(x&: offs, y&: base));
    }
  }

  // Size of the part of the record being encoded.
  CharUnits size;
  if (CXXRec) {
    size = includeVBases ? layout.getSize() : layout.getNonVirtualSize();
  } else {
    size = layout.getSize();
  }

#ifndef NDEBUG
  uint64_t CurOffs = 0;
#endif
  std::multimap<uint64_t, NamedDecl *>::iterator
    CurLayObj = FieldOrBaseOffsets.begin();

  // A dynamic class with nothing at offset 0 starts with its vtable pointer.
  if (CXXRec && CXXRec->isDynamicClass() &&
      (CurLayObj == FieldOrBaseOffsets.end() || CurLayObj->first != 0)) {
    if (FD) {
      S += "\"_vptr$";
      std::string recname = CXXRec->getNameAsString();
      if (recname.empty()) recname = "?";
      S += recname;
      S += '"';
    }
    S += "^^?";
#ifndef NDEBUG
    CurOffs += getTypeSize(VoidPtrTy);
#endif
  }

  if (!RDecl->hasFlexibleArrayMember()) {
    // Mark the end of the structure.
    uint64_t offs = toBits(CharSize: size);
    FieldOrBaseOffsets.insert(position: FieldOrBaseOffsets.upper_bound(x: offs),
                              x: std::make_pair(x&: offs, y: nullptr));
  }

  for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) {
#ifndef NDEBUG
    assert(CurOffs <= CurLayObj->first);
    if (CurOffs < CurLayObj->first) {
      uint64_t padding = CurLayObj->first - CurOffs;
      // FIXME: There doesn't seem to be a way to indicate in the encoding that
      // packing/alignment of members is different that normal, in which case
      // the encoding will be out-of-sync with the real layout.
      // If the runtime switches to just consider the size of types without
      // taking into account alignment, we could make padding explicit in the
      // encoding (e.g. using arrays of chars). The encoding strings would be
      // longer then though.
      CurOffs += padding;
    }
#endif

    NamedDecl *dcl = CurLayObj->second;
    if (!dcl)
      break; // reached end of structure.

    if (auto *base = dyn_cast<CXXRecordDecl>(Val: dcl)) {
      // We expand the bases without their virtual bases since those are going
      // in the initial structure. Note that this differs from gcc which
      // expands virtual bases each time one is encountered in the hierarchy,
      // making the encoding type bigger than it really is.
      getObjCEncodingForStructureImpl(RDecl: base, S, FD, /*includeVBases*/false,
                                      NotEncodedT);
      assert(!base->isEmpty());
#ifndef NDEBUG
      CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize());
#endif
    } else {
      const auto *field = cast<FieldDecl>(Val: dcl);
      // Field names are only emitted when encoding an ivar/field context.
      if (FD) {
        S += '"';
        S += field->getNameAsString();
        S += '"';
      }

      if (field->isBitField()) {
        EncodeBitField(Ctx: this, S, T: field->getType(), FD: field);
#ifndef NDEBUG
        CurOffs += field->getBitWidthValue();
#endif
      } else {
        QualType qt = field->getType();
        getLegacyIntegralTypeEncoding(PointeeTy&: qt);
        getObjCEncodingForTypeImpl(
            T: qt, S, Options: ObjCEncOptions().setExpandStructures().setIsStructField(),
            FD, NotEncodedT);
#ifndef NDEBUG
        CurOffs += getTypeSize(field->getType());
#endif
      }
    }
  }
}
9853
9854void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT,
9855 std::string& S) const {
9856 if (QT & Decl::OBJC_TQ_In)
9857 S += 'n';
9858 if (QT & Decl::OBJC_TQ_Inout)
9859 S += 'N';
9860 if (QT & Decl::OBJC_TQ_Out)
9861 S += 'o';
9862 if (QT & Decl::OBJC_TQ_Bycopy)
9863 S += 'O';
9864 if (QT & Decl::OBJC_TQ_Byref)
9865 S += 'R';
9866 if (QT & Decl::OBJC_TQ_Oneway)
9867 S += 'V';
9868}
9869
9870TypedefDecl *ASTContext::getObjCIdDecl() const {
9871 if (!ObjCIdDecl) {
9872 QualType T = getObjCObjectType(BaseType: ObjCBuiltinIdTy, Protocols: {}, NumProtocols: {});
9873 T = getObjCObjectPointerType(ObjectT: T);
9874 ObjCIdDecl = buildImplicitTypedef(T, Name: "id");
9875 }
9876 return ObjCIdDecl;
9877}
9878
9879TypedefDecl *ASTContext::getObjCSelDecl() const {
9880 if (!ObjCSelDecl) {
9881 QualType T = getPointerType(T: ObjCBuiltinSelTy);
9882 ObjCSelDecl = buildImplicitTypedef(T, Name: "SEL");
9883 }
9884 return ObjCSelDecl;
9885}
9886
// Lazily create (and cache) the implicit 'Class' typedef: a pointer to an
// unqualified ObjC object type built on ObjCBuiltinClassTy.
TypedefDecl *ASTContext::getObjCClassDecl() const {
  if (!ObjCClassDecl) {
    QualType T = getObjCObjectType(BaseType: ObjCBuiltinClassTy, Protocols: {}, NumProtocols: {});
    T = getObjCObjectPointerType(ObjectT: T);
    ObjCClassDecl = buildImplicitTypedef(T, Name: "Class");
  }
  return ObjCClassDecl;
}
9895
// Lazily create (and cache) the implicit, internal forward declaration of
// the 'Protocol' interface at translation-unit scope.
ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const {
  if (!ObjCProtocolClassDecl) {
    ObjCProtocolClassDecl
      = ObjCInterfaceDecl::Create(C: *this, DC: getTranslationUnitDecl(),
                                  atLoc: SourceLocation(),
                                  Id: &Idents.get(Name: "Protocol"),
                                  /*typeParamList=*/nullptr,
                                  /*PrevDecl=*/nullptr,
                                  ClassLoc: SourceLocation(), isInternal: true);
  }

  return ObjCProtocolClassDecl;
}
9909
// Return the pointer-authentication qualifier applied to SEL members of
// ObjC interfaces, or a null qualifier when the language option is off.
PointerAuthQualifier ASTContext::getObjCMemberSelTypePtrAuth() {
  if (!getLangOpts().PointerAuthObjcInterfaceSel)
    return PointerAuthQualifier();
  // SEL members are address-discriminated with a constant extra
  // discriminator and use the sign-and-authenticate mode.
  return PointerAuthQualifier::Create(
      Key: getLangOpts().PointerAuthObjcInterfaceSelKey,
      /*isAddressDiscriminated=*/IsAddressDiscriminated: true, ExtraDiscriminator: SelPointerConstantDiscriminator,
      AuthenticationMode: PointerAuthenticationMode::SignAndAuth,
      /*isIsaPointer=*/IsIsaPointer: false,
      /*authenticatesNullValues=*/AuthenticatesNullValues: false);
}
9920
9921//===----------------------------------------------------------------------===//
9922// __builtin_va_list Construction Functions
9923//===----------------------------------------------------------------------===//
9924
/// Build an implicit va_list typedef of the form 'typedef char *<Name>;'.
static TypedefDecl *CreateCharPtrNamedVaListDecl(const ASTContext *Context,
                                                 StringRef Name) {
  // typedef char* __builtin[_ms]_va_list;
  QualType T = Context->getPointerType(T: Context->CharTy);
  return Context->buildImplicitTypedef(T, Name);
}
9931
/// Build the Microsoft ABI va_list: 'typedef char *__builtin_ms_va_list;'.
static TypedefDecl *CreateMSVaListDecl(const ASTContext *Context) {
  return CreateCharPtrNamedVaListDecl(Context, Name: "__builtin_ms_va_list");
}
9935
/// Build the char-pointer va_list: 'typedef char *__builtin_va_list;'.
static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) {
  return CreateCharPtrNamedVaListDecl(Context, Name: "__builtin_va_list");
}
9939
/// Build the void-pointer va_list: 'typedef void *__builtin_va_list;'.
static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) {
  // typedef void* __builtin_va_list;
  QualType T = Context->getPointerType(T: Context->VoidTy);
  return Context->buildImplicitTypedef(T, Name: "__builtin_va_list");
}
9945
/// Build the AArch64 AAPCS64 va_list: a five-field struct '__va_list'
/// (placed in namespace std in C++ mode, as the ABI requires) typedef'd
/// to '__builtin_va_list'.
static TypedefDecl *
CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) {
  // struct __va_list
  RecordDecl *VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list");
  if (Context->getLangOpts().CPlusPlus) {
    // namespace std { struct __va_list {
    // The record is re-parented into an implicit 'std' namespace so its
    // mangled name matches the C++ ABI.
    auto *NS = NamespaceDecl::Create(
        C&: const_cast<ASTContext &>(*Context), DC: Context->getTranslationUnitDecl(),
        /*Inline=*/false, StartLoc: SourceLocation(), IdLoc: SourceLocation(),
        Id: &Context->Idents.get(Name: "std"),
        /*PrevDecl=*/nullptr, /*Nested=*/false);
    NS->setImplicit();
    VaListTagDecl->setDeclContext(NS);
  }

  VaListTagDecl->startDefinition();

  const size_t NumFields = 5;
  QualType FieldTypes[NumFields];
  const char *FieldNames[NumFields];

  // void *__stack;
  FieldTypes[0] = Context->getPointerType(T: Context->VoidTy);
  FieldNames[0] = "__stack";

  // void *__gr_top;
  FieldTypes[1] = Context->getPointerType(T: Context->VoidTy);
  FieldNames[1] = "__gr_top";

  // void *__vr_top;
  FieldTypes[2] = Context->getPointerType(T: Context->VoidTy);
  FieldNames[2] = "__vr_top";

  // int __gr_offs;
  FieldTypes[3] = Context->IntTy;
  FieldNames[3] = "__gr_offs";

  // int __vr_offs;
  FieldTypes[4] = Context->IntTy;
  FieldNames[4] = "__vr_offs";

  // Create fields
  for (unsigned i = 0; i < NumFields; ++i) {
    FieldDecl *Field = FieldDecl::Create(C: const_cast<ASTContext &>(*Context),
                                         DC: VaListTagDecl,
                                         StartLoc: SourceLocation(),
                                         IdLoc: SourceLocation(),
                                         Id: &Context->Idents.get(Name: FieldNames[i]),
                                         T: FieldTypes[i], /*TInfo=*/nullptr,
                                         /*BitWidth=*/BW: nullptr,
                                         /*Mutable=*/false,
                                         InitStyle: ICIS_NoInit);
    Field->setAccess(AS_public);
    VaListTagDecl->addDecl(D: Field);
  }
  VaListTagDecl->completeDefinition();
  // Cache the tag so getVaListTagDecl() can find it.
  Context->VaListTagDecl = VaListTagDecl;
  CanQualType VaListTagType = Context->getCanonicalTagType(TD: VaListTagDecl);

  // } __builtin_va_list;
  return Context->buildImplicitTypedef(T: VaListTagType, Name: "__builtin_va_list");
}
10008
/// Build the 32-bit PowerPC SVR4 va_list: a five-field '__va_list_tag'
/// struct, with '__builtin_va_list' defined as a one-element array of it.
static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) {
  // typedef struct __va_list_tag {
  RecordDecl *VaListTagDecl;

  VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list_tag");
  VaListTagDecl->startDefinition();

  const size_t NumFields = 5;
  QualType FieldTypes[NumFields];
  const char *FieldNames[NumFields];

  // unsigned char gpr;
  FieldTypes[0] = Context->UnsignedCharTy;
  FieldNames[0] = "gpr";

  // unsigned char fpr;
  FieldTypes[1] = Context->UnsignedCharTy;
  FieldNames[1] = "fpr";

  // unsigned short reserved;
  FieldTypes[2] = Context->UnsignedShortTy;
  FieldNames[2] = "reserved";

  // void* overflow_arg_area;
  FieldTypes[3] = Context->getPointerType(T: Context->VoidTy);
  FieldNames[3] = "overflow_arg_area";

  // void* reg_save_area;
  FieldTypes[4] = Context->getPointerType(T: Context->VoidTy);
  FieldNames[4] = "reg_save_area";

  // Create fields
  for (unsigned i = 0; i < NumFields; ++i) {
    FieldDecl *Field = FieldDecl::Create(C: *Context, DC: VaListTagDecl,
                                         StartLoc: SourceLocation(),
                                         IdLoc: SourceLocation(),
                                         Id: &Context->Idents.get(Name: FieldNames[i]),
                                         T: FieldTypes[i], /*TInfo=*/nullptr,
                                         /*BitWidth=*/BW: nullptr,
                                         /*Mutable=*/false,
                                         InitStyle: ICIS_NoInit);
    Field->setAccess(AS_public);
    VaListTagDecl->addDecl(D: Field);
  }
  VaListTagDecl->completeDefinition();
  // Cache the tag so getVaListTagDecl() can find it.
  Context->VaListTagDecl = VaListTagDecl;
  CanQualType VaListTagType = Context->getCanonicalTagType(TD: VaListTagDecl);

  // } __va_list_tag;
  TypedefDecl *VaListTagTypedefDecl =
      Context->buildImplicitTypedef(T: VaListTagType, Name: "__va_list_tag");

  QualType VaListTagTypedefType =
      Context->getTypedefType(Keyword: ElaboratedTypeKeyword::None,
                              /*Qualifier=*/std::nullopt, Decl: VaListTagTypedefDecl);

  // typedef __va_list_tag __builtin_va_list[1];
  llvm::APInt Size(Context->getTypeSize(T: Context->getSizeType()), 1);
  QualType VaListTagArrayType = Context->getConstantArrayType(
      EltTy: VaListTagTypedefType, ArySizeIn: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
  return Context->buildImplicitTypedef(T: VaListTagArrayType, Name: "__builtin_va_list");
}
10071
/// Build the x86-64 System V va_list: a four-field '__va_list_tag' struct,
/// with '__builtin_va_list' defined as a one-element array of it.
static TypedefDecl *
CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) {
  // struct __va_list_tag {
  RecordDecl *VaListTagDecl;
  VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list_tag");
  VaListTagDecl->startDefinition();

  const size_t NumFields = 4;
  QualType FieldTypes[NumFields];
  const char *FieldNames[NumFields];

  // unsigned gp_offset;
  FieldTypes[0] = Context->UnsignedIntTy;
  FieldNames[0] = "gp_offset";

  // unsigned fp_offset;
  FieldTypes[1] = Context->UnsignedIntTy;
  FieldNames[1] = "fp_offset";

  // void* overflow_arg_area;
  FieldTypes[2] = Context->getPointerType(T: Context->VoidTy);
  FieldNames[2] = "overflow_arg_area";

  // void* reg_save_area;
  FieldTypes[3] = Context->getPointerType(T: Context->VoidTy);
  FieldNames[3] = "reg_save_area";

  // Create fields
  for (unsigned i = 0; i < NumFields; ++i) {
    FieldDecl *Field = FieldDecl::Create(C: const_cast<ASTContext &>(*Context),
                                         DC: VaListTagDecl,
                                         StartLoc: SourceLocation(),
                                         IdLoc: SourceLocation(),
                                         Id: &Context->Idents.get(Name: FieldNames[i]),
                                         T: FieldTypes[i], /*TInfo=*/nullptr,
                                         /*BitWidth=*/BW: nullptr,
                                         /*Mutable=*/false,
                                         InitStyle: ICIS_NoInit);
    Field->setAccess(AS_public);
    VaListTagDecl->addDecl(D: Field);
  }
  VaListTagDecl->completeDefinition();
  // Cache the tag so getVaListTagDecl() can find it.
  Context->VaListTagDecl = VaListTagDecl;
  CanQualType VaListTagType = Context->getCanonicalTagType(TD: VaListTagDecl);

  // };

  // typedef struct __va_list_tag __builtin_va_list[1];
  llvm::APInt Size(Context->getTypeSize(T: Context->getSizeType()), 1);
  QualType VaListTagArrayType = Context->getConstantArrayType(
      EltTy: VaListTagType, ArySizeIn: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
  return Context->buildImplicitTypedef(T: VaListTagArrayType, Name: "__builtin_va_list");
}
10125
/// Build the 32-bit ARM AAPCS va_list: a single-field struct '__va_list'
/// (placed in namespace std in C++ mode, as the ABI requires) typedef'd
/// to '__builtin_va_list'.
static TypedefDecl *
CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) {
  // struct __va_list
  RecordDecl *VaListDecl = Context->buildImplicitRecord(Name: "__va_list");
  if (Context->getLangOpts().CPlusPlus) {
    // namespace std { struct __va_list {
    // The record is re-parented into an implicit 'std' namespace so its
    // mangled name matches the C++ ABI.
    NamespaceDecl *NS;
    NS = NamespaceDecl::Create(C&: const_cast<ASTContext &>(*Context),
                               DC: Context->getTranslationUnitDecl(),
                               /*Inline=*/false, StartLoc: SourceLocation(),
                               IdLoc: SourceLocation(), Id: &Context->Idents.get(Name: "std"),
                               /*PrevDecl=*/nullptr, /*Nested=*/false);
    NS->setImplicit();
    VaListDecl->setDeclContext(NS);
  }

  VaListDecl->startDefinition();

  // void * __ap;
  FieldDecl *Field = FieldDecl::Create(C: const_cast<ASTContext &>(*Context),
                                       DC: VaListDecl,
                                       StartLoc: SourceLocation(),
                                       IdLoc: SourceLocation(),
                                       Id: &Context->Idents.get(Name: "__ap"),
                                       T: Context->getPointerType(T: Context->VoidTy),
                                       /*TInfo=*/nullptr,
                                       /*BitWidth=*/BW: nullptr,
                                       /*Mutable=*/false,
                                       InitStyle: ICIS_NoInit);
  Field->setAccess(AS_public);
  VaListDecl->addDecl(D: Field);

  // };
  VaListDecl->completeDefinition();
  // Cache the tag so getVaListTagDecl() can find it.
  Context->VaListTagDecl = VaListDecl;

  // typedef struct __va_list __builtin_va_list;
  CanQualType T = Context->getCanonicalTagType(TD: VaListDecl);
  return Context->buildImplicitTypedef(T, Name: "__builtin_va_list");
}
10166
/// Build the SystemZ va_list: a four-field '__va_list_tag' struct, with
/// '__builtin_va_list' defined as a one-element array of it.
static TypedefDecl *
CreateSystemZBuiltinVaListDecl(const ASTContext *Context) {
  // struct __va_list_tag {
  RecordDecl *VaListTagDecl;
  VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list_tag");
  VaListTagDecl->startDefinition();

  const size_t NumFields = 4;
  QualType FieldTypes[NumFields];
  const char *FieldNames[NumFields];

  // long __gpr;
  FieldTypes[0] = Context->LongTy;
  FieldNames[0] = "__gpr";

  // long __fpr;
  FieldTypes[1] = Context->LongTy;
  FieldNames[1] = "__fpr";

  // void *__overflow_arg_area;
  FieldTypes[2] = Context->getPointerType(T: Context->VoidTy);
  FieldNames[2] = "__overflow_arg_area";

  // void *__reg_save_area;
  FieldTypes[3] = Context->getPointerType(T: Context->VoidTy);
  FieldNames[3] = "__reg_save_area";

  // Create fields
  for (unsigned i = 0; i < NumFields; ++i) {
    FieldDecl *Field = FieldDecl::Create(C: const_cast<ASTContext &>(*Context),
                                         DC: VaListTagDecl,
                                         StartLoc: SourceLocation(),
                                         IdLoc: SourceLocation(),
                                         Id: &Context->Idents.get(Name: FieldNames[i]),
                                         T: FieldTypes[i], /*TInfo=*/nullptr,
                                         /*BitWidth=*/BW: nullptr,
                                         /*Mutable=*/false,
                                         InitStyle: ICIS_NoInit);
    Field->setAccess(AS_public);
    VaListTagDecl->addDecl(D: Field);
  }
  VaListTagDecl->completeDefinition();
  // Cache the tag so getVaListTagDecl() can find it.
  Context->VaListTagDecl = VaListTagDecl;
  CanQualType VaListTagType = Context->getCanonicalTagType(TD: VaListTagDecl);

  // };

  // typedef __va_list_tag __builtin_va_list[1];
  llvm::APInt Size(Context->getTypeSize(T: Context->getSizeType()), 1);
  QualType VaListTagArrayType = Context->getConstantArrayType(
      EltTy: VaListTagType, ArySizeIn: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);

  return Context->buildImplicitTypedef(T: VaListTagArrayType, Name: "__builtin_va_list");
}
10221
/// Build the Hexagon va_list: a three-pointer '__va_list_tag' struct, with
/// '__builtin_va_list' defined as a one-element array of its typedef.
static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) {
  // typedef struct __va_list_tag {
  RecordDecl *VaListTagDecl;
  VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list_tag");
  VaListTagDecl->startDefinition();

  const size_t NumFields = 3;
  QualType FieldTypes[NumFields];
  const char *FieldNames[NumFields];

  // void *CurrentSavedRegisterArea;
  FieldTypes[0] = Context->getPointerType(T: Context->VoidTy);
  FieldNames[0] = "__current_saved_reg_area_pointer";

  // void *SavedRegAreaEnd;
  FieldTypes[1] = Context->getPointerType(T: Context->VoidTy);
  FieldNames[1] = "__saved_reg_area_end_pointer";

  // void *OverflowArea;
  FieldTypes[2] = Context->getPointerType(T: Context->VoidTy);
  FieldNames[2] = "__overflow_area_pointer";

  // Create fields
  for (unsigned i = 0; i < NumFields; ++i) {
    FieldDecl *Field = FieldDecl::Create(
        C: const_cast<ASTContext &>(*Context), DC: VaListTagDecl, StartLoc: SourceLocation(),
        IdLoc: SourceLocation(), Id: &Context->Idents.get(Name: FieldNames[i]), T: FieldTypes[i],
        /*TInfo=*/nullptr,
        /*BitWidth=*/BW: nullptr,
        /*Mutable=*/false, InitStyle: ICIS_NoInit);
    Field->setAccess(AS_public);
    VaListTagDecl->addDecl(D: Field);
  }
  VaListTagDecl->completeDefinition();
  // Cache the tag so getVaListTagDecl() can find it.
  Context->VaListTagDecl = VaListTagDecl;
  CanQualType VaListTagType = Context->getCanonicalTagType(TD: VaListTagDecl);

  // } __va_list_tag;
  TypedefDecl *VaListTagTypedefDecl =
      Context->buildImplicitTypedef(T: VaListTagType, Name: "__va_list_tag");

  QualType VaListTagTypedefType =
      Context->getTypedefType(Keyword: ElaboratedTypeKeyword::None,
                              /*Qualifier=*/std::nullopt, Decl: VaListTagTypedefDecl);

  // typedef __va_list_tag __builtin_va_list[1];
  llvm::APInt Size(Context->getTypeSize(T: Context->getSizeType()), 1);
  QualType VaListTagArrayType = Context->getConstantArrayType(
      EltTy: VaListTagTypedefType, ArySizeIn: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);

  return Context->buildImplicitTypedef(T: VaListTagArrayType, Name: "__builtin_va_list");
}
10274
/// Build the Xtensa va_list: a three-field '__va_list_tag' struct typedef'd
/// directly to '__builtin_va_list' (no one-element array wrapper).
static TypedefDecl *
CreateXtensaABIBuiltinVaListDecl(const ASTContext *Context) {
  // typedef struct __va_list_tag {
  RecordDecl *VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list_tag");

  VaListTagDecl->startDefinition();

  // int* __va_stk;
  // int* __va_reg;
  // int __va_ndx;
  constexpr size_t NumFields = 3;
  QualType FieldTypes[NumFields] = {Context->getPointerType(T: Context->IntTy),
                                    Context->getPointerType(T: Context->IntTy),
                                    Context->IntTy};
  const char *FieldNames[NumFields] = {"__va_stk", "__va_reg", "__va_ndx"};

  // Create fields
  for (unsigned i = 0; i < NumFields; ++i) {
    FieldDecl *Field = FieldDecl::Create(
        C: *Context, DC: VaListTagDecl, StartLoc: SourceLocation(), IdLoc: SourceLocation(),
        Id: &Context->Idents.get(Name: FieldNames[i]), T: FieldTypes[i], /*TInfo=*/nullptr,
        /*BitWidth=*/BW: nullptr,
        /*Mutable=*/false, InitStyle: ICIS_NoInit);
    Field->setAccess(AS_public);
    VaListTagDecl->addDecl(D: Field);
  }
  VaListTagDecl->completeDefinition();
  // Cache the tag so getVaListTagDecl() can find it.
  Context->VaListTagDecl = VaListTagDecl;
  CanQualType VaListTagType = Context->getCanonicalTagType(TD: VaListTagDecl);

  // } __builtin_va_list; -- unlike most targets, the typedef names the
  // struct itself rather than a one-element array of it.
  TypedefDecl *VaListTagTypedefDecl =
      Context->buildImplicitTypedef(T: VaListTagType, Name: "__builtin_va_list");

  return VaListTagTypedefDecl;
}
10311
/// Dispatch to the target-appropriate __builtin_va_list builder; the switch
/// is fully covered so a new BuiltinVaListKind triggers a compiler warning.
static TypedefDecl *CreateVaListDecl(const ASTContext *Context,
                                     TargetInfo::BuiltinVaListKind Kind) {
  switch (Kind) {
  case TargetInfo::CharPtrBuiltinVaList:
    return CreateCharPtrBuiltinVaListDecl(Context);
  case TargetInfo::VoidPtrBuiltinVaList:
    return CreateVoidPtrBuiltinVaListDecl(Context);
  case TargetInfo::AArch64ABIBuiltinVaList:
    return CreateAArch64ABIBuiltinVaListDecl(Context);
  case TargetInfo::PowerABIBuiltinVaList:
    return CreatePowerABIBuiltinVaListDecl(Context);
  case TargetInfo::X86_64ABIBuiltinVaList:
    return CreateX86_64ABIBuiltinVaListDecl(Context);
  case TargetInfo::AAPCSABIBuiltinVaList:
    return CreateAAPCSABIBuiltinVaListDecl(Context);
  case TargetInfo::SystemZBuiltinVaList:
    return CreateSystemZBuiltinVaListDecl(Context);
  case TargetInfo::HexagonBuiltinVaList:
    return CreateHexagonBuiltinVaListDecl(Context);
  case TargetInfo::XtensaABIBuiltinVaList:
    return CreateXtensaABIBuiltinVaListDecl(Context);
  }

  llvm_unreachable("Unhandled __builtin_va_list type kind");
}
10337
// Lazily create (and cache) the target-specific __builtin_va_list typedef.
TypedefDecl *ASTContext::getBuiltinVaListDecl() const {
  if (!BuiltinVaListDecl) {
    BuiltinVaListDecl = CreateVaListDecl(Context: this, Kind: Target->getBuiltinVaListKind());
    assert(BuiltinVaListDecl->isImplicit());
  }

  return BuiltinVaListDecl;
}
10346
// Return the record backing __builtin_va_list, if the target has one.
Decl *ASTContext::getVaListTagDecl() const {
  // Force the creation of VaListTagDecl by building the __builtin_va_list
  // declaration. (The Create*VaListDecl builders cache it as a side effect.)
  if (!VaListTagDecl)
    (void)getBuiltinVaListDecl();

  return VaListTagDecl;
}
10355
// Lazily create (and cache) the __builtin_ms_va_list typedef.
TypedefDecl *ASTContext::getBuiltinMSVaListDecl() const {
  if (!BuiltinMSVaListDecl)
    BuiltinMSVaListDecl = CreateMSVaListDecl(Context: this);

  return BuiltinMSVaListDecl;
}
10362
10363bool ASTContext::canBuiltinBeRedeclared(const FunctionDecl *FD) const {
10364 // Allow redecl custom type checking builtin for HLSL.
10365 if (LangOpts.HLSL && FD->getBuiltinID() != Builtin::NotBuiltin &&
10366 BuiltinInfo.hasCustomTypechecking(ID: FD->getBuiltinID()))
10367 return true;
10368 // Allow redecl custom type checking builtin for SPIR-V.
10369 if (getTargetInfo().getTriple().isSPIROrSPIRV() &&
10370 BuiltinInfo.isTSBuiltin(ID: FD->getBuiltinID()) &&
10371 BuiltinInfo.hasCustomTypechecking(ID: FD->getBuiltinID()))
10372 return true;
10373 return BuiltinInfo.canBeRedeclared(ID: FD->getBuiltinID());
10374}
10375
// Record the interface type used for constant ObjC strings; may be set
// only once per context.
void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) {
  assert(ObjCConstantStringType.isNull() &&
         "'NSConstantString' type already set!");

  ObjCConstantStringType = getObjCInterfaceType(Decl);
}
10382
10383/// Retrieve the template name that corresponds to a non-empty
10384/// lookup.
TemplateName
ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin,
                                      UnresolvedSetIterator End) const {
  unsigned size = End - Begin;
  assert(size > 1 && "set is not overloaded!");

  // Allocate the storage header and the trailing decl-pointer array in one
  // context-owned block; OverloadedTemplateStorage expects them contiguous.
  void *memory = Allocate(Size: sizeof(OverloadedTemplateStorage) +
                          size * sizeof(FunctionTemplateDecl*));
  auto *OT = new (memory) OverloadedTemplateStorage(size);

  NamedDecl **Storage = OT->getStorage();
  for (UnresolvedSetIterator I = Begin; I != End; ++I) {
    NamedDecl *D = *I;
    // Only function templates (possibly via using-shadow) or unresolved
    // using declarations may appear in an overloaded template name.
    assert(isa<FunctionTemplateDecl>(D) ||
           isa<UnresolvedUsingValueDecl>(D) ||
           (isa<UsingShadowDecl>(D) &&
            isa<FunctionTemplateDecl>(D->getUnderlyingDecl())));
    *Storage++ = D;
  }

  return TemplateName(OT);
}
10407
10408/// Retrieve a template name representing an unqualified-id that has been
10409/// assumed to name a template for ADL purposes.
// Wrap \p Name in an AssumedTemplateStorage; not uniqued, a fresh node is
// allocated on each call.
TemplateName ASTContext::getAssumedTemplateName(DeclarationName Name) const {
  auto *OT = new (*this) AssumedTemplateStorage(Name);
  return TemplateName(OT);
}
10414
10415/// Retrieve the template name that represents a qualified
10416/// template name such as \c std::vector.
TemplateName ASTContext::getQualifiedTemplateName(NestedNameSpecifier Qualifier,
                                                  bool TemplateKeyword,
                                                  TemplateName Template) const {
  assert(Template.getKind() == TemplateName::Template ||
         Template.getKind() == TemplateName::UsingTemplate);

  // Template template parameters are never qualified; return them as-is.
  if (Template.getAsTemplateDecl()->getKind() == Decl::TemplateTemplateParm) {
    assert(!Qualifier && "unexpected qualified template template parameter");
    assert(TemplateKeyword == false);
    return Template;
  }

  // FIXME: Canonicalization?
  // Unique the qualified name through the folding set.
  llvm::FoldingSetNodeID ID;
  QualifiedTemplateName::Profile(ID, NNS: Qualifier, TemplateKeyword, TN: Template);

  void *InsertPos = nullptr;
  QualifiedTemplateName *QTN =
      QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
  if (!QTN) {
    QTN = new (*this, alignof(QualifiedTemplateName))
        QualifiedTemplateName(Qualifier, TemplateKeyword, Template);
    QualifiedTemplateNames.InsertNode(N: QTN, InsertPos);
  }

  return TemplateName(QTN);
}
10444
10445/// Retrieve the template name that represents a dependent
10446/// template name such as \c MetaFun::template operator+.
TemplateName
ASTContext::getDependentTemplateName(const DependentTemplateStorage &S) const {
  // Unique the dependent name through the folding set.
  llvm::FoldingSetNodeID ID;
  S.Profile(ID);

  void *InsertPos = nullptr;
  if (DependentTemplateName *QTN =
          DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos))
    return TemplateName(QTN);

  DependentTemplateName *QTN =
      new (*this, alignof(DependentTemplateName)) DependentTemplateName(S);
  DependentTemplateNames.InsertNode(N: QTN, InsertPos);
  return TemplateName(QTN);
}
10462
// Return a uniqued template name recording the substitution of
// \p Replacement for the template template parameter at \p Index.
TemplateName ASTContext::getSubstTemplateTemplateParm(TemplateName Replacement,
                                                      Decl *AssociatedDecl,
                                                      unsigned Index,
                                                      UnsignedOrNone PackIndex,
                                                      bool Final) const {
  llvm::FoldingSetNodeID ID;
  SubstTemplateTemplateParmStorage::Profile(ID, Replacement, AssociatedDecl,
                                            Index, PackIndex, Final);

  void *insertPos = nullptr;
  SubstTemplateTemplateParmStorage *subst
    = SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos&: insertPos);

  if (!subst) {
    subst = new (*this) SubstTemplateTemplateParmStorage(
        Replacement, AssociatedDecl, Index, PackIndex, Final);
    SubstTemplateTemplateParms.InsertNode(N: subst, InsertPos: insertPos);
  }

  return TemplateName(subst);
}
10484
// Return a uniqued template name recording the substitution of the
// argument pack \p ArgPack for a template template parameter pack.
TemplateName
ASTContext::getSubstTemplateTemplateParmPack(const TemplateArgument &ArgPack,
                                             Decl *AssociatedDecl,
                                             unsigned Index, bool Final) const {
  auto &Self = const_cast<ASTContext &>(*this);
  llvm::FoldingSetNodeID ID;
  SubstTemplateTemplateParmPackStorage::Profile(ID, Context&: Self, ArgPack,
                                                AssociatedDecl, Index, Final);

  void *InsertPos = nullptr;
  SubstTemplateTemplateParmPackStorage *Subst
    = SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos);

  if (!Subst) {
    Subst = new (*this) SubstTemplateTemplateParmPackStorage(
        ArgPack.pack_elements(), AssociatedDecl, Index, Final);
    SubstTemplateTemplateParmPacks.InsertNode(N: Subst, InsertPos);
  }

  return TemplateName(Subst);
}
10506
10507/// Retrieve the template name that represents a template name
10508/// deduced from a specialization.
TemplateName
ASTContext::getDeducedTemplateName(TemplateName Underlying,
                                   DefaultArguments DefaultArgs) const {
  // Without default arguments there is nothing to record.
  if (!DefaultArgs)
    return Underlying;

  // Unique through the folding set.
  llvm::FoldingSetNodeID ID;
  DeducedTemplateStorage::Profile(ID, Context: *this, Underlying, DefArgs: DefaultArgs);

  void *InsertPos = nullptr;
  DeducedTemplateStorage *DTS =
      DeducedTemplates.FindNodeOrInsertPos(ID, InsertPos);
  if (!DTS) {
    // The default arguments are stored in a trailing array after the node.
    void *Mem = Allocate(Size: sizeof(DeducedTemplateStorage) +
                             sizeof(TemplateArgument) * DefaultArgs.Args.size(),
                         Align: alignof(DeducedTemplateStorage));
    DTS = new (Mem) DeducedTemplateStorage(Underlying, DefaultArgs);
    DeducedTemplates.InsertNode(N: DTS, InsertPos);
  }
  return TemplateName(DTS);
}
10530
10531/// getFromTargetType - Given one of the integer types provided by
10532/// TargetInfo, produce the corresponding type. The unsigned @p Type
10533/// is actually a value of type @c TargetInfo::IntType.
10534CanQualType ASTContext::getFromTargetType(unsigned Type) const {
10535 switch (Type) {
10536 case TargetInfo::NoInt: return {};
10537 case TargetInfo::SignedChar: return SignedCharTy;
10538 case TargetInfo::UnsignedChar: return UnsignedCharTy;
10539 case TargetInfo::SignedShort: return ShortTy;
10540 case TargetInfo::UnsignedShort: return UnsignedShortTy;
10541 case TargetInfo::SignedInt: return IntTy;
10542 case TargetInfo::UnsignedInt: return UnsignedIntTy;
10543 case TargetInfo::SignedLong: return LongTy;
10544 case TargetInfo::UnsignedLong: return UnsignedLongTy;
10545 case TargetInfo::SignedLongLong: return LongLongTy;
10546 case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy;
10547 }
10548
10549 llvm_unreachable("Unhandled TargetInfo::IntType value");
10550}
10551
10552//===----------------------------------------------------------------------===//
10553// Type Predicates.
10554//===----------------------------------------------------------------------===//
10555
10556/// getObjCGCAttr - Returns one of GCNone, Weak or Strong objc's
10557/// garbage collection attribute.
10558///
Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const {
  // Outside of ObjC GC mode no attribute applies.
  if (getLangOpts().getGC() == LangOptions::NonGC)
    return Qualifiers::GCNone;

  assert(getLangOpts().ObjC);
  Qualifiers::GC GCAttrs = Ty.getObjCGCAttr();

  // Default behaviour under objective-C's gc is for ObjC pointers
  // (or pointers to them) be treated as though they were declared
  // as __strong.
  if (GCAttrs == Qualifiers::GCNone) {
    if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType())
      return Qualifiers::Strong;
    else if (Ty->isPointerType())
      // Recurse through pointer levels to find the ultimate pointee.
      return getObjCGCAttrKind(Ty: Ty->castAs<PointerType>()->getPointeeType());
  } else {
    // It's not valid to set GC attributes on anything that isn't a
    // pointer.
#ifndef NDEBUG
    // Strip array types before checking: an array of pointers is fine.
    QualType CT = Ty->getCanonicalTypeInternal();
    while (const auto *AT = dyn_cast<ArrayType>(CT))
      CT = AT->getElementType();
    assert(CT->isAnyPointerType() || CT->isBlockPointerType());
#endif
  }
  return GCAttrs;
}
10586
10587//===----------------------------------------------------------------------===//
10588// Type Compatibility Testing
10589//===----------------------------------------------------------------------===//
10590
10591/// areCompatVectorTypes - Return true if the two specified vector types are
10592/// compatible.
10593static bool areCompatVectorTypes(const VectorType *LHS,
10594 const VectorType *RHS) {
10595 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified());
10596 return LHS->getElementType() == RHS->getElementType() &&
10597 LHS->getNumElements() == RHS->getNumElements();
10598}
10599
10600/// areCompatMatrixTypes - Return true if the two specified matrix types are
10601/// compatible.
10602static bool areCompatMatrixTypes(const ConstantMatrixType *LHS,
10603 const ConstantMatrixType *RHS) {
10604 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified());
10605 return LHS->getElementType() == RHS->getElementType() &&
10606 LHS->getNumRows() == RHS->getNumRows() &&
10607 LHS->getNumColumns() == RHS->getNumColumns();
10608}
10609
// Return true if two vector types may be treated as the same type: either
// they are identical after qualifiers, or both are generic-enough vectors
// (not AltiVec pixel/bool, not SVE/RVV fixed-length) with matching element
// type and lane count.
bool ASTContext::areCompatibleVectorTypes(QualType FirstVec,
                                          QualType SecondVec) {
  assert(FirstVec->isVectorType() && "FirstVec should be a vector type");
  assert(SecondVec->isVectorType() && "SecondVec should be a vector type");

  if (hasSameUnqualifiedType(T1: FirstVec, T2: SecondVec))
    return true;

  // Treat Neon vector types and most AltiVec vector types as if they are the
  // equivalent GCC vector types. The excluded kinds below carry extra
  // semantics that make them incompatible with generic vectors.
  const auto *First = FirstVec->castAs<VectorType>();
  const auto *Second = SecondVec->castAs<VectorType>();
  if (First->getNumElements() == Second->getNumElements() &&
      hasSameType(T1: First->getElementType(), T2: Second->getElementType()) &&
      First->getVectorKind() != VectorKind::AltiVecPixel &&
      First->getVectorKind() != VectorKind::AltiVecBool &&
      Second->getVectorKind() != VectorKind::AltiVecPixel &&
      Second->getVectorKind() != VectorKind::AltiVecBool &&
      First->getVectorKind() != VectorKind::SveFixedLengthData &&
      First->getVectorKind() != VectorKind::SveFixedLengthPredicate &&
      Second->getVectorKind() != VectorKind::SveFixedLengthData &&
      Second->getVectorKind() != VectorKind::SveFixedLengthPredicate &&
      First->getVectorKind() != VectorKind::RVVFixedLengthData &&
      Second->getVectorKind() != VectorKind::RVVFixedLengthData &&
      First->getVectorKind() != VectorKind::RVVFixedLengthMask &&
      Second->getVectorKind() != VectorKind::RVVFixedLengthMask &&
      First->getVectorKind() != VectorKind::RVVFixedLengthMask_1 &&
      Second->getVectorKind() != VectorKind::RVVFixedLengthMask_1 &&
      First->getVectorKind() != VectorKind::RVVFixedLengthMask_2 &&
      Second->getVectorKind() != VectorKind::RVVFixedLengthMask_2 &&
      First->getVectorKind() != VectorKind::RVVFixedLengthMask_4 &&
      Second->getVectorKind() != VectorKind::RVVFixedLengthMask_4)
    return true;

  // In OpenCL, treat half and _Float16 vector types as compatible.
  if (getLangOpts().OpenCL &&
      First->getNumElements() == Second->getNumElements()) {
    QualType FirstElt = First->getElementType();
    QualType SecondElt = Second->getElementType();

    if ((FirstElt->isFloat16Type() && SecondElt->isHalfType()) ||
        (FirstElt->isHalfType() && SecondElt->isFloat16Type())) {
      if (First->getVectorKind() != VectorKind::AltiVecPixel &&
          First->getVectorKind() != VectorKind::AltiVecBool &&
          Second->getVectorKind() != VectorKind::AltiVecPixel &&
          Second->getVectorKind() != VectorKind::AltiVecBool)
        return true;
    }
  }
  return false;
}
10661
10662bool ASTContext::areCompatibleOverflowBehaviorTypes(QualType LHS,
10663 QualType RHS) {
10664 auto Result = checkOBTAssignmentCompatibility(LHS, RHS);
10665 return Result != OBTAssignResult::IncompatibleKinds;
10666}
10667
10668ASTContext::OBTAssignResult
10669ASTContext::checkOBTAssignmentCompatibility(QualType LHS, QualType RHS) {
10670 const auto *LHSOBT = LHS->getAs<OverflowBehaviorType>();
10671 const auto *RHSOBT = RHS->getAs<OverflowBehaviorType>();
10672
10673 if (!LHSOBT && !RHSOBT)
10674 return OBTAssignResult::Compatible;
10675
10676 if (LHSOBT && RHSOBT) {
10677 if (LHSOBT->getBehaviorKind() != RHSOBT->getBehaviorKind())
10678 return OBTAssignResult::IncompatibleKinds;
10679 return OBTAssignResult::Compatible;
10680 }
10681
10682 QualType LHSUnderlying = LHSOBT ? LHSOBT->desugar() : LHS;
10683 QualType RHSUnderlying = RHSOBT ? RHSOBT->desugar() : RHS;
10684
10685 if (RHSOBT && !LHSOBT) {
10686 if (LHSUnderlying->isIntegerType() && RHSUnderlying->isIntegerType())
10687 return OBTAssignResult::Discards;
10688 }
10689
10690 return OBTAssignResult::NotApplicable;
10691}
10692
/// getRVVTypeSize - Return RVV vector register size.
///
/// Computes the fixed-length size, in bits, of the given RVV scalable
/// builtin vector type as vscale * (known minimum element count) *
/// (element size in bits). Returns 0 when the target provides no vscale
/// range (i.e. the fixed size is unknown).
static uint64_t getRVVTypeSize(ASTContext &Context, const BuiltinType *Ty) {
  assert(Ty->isRVVVLSBuiltinType() && "Invalid RVV Type");
  auto VScale = Context.getTargetInfo().getVScaleRange(
      LangOpts: Context.getLangOpts(), Mode: TargetInfo::ArmStreamingKind::NotStreaming);
  if (!VScale)
    return 0;

  ASTContext::BuiltinVectorTypeInfo Info = Context.getBuiltinVectorTypeInfo(Ty);

  // Mask vectors use a bool element type; each element occupies a single
  // bit, not getTypeSize(BoolTy) bits.
  uint64_t EltSize = Context.getTypeSize(T: Info.ElementType);
  if (Info.ElementType == Context.BoolTy)
    EltSize = 1;

  uint64_t MinElts = Info.EC.getKnownMinValue();
  // VScale->first is the lower bound of the target's vscale range.
  return VScale->first * MinElts * EltSize;
}
10710
10711bool ASTContext::areCompatibleRVVTypes(QualType FirstType,
10712 QualType SecondType) {
10713 assert(
10714 ((FirstType->isRVVSizelessBuiltinType() && SecondType->isVectorType()) ||
10715 (FirstType->isVectorType() && SecondType->isRVVSizelessBuiltinType())) &&
10716 "Expected RVV builtin type and vector type!");
10717
10718 auto IsValidCast = [this](QualType FirstType, QualType SecondType) {
10719 if (const auto *BT = FirstType->getAs<BuiltinType>()) {
10720 if (const auto *VT = SecondType->getAs<VectorType>()) {
10721 if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask) {
10722 BuiltinVectorTypeInfo Info = getBuiltinVectorTypeInfo(Ty: BT);
10723 return FirstType->isRVVVLSBuiltinType() &&
10724 Info.ElementType == BoolTy &&
10725 getTypeSize(T: SecondType) == ((getRVVTypeSize(Context&: *this, Ty: BT)));
10726 }
10727 if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask_1) {
10728 BuiltinVectorTypeInfo Info = getBuiltinVectorTypeInfo(Ty: BT);
10729 return FirstType->isRVVVLSBuiltinType() &&
10730 Info.ElementType == BoolTy &&
10731 getTypeSize(T: SecondType) == ((getRVVTypeSize(Context&: *this, Ty: BT) * 8));
10732 }
10733 if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask_2) {
10734 BuiltinVectorTypeInfo Info = getBuiltinVectorTypeInfo(Ty: BT);
10735 return FirstType->isRVVVLSBuiltinType() &&
10736 Info.ElementType == BoolTy &&
10737 getTypeSize(T: SecondType) == ((getRVVTypeSize(Context&: *this, Ty: BT)) * 4);
10738 }
10739 if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask_4) {
10740 BuiltinVectorTypeInfo Info = getBuiltinVectorTypeInfo(Ty: BT);
10741 return FirstType->isRVVVLSBuiltinType() &&
10742 Info.ElementType == BoolTy &&
10743 getTypeSize(T: SecondType) == ((getRVVTypeSize(Context&: *this, Ty: BT)) * 2);
10744 }
10745 if (VT->getVectorKind() == VectorKind::RVVFixedLengthData ||
10746 VT->getVectorKind() == VectorKind::Generic)
10747 return FirstType->isRVVVLSBuiltinType() &&
10748 getTypeSize(T: SecondType) == getRVVTypeSize(Context&: *this, Ty: BT) &&
10749 hasSameType(T1: VT->getElementType(),
10750 T2: getBuiltinVectorTypeInfo(Ty: BT).ElementType);
10751 }
10752 }
10753 return false;
10754 };
10755
10756 return IsValidCast(FirstType, SecondType) ||
10757 IsValidCast(SecondType, FirstType);
10758}
10759
10760bool ASTContext::areLaxCompatibleRVVTypes(QualType FirstType,
10761 QualType SecondType) {
10762 assert(
10763 ((FirstType->isRVVSizelessBuiltinType() && SecondType->isVectorType()) ||
10764 (FirstType->isVectorType() && SecondType->isRVVSizelessBuiltinType())) &&
10765 "Expected RVV builtin type and vector type!");
10766
10767 auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) {
10768 const auto *BT = FirstType->getAs<BuiltinType>();
10769 if (!BT)
10770 return false;
10771
10772 if (!BT->isRVVVLSBuiltinType())
10773 return false;
10774
10775 const auto *VecTy = SecondType->getAs<VectorType>();
10776 if (VecTy && VecTy->getVectorKind() == VectorKind::Generic) {
10777 const LangOptions::LaxVectorConversionKind LVCKind =
10778 getLangOpts().getLaxVectorConversions();
10779
10780 // If __riscv_v_fixed_vlen != N do not allow vector lax conversion.
10781 if (getTypeSize(T: SecondType) != getRVVTypeSize(Context&: *this, Ty: BT))
10782 return false;
10783
10784 // If -flax-vector-conversions=all is specified, the types are
10785 // certainly compatible.
10786 if (LVCKind == LangOptions::LaxVectorConversionKind::All)
10787 return true;
10788
10789 // If -flax-vector-conversions=integer is specified, the types are
10790 // compatible if the elements are integer types.
10791 if (LVCKind == LangOptions::LaxVectorConversionKind::Integer)
10792 return VecTy->getElementType().getCanonicalType()->isIntegerType() &&
10793 FirstType->getRVVEltType(Ctx: *this)->isIntegerType();
10794 }
10795
10796 return false;
10797 };
10798
10799 return IsLaxCompatible(FirstType, SecondType) ||
10800 IsLaxCompatible(SecondType, FirstType);
10801}
10802
10803bool ASTContext::hasDirectOwnershipQualifier(QualType Ty) const {
10804 while (true) {
10805 // __strong id
10806 if (const AttributedType *Attr = dyn_cast<AttributedType>(Val&: Ty)) {
10807 if (Attr->getAttrKind() == attr::ObjCOwnership)
10808 return true;
10809
10810 Ty = Attr->getModifiedType();
10811
10812 // X *__strong (...)
10813 } else if (const ParenType *Paren = dyn_cast<ParenType>(Val&: Ty)) {
10814 Ty = Paren->getInnerType();
10815
10816 // We do not want to look through typedefs, typeof(expr),
10817 // typeof(type), or any other way that the type is somehow
10818 // abstracted.
10819 } else {
10820 return false;
10821 }
10822 }
10823}
10824
10825//===----------------------------------------------------------------------===//
10826// ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's.
10827//===----------------------------------------------------------------------===//
10828
10829/// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the
10830/// inheritance hierarchy of 'rProto'.
10831bool
10832ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto,
10833 ObjCProtocolDecl *rProto) const {
10834 if (declaresSameEntity(D1: lProto, D2: rProto))
10835 return true;
10836 for (auto *PI : rProto->protocols())
10837 if (ProtocolCompatibleWithProtocol(lProto, rProto: PI))
10838 return true;
10839 return false;
10840}
10841
10842/// ObjCQualifiedClassTypesAreCompatible - compare Class<pr,...> and
10843/// Class<pr1, ...>.
10844bool ASTContext::ObjCQualifiedClassTypesAreCompatible(
10845 const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs) {
10846 for (auto *lhsProto : lhs->quals()) {
10847 bool match = false;
10848 for (auto *rhsProto : rhs->quals()) {
10849 if (ProtocolCompatibleWithProtocol(lProto: lhsProto, rProto: rhsProto)) {
10850 match = true;
10851 break;
10852 }
10853 }
10854 if (!match)
10855 return false;
10856 }
10857 return true;
10858}
10859
/// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an
/// ObjCQualifiedIDType.
///
/// When \p compare is true, protocol compatibility is also accepted in the
/// reverse direction (rhs protocol compatible with lhs protocol), making
/// the protocol check symmetric — used for comparisons rather than
/// assignments.
bool ASTContext::ObjCQualifiedIdTypesAreCompatible(
    const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs,
    bool compare) {
  // Allow id<P..> and an 'id' in all cases.
  if (lhs->isObjCIdType() || rhs->isObjCIdType())
    return true;

  // Don't allow id<P..> to convert to Class or Class<P..> in either direction.
  if (lhs->isObjCClassType() || lhs->isObjCQualifiedClassType() ||
      rhs->isObjCClassType() || rhs->isObjCQualifiedClassType())
    return false;

  // Case 1: the LHS is the qualified id (id<P..> on the left).
  if (lhs->isObjCQualifiedIdType()) {
    if (rhs->qual_empty()) {
      // If the RHS is a unqualified interface pointer "NSString*",
      // make sure we check the class hierarchy.
      if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) {
        for (auto *I : lhs->quals()) {
          // when comparing an id<P> on lhs with a static type on rhs,
          // see if static class implements all of id's protocols, directly or
          // through its super class and categories.
          if (!rhsID->ClassImplementsProtocol(lProto: I, lookupCategory: true))
            return false;
        }
      }
      // If there are no qualifiers and no interface, we have an 'id'.
      return true;
    }
    // Both the right and left sides have qualifiers.
    for (auto *lhsProto : lhs->quals()) {
      bool match = false;

      // when comparing an id<P> on lhs with a static type on rhs,
      // see if static class implements all of id's protocols, directly or
      // through its super class and categories.
      for (auto *rhsProto : rhs->quals()) {
        if (ProtocolCompatibleWithProtocol(lProto: lhsProto, rProto: rhsProto) ||
            (compare && ProtocolCompatibleWithProtocol(lProto: rhsProto, rProto: lhsProto))) {
          match = true;
          break;
        }
      }
      // If the RHS is a qualified interface pointer "NSString<P>*",
      // make sure we check the class hierarchy.
      if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) {
        for (auto *I : lhs->quals()) {
          // when comparing an id<P> on lhs with a static type on rhs,
          // see if static class implements all of id's protocols, directly or
          // through its super class and categories.
          if (rhsID->ClassImplementsProtocol(lProto: I, lookupCategory: true)) {
            match = true;
            break;
          }
        }
      }
      if (!match)
        return false;
    }

    return true;
  }

  // Case 2: the RHS must be the qualified id (id<P..> on the right).
  assert(rhs->isObjCQualifiedIdType() && "One of the LHS/RHS should be id<x>");

  if (lhs->getInterfaceType()) {
    // If both the right and left sides have qualifiers.
    for (auto *lhsProto : lhs->quals()) {
      bool match = false;

      // when comparing an id<P> on rhs with a static type on lhs,
      // see if static class implements all of id's protocols, directly or
      // through its super class and categories.
      // First, lhs protocols in the qualifier list must be found, direct
      // or indirect in rhs's qualifier list or it is a mismatch.
      for (auto *rhsProto : rhs->quals()) {
        if (ProtocolCompatibleWithProtocol(lProto: lhsProto, rProto: rhsProto) ||
            (compare && ProtocolCompatibleWithProtocol(lProto: rhsProto, rProto: lhsProto))) {
          match = true;
          break;
        }
      }
      if (!match)
        return false;
    }

    // Static class's protocols, or its super class or category protocols
    // must be found, direct or indirect in rhs's qualifier list or it is a
    // mismatch.
    if (ObjCInterfaceDecl *lhsID = lhs->getInterfaceDecl()) {
      llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols;
      CollectInheritedProtocols(CDecl: lhsID, Protocols&: LHSInheritedProtocols);
      // This is rather dubious but matches gcc's behavior. If lhs has
      // no type qualifier and its class has no static protocol(s)
      // assume that it is mismatch.
      if (LHSInheritedProtocols.empty() && lhs->qual_empty())
        return false;
      for (auto *lhsProto : LHSInheritedProtocols) {
        bool match = false;
        for (auto *rhsProto : rhs->quals()) {
          if (ProtocolCompatibleWithProtocol(lProto: lhsProto, rProto: rhsProto) ||
              (compare && ProtocolCompatibleWithProtocol(lProto: rhsProto, rProto: lhsProto))) {
            match = true;
            break;
          }
        }
        if (!match)
          return false;
      }
    }
    return true;
  }
  return false;
}
10974
/// canAssignObjCInterfaces - Return true if the two interface types are
/// compatible for assignment from RHS to LHS. This handles validation of any
/// protocol qualifiers on the LHS or RHS.
bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT,
                                         const ObjCObjectPointerType *RHSOPT) {
  const ObjCObjectType* LHS = LHSOPT->getObjectType();
  const ObjCObjectType* RHS = RHSOPT->getObjectType();

  // If either type represents the built-in 'id' type, return true.
  if (LHS->isObjCUnqualifiedId() || RHS->isObjCUnqualifiedId())
    return true;

  // Function object that propagates a successful result or handles
  // __kindof types.
  auto finish = [&](bool succeeded) -> bool {
    if (succeeded)
      return true;

    // A failed check can still succeed when the RHS is a __kindof type:
    // __kindof X stands for X or any subclass, so retry with the operands
    // swapped and the __kindof/protocol sugar stripped.
    if (!RHS->isKindOfType())
      return false;

    // Strip off __kindof and protocol qualifiers, then check whether
    // we can assign the other way.
    return canAssignObjCInterfaces(LHSOPT: RHSOPT->stripObjCKindOfTypeAndQuals(ctx: *this),
                                   RHSOPT: LHSOPT->stripObjCKindOfTypeAndQuals(ctx: *this));
  };

  // Casts from or to id<P> are allowed when the other side has compatible
  // protocols.
  if (LHS->isObjCQualifiedId() || RHS->isObjCQualifiedId()) {
    return finish(ObjCQualifiedIdTypesAreCompatible(lhs: LHSOPT, rhs: RHSOPT, compare: false));
  }

  // Verify protocol compatibility for casts from Class<P1> to Class<P2>.
  if (LHS->isObjCQualifiedClass() && RHS->isObjCQualifiedClass()) {
    return finish(ObjCQualifiedClassTypesAreCompatible(lhs: LHSOPT, rhs: RHSOPT));
  }

  // Casts from Class to Class<Foo>, or vice-versa, are allowed.
  if (LHS->isObjCClass() && RHS->isObjCClass()) {
    return true;
  }

  // If we have 2 user-defined types, fall into that path.
  if (LHS->getInterface() && RHS->getInterface()) {
    return finish(canAssignObjCInterfaces(LHS, RHS));
  }

  return false;
}
11025
/// canAssignObjCInterfacesInBlockPointer - This routine is specifically written
/// for providing type-safety for objective-c pointers used to pass/return
/// arguments in block literals. When passed as arguments, passing 'A*' where
/// 'id' is expected is not OK. Passing 'Sub *" where 'Super *" is expected is
/// not OK. For the return type, the opposite is not OK.
///
/// \param BlockReturnType true when checking the block's return type, which
///        flips the direction in which subclassing is acceptable.
bool ASTContext::canAssignObjCInterfacesInBlockPointer(
    const ObjCObjectPointerType *LHSOPT,
    const ObjCObjectPointerType *RHSOPT,
    bool BlockReturnType) {

  // Function object that propagates a successful result or handles
  // __kindof types.
  auto finish = [&](bool succeeded) -> bool {
    if (succeeded)
      return true;

    // Only retry with stripped sugar when the "expected" side (which side
    // that is depends on BlockReturnType) is a __kindof type.
    const ObjCObjectPointerType *Expected = BlockReturnType ? RHSOPT : LHSOPT;
    if (!Expected->isKindOfType())
      return false;

    // Strip off __kindof and protocol qualifiers, then check whether
    // we can assign the other way.
    return canAssignObjCInterfacesInBlockPointer(
             LHSOPT: RHSOPT->stripObjCKindOfTypeAndQuals(ctx: *this),
             RHSOPT: LHSOPT->stripObjCKindOfTypeAndQuals(ctx: *this),
             BlockReturnType);
  };

  if (RHSOPT->isObjCBuiltinType() || LHSOPT->isObjCIdType())
    return true;

  if (LHSOPT->isObjCBuiltinType()) {
    return finish(RHSOPT->isObjCBuiltinType() ||
                  RHSOPT->isObjCQualifiedIdType());
  }

  if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType()) {
    if (getLangOpts().CompatibilityQualifiedIdBlockParamTypeChecking)
      // Use for block parameters previous type checking for compatibility.
      return finish(ObjCQualifiedIdTypesAreCompatible(lhs: LHSOPT, rhs: RHSOPT, compare: false) ||
                    // Or corrected type checking as in non-compat mode.
                    (!BlockReturnType &&
                     ObjCQualifiedIdTypesAreCompatible(lhs: RHSOPT, rhs: LHSOPT, compare: false)));
    else
      return finish(ObjCQualifiedIdTypesAreCompatible(
          lhs: (BlockReturnType ? LHSOPT : RHSOPT),
          rhs: (BlockReturnType ? RHSOPT : LHSOPT), compare: false));
  }

  const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType();
  const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType();
  if (LHS && RHS) { // We have 2 user-defined types.
    if (LHS != RHS) {
      // For parameters, the callee may be handed a subclass of what it
      // expects; for return values the direction is reversed.
      if (LHS->getDecl()->isSuperClassOf(I: RHS->getDecl()))
        return finish(BlockReturnType);
      if (RHS->getDecl()->isSuperClassOf(I: LHS->getDecl()))
        return finish(!BlockReturnType);
    }
    else
      return true;
  }
  return false;
}
11089
11090/// Comparison routine for Objective-C protocols to be used with
11091/// llvm::array_pod_sort.
11092static int compareObjCProtocolsByName(ObjCProtocolDecl * const *lhs,
11093 ObjCProtocolDecl * const *rhs) {
11094 return (*lhs)->getName().compare(RHS: (*rhs)->getName());
11095}
11096
11097/// getIntersectionOfProtocols - This routine finds the intersection of set
11098/// of protocols inherited from two distinct objective-c pointer objects with
11099/// the given common base.
11100/// It is used to build composite qualifier list of the composite type of
11101/// the conditional expression involving two objective-c pointer objects.
11102static
11103void getIntersectionOfProtocols(ASTContext &Context,
11104 const ObjCInterfaceDecl *CommonBase,
11105 const ObjCObjectPointerType *LHSOPT,
11106 const ObjCObjectPointerType *RHSOPT,
11107 SmallVectorImpl<ObjCProtocolDecl *> &IntersectionSet) {
11108
11109 const ObjCObjectType* LHS = LHSOPT->getObjectType();
11110 const ObjCObjectType* RHS = RHSOPT->getObjectType();
11111 assert(LHS->getInterface() && "LHS must have an interface base");
11112 assert(RHS->getInterface() && "RHS must have an interface base");
11113
11114 // Add all of the protocols for the LHS.
11115 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSProtocolSet;
11116
11117 // Start with the protocol qualifiers.
11118 for (auto *proto : LHS->quals()) {
11119 Context.CollectInheritedProtocols(CDecl: proto, Protocols&: LHSProtocolSet);
11120 }
11121
11122 // Also add the protocols associated with the LHS interface.
11123 Context.CollectInheritedProtocols(CDecl: LHS->getInterface(), Protocols&: LHSProtocolSet);
11124
11125 // Add all of the protocols for the RHS.
11126 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSProtocolSet;
11127
11128 // Start with the protocol qualifiers.
11129 for (auto *proto : RHS->quals()) {
11130 Context.CollectInheritedProtocols(CDecl: proto, Protocols&: RHSProtocolSet);
11131 }
11132
11133 // Also add the protocols associated with the RHS interface.
11134 Context.CollectInheritedProtocols(CDecl: RHS->getInterface(), Protocols&: RHSProtocolSet);
11135
11136 // Compute the intersection of the collected protocol sets.
11137 for (auto *proto : LHSProtocolSet) {
11138 if (RHSProtocolSet.count(Ptr: proto))
11139 IntersectionSet.push_back(Elt: proto);
11140 }
11141
11142 // Compute the set of protocols that is implied by either the common type or
11143 // the protocols within the intersection.
11144 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> ImpliedProtocols;
11145 Context.CollectInheritedProtocols(CDecl: CommonBase, Protocols&: ImpliedProtocols);
11146
11147 // Remove any implied protocols from the list of inherited protocols.
11148 if (!ImpliedProtocols.empty()) {
11149 llvm::erase_if(C&: IntersectionSet, P: [&](ObjCProtocolDecl *proto) -> bool {
11150 return ImpliedProtocols.contains(Ptr: proto);
11151 });
11152 }
11153
11154 // Sort the remaining protocols by name.
11155 llvm::array_pod_sort(Start: IntersectionSet.begin(), End: IntersectionSet.end(),
11156 Compare: compareObjCProtocolsByName);
11157}
11158
11159/// Determine whether the first type is a subtype of the second.
11160static bool canAssignObjCObjectTypes(ASTContext &ctx, QualType lhs,
11161 QualType rhs) {
11162 // Common case: two object pointers.
11163 const auto *lhsOPT = lhs->getAs<ObjCObjectPointerType>();
11164 const auto *rhsOPT = rhs->getAs<ObjCObjectPointerType>();
11165 if (lhsOPT && rhsOPT)
11166 return ctx.canAssignObjCInterfaces(LHSOPT: lhsOPT, RHSOPT: rhsOPT);
11167
11168 // Two block pointers.
11169 const auto *lhsBlock = lhs->getAs<BlockPointerType>();
11170 const auto *rhsBlock = rhs->getAs<BlockPointerType>();
11171 if (lhsBlock && rhsBlock)
11172 return ctx.typesAreBlockPointerCompatible(lhs, rhs);
11173
11174 // If either is an unqualified 'id' and the other is a block, it's
11175 // acceptable.
11176 if ((lhsOPT && lhsOPT->isObjCIdType() && rhsBlock) ||
11177 (rhsOPT && rhsOPT->isObjCIdType() && lhsBlock))
11178 return true;
11179
11180 return false;
11181}
11182
11183// Check that the given Objective-C type argument lists are equivalent.
11184static bool sameObjCTypeArgs(ASTContext &ctx,
11185 const ObjCInterfaceDecl *iface,
11186 ArrayRef<QualType> lhsArgs,
11187 ArrayRef<QualType> rhsArgs,
11188 bool stripKindOf) {
11189 if (lhsArgs.size() != rhsArgs.size())
11190 return false;
11191
11192 ObjCTypeParamList *typeParams = iface->getTypeParamList();
11193 if (!typeParams)
11194 return false;
11195
11196 for (unsigned i = 0, n = lhsArgs.size(); i != n; ++i) {
11197 if (ctx.hasSameType(T1: lhsArgs[i], T2: rhsArgs[i]))
11198 continue;
11199
11200 switch (typeParams->begin()[i]->getVariance()) {
11201 case ObjCTypeParamVariance::Invariant:
11202 if (!stripKindOf ||
11203 !ctx.hasSameType(T1: lhsArgs[i].stripObjCKindOfType(ctx),
11204 T2: rhsArgs[i].stripObjCKindOfType(ctx))) {
11205 return false;
11206 }
11207 break;
11208
11209 case ObjCTypeParamVariance::Covariant:
11210 if (!canAssignObjCObjectTypes(ctx, lhs: lhsArgs[i], rhs: rhsArgs[i]))
11211 return false;
11212 break;
11213
11214 case ObjCTypeParamVariance::Contravariant:
11215 if (!canAssignObjCObjectTypes(ctx, lhs: rhsArgs[i], rhs: lhsArgs[i]))
11216 return false;
11217 break;
11218 }
11219 }
11220
11221 return true;
11222}
11223
/// Compute the composite type of two Objective-C object pointer types by
/// finding a common base class, or return a null QualType when no common
/// base exists. Used for e.g. the type of a conditional expression.
QualType ASTContext::areCommonBaseCompatible(
           const ObjCObjectPointerType *Lptr,
           const ObjCObjectPointerType *Rptr) {
  const ObjCObjectType *LHS = Lptr->getObjectType();
  const ObjCObjectType *RHS = Rptr->getObjectType();
  const ObjCInterfaceDecl* LDecl = LHS->getInterface();
  const ObjCInterfaceDecl* RDecl = RHS->getInterface();

  // Both sides must name an interface.
  if (!LDecl || !RDecl)
    return {};

  // When either LHS or RHS is a kindof type, we should return a kindof type.
  // For example, for common base of kindof(ASub1) and kindof(ASub2), we return
  // kindof(A).
  bool anyKindOf = LHS->isKindOfType() || RHS->isKindOfType();

  // Follow the left-hand side up the class hierarchy until we either hit a
  // root or find the RHS. Record the ancestors in case we don't find it.
  llvm::SmallDenseMap<const ObjCInterfaceDecl *, const ObjCObjectType *, 4>
    LHSAncestors;
  while (true) {
    // Record this ancestor. We'll need this if the common type isn't in the
    // path from the LHS to the root.
    LHSAncestors[LHS->getInterface()->getCanonicalDecl()] = LHS;

    if (declaresSameEntity(D1: LHS->getInterface(), D2: RDecl)) {
      // Get the type arguments.
      ArrayRef<QualType> LHSTypeArgs = LHS->getTypeArgsAsWritten();
      bool anyChanges = false;
      if (LHS->isSpecialized() && RHS->isSpecialized()) {
        // Both have type arguments, compare them.
        if (!sameObjCTypeArgs(ctx&: *this, iface: LHS->getInterface(),
                              lhsArgs: LHS->getTypeArgs(), rhsArgs: RHS->getTypeArgs(),
                              /*stripKindOf=*/true))
          return {};
      } else if (LHS->isSpecialized() != RHS->isSpecialized()) {
        // If only one has type arguments, the result will not have type
        // arguments.
        LHSTypeArgs = {};
        anyChanges = true;
      }

      // Compute the intersection of protocols.
      SmallVector<ObjCProtocolDecl *, 8> Protocols;
      getIntersectionOfProtocols(Context&: *this, CommonBase: LHS->getInterface(), LHSOPT: Lptr, RHSOPT: Rptr,
                                 IntersectionSet&: Protocols);
      if (!Protocols.empty())
        anyChanges = true;

      // If anything in the LHS will have changed, build a new result type.
      // If we need to return a kindof type but LHS is not a kindof type, we
      // build a new result type.
      if (anyChanges || LHS->isKindOfType() != anyKindOf) {
        QualType Result = getObjCInterfaceType(Decl: LHS->getInterface());
        Result = getObjCObjectType(baseType: Result, typeArgs: LHSTypeArgs, protocols: Protocols,
                                   isKindOf: anyKindOf || LHS->isKindOfType());
        return getObjCObjectPointerType(ObjectT: Result);
      }

      return getObjCObjectPointerType(ObjectT: QualType(LHS, 0));
    }

    // Find the superclass.
    QualType LHSSuperType = LHS->getSuperClassType();
    if (LHSSuperType.isNull())
      break;

    LHS = LHSSuperType->castAs<ObjCObjectType>();
  }

  // We didn't find anything by following the LHS to its root; now check
  // the RHS against the cached set of ancestors.
  while (true) {
    auto KnownLHS = LHSAncestors.find(Val: RHS->getInterface()->getCanonicalDecl());
    if (KnownLHS != LHSAncestors.end()) {
      LHS = KnownLHS->second;

      // Get the type arguments.
      ArrayRef<QualType> RHSTypeArgs = RHS->getTypeArgsAsWritten();
      bool anyChanges = false;
      if (LHS->isSpecialized() && RHS->isSpecialized()) {
        // Both have type arguments, compare them.
        if (!sameObjCTypeArgs(ctx&: *this, iface: LHS->getInterface(),
                              lhsArgs: LHS->getTypeArgs(), rhsArgs: RHS->getTypeArgs(),
                              /*stripKindOf=*/true))
          return {};
      } else if (LHS->isSpecialized() != RHS->isSpecialized()) {
        // If only one has type arguments, the result will not have type
        // arguments.
        RHSTypeArgs = {};
        anyChanges = true;
      }

      // Compute the intersection of protocols.
      SmallVector<ObjCProtocolDecl *, 8> Protocols;
      getIntersectionOfProtocols(Context&: *this, CommonBase: RHS->getInterface(), LHSOPT: Lptr, RHSOPT: Rptr,
                                 IntersectionSet&: Protocols);
      if (!Protocols.empty())
        anyChanges = true;

      // If we need to return a kindof type but RHS is not a kindof type, we
      // build a new result type.
      if (anyChanges || RHS->isKindOfType() != anyKindOf) {
        QualType Result = getObjCInterfaceType(Decl: RHS->getInterface());
        Result = getObjCObjectType(baseType: Result, typeArgs: RHSTypeArgs, protocols: Protocols,
                                   isKindOf: anyKindOf || RHS->isKindOfType());
        return getObjCObjectPointerType(ObjectT: Result);
      }

      return getObjCObjectPointerType(ObjectT: QualType(RHS, 0));
    }

    // Find the superclass of the RHS.
    QualType RHSSuperType = RHS->getSuperClassType();
    if (RHSSuperType.isNull())
      break;

    RHS = RHSSuperType->castAs<ObjCObjectType>();
  }

  // No common base class was found.
  return {};
}
11346
/// Return true if an object of type RHS may be assigned to an lvalue of
/// type LHS: the RHS interface must be a subclass of the LHS interface,
/// satisfy the LHS's protocol qualifiers, and (when specialized) have
/// matching type arguments.
bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS,
                                         const ObjCObjectType *RHS) {
  assert(LHS->getInterface() && "LHS is not an interface type");
  assert(RHS->getInterface() && "RHS is not an interface type");

  // Verify that the base decls are compatible: the RHS must be a subclass of
  // the LHS.
  ObjCInterfaceDecl *LHSInterface = LHS->getInterface();
  bool IsSuperClass = LHSInterface->isSuperClassOf(I: RHS->getInterface());
  if (!IsSuperClass)
    return false;

  // If the LHS has protocol qualifiers, determine whether all of them are
  // satisfied by the RHS (i.e., the RHS has a superset of the protocols in the
  // LHS).
  if (LHS->getNumProtocols() > 0) {
    // OK if conversion of LHS to SuperClass results in narrowing of types
    // ; i.e., SuperClass may implement at least one of the protocols
    // in LHS's protocol list. Example, SuperObj<P1> = lhs<P1,P2> is ok.
    // But not SuperObj<P1,P2,P3> = lhs<P1,P2>.
    llvm::SmallPtrSet<ObjCProtocolDecl *, 8> SuperClassInheritedProtocols;
    CollectInheritedProtocols(CDecl: RHS->getInterface(), Protocols&: SuperClassInheritedProtocols);
    // Also, if RHS has explicit qualifiers, include them for comparing with
    // LHS's qualifiers.
    for (auto *RHSPI : RHS->quals())
      CollectInheritedProtocols(CDecl: RHSPI, Protocols&: SuperClassInheritedProtocols);
    // If there is no protocols associated with RHS, it is not a match.
    if (SuperClassInheritedProtocols.empty())
      return false;

    // Every LHS protocol must be found (by name) among the RHS's protocols.
    for (const auto *LHSProto : LHS->quals()) {
      bool SuperImplementsProtocol = false;
      for (auto *SuperClassProto : SuperClassInheritedProtocols)
        if (SuperClassProto->lookupProtocolNamed(PName: LHSProto->getIdentifier())) {
          SuperImplementsProtocol = true;
          break;
        }
      if (!SuperImplementsProtocol)
        return false;
    }
  }

  // If the LHS is specialized, we may need to check type arguments.
  if (LHS->isSpecialized()) {
    // Follow the superclass chain until we've matched the LHS class in the
    // hierarchy. This substitutes type arguments through.
    const ObjCObjectType *RHSSuper = RHS;
    while (!declaresSameEntity(D1: RHSSuper->getInterface(), D2: LHSInterface))
      RHSSuper = RHSSuper->getSuperClassType()->castAs<ObjCObjectType>();

    // If the RHS is specialized, compare type arguments.
    if (RHSSuper->isSpecialized() &&
        !sameObjCTypeArgs(ctx&: *this, iface: LHS->getInterface(),
                          lhsArgs: LHS->getTypeArgs(), rhsArgs: RHSSuper->getTypeArgs(),
                          /*stripKindOf=*/true)) {
      return false;
    }
  }

  return true;
}
11408
11409bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) {
11410 // get the "pointed to" types
11411 const auto *LHSOPT = LHS->getAs<ObjCObjectPointerType>();
11412 const auto *RHSOPT = RHS->getAs<ObjCObjectPointerType>();
11413
11414 if (!LHSOPT || !RHSOPT)
11415 return false;
11416
11417 return canAssignObjCInterfaces(LHSOPT, RHSOPT) ||
11418 canAssignObjCInterfaces(LHSOPT: RHSOPT, RHSOPT: LHSOPT);
11419}
11420
11421bool ASTContext::canBindObjCObjectType(QualType To, QualType From) {
11422 return canAssignObjCInterfaces(
11423 LHSOPT: getObjCObjectPointerType(ObjectT: To)->castAs<ObjCObjectPointerType>(),
11424 RHSOPT: getObjCObjectPointerType(ObjectT: From)->castAs<ObjCObjectPointerType>());
11425}
11426
11427/// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible,
11428/// both shall have the identically qualified version of a compatible type.
11429/// C99 6.2.7p1: Two types have compatible types if their types are the
11430/// same. See 6.7.[2,3,5] for additional rules.
11431bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS,
11432 bool CompareUnqualified) {
11433 if (getLangOpts().CPlusPlus)
11434 return hasSameType(T1: LHS, T2: RHS);
11435
11436 return !mergeTypes(LHS, RHS, OfBlockPointer: false, Unqualified: CompareUnqualified).isNull();
11437}
11438
11439bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) {
11440 return typesAreCompatible(LHS, RHS);
11441}
11442
11443bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) {
11444 return !mergeTypes(LHS, RHS, OfBlockPointer: true).isNull();
11445}
11446
11447/// mergeTransparentUnionType - if T is a transparent union type and a member
11448/// of T is compatible with SubType, return the merged type, else return
11449/// QualType()
11450QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType,
11451 bool OfBlockPointer,
11452 bool Unqualified) {
11453 if (const RecordType *UT = T->getAsUnionType()) {
11454 RecordDecl *UD = UT->getDecl()->getMostRecentDecl();
11455 if (UD->hasAttr<TransparentUnionAttr>()) {
11456 for (const auto *I : UD->fields()) {
11457 QualType ET = I->getType().getUnqualifiedType();
11458 QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified);
11459 if (!MT.isNull())
11460 return MT;
11461 }
11462 }
11463 }
11464
11465 return {};
11466}
11467
11468/// mergeFunctionParameterTypes - merge two types which appear as function
11469/// parameter types
11470QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs,
11471 bool OfBlockPointer,
11472 bool Unqualified) {
11473 // GNU extension: two types are compatible if they appear as a function
11474 // argument, one of the types is a transparent union type and the other
11475 // type is compatible with a union member
11476 QualType lmerge = mergeTransparentUnionType(T: lhs, SubType: rhs, OfBlockPointer,
11477 Unqualified);
11478 if (!lmerge.isNull())
11479 return lmerge;
11480
11481 QualType rmerge = mergeTransparentUnionType(T: rhs, SubType: lhs, OfBlockPointer,
11482 Unqualified);
11483 if (!rmerge.isNull())
11484 return rmerge;
11485
11486 return mergeTypes(lhs, rhs, OfBlockPointer, Unqualified);
11487}
11488
/// Merge two function types into a single composite function type, or return
/// a null QualType if they are incompatible.
///
/// \param OfBlockPointer true when merging the pointee types of two block
///        pointers, which enables the more permissive block-return rules.
/// \param Unqualified when true, merge the unqualified forms of the types.
/// \param AllowCXX permit C++ function types (exception specs); otherwise
///        their absence is asserted.
/// \param IsConditionalOperator selects how noreturn and function effects
///        combine: union when merging redeclarations, intersection for the
///        conditional operator (see the comment block below).
QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
                                        bool OfBlockPointer, bool Unqualified,
                                        bool AllowCXX,
                                        bool IsConditionalOperator) {
  const auto *lbase = lhs->castAs<FunctionType>();
  const auto *rbase = rhs->castAs<FunctionType>();
  const auto *lproto = dyn_cast<FunctionProtoType>(Val: lbase);
  const auto *rproto = dyn_cast<FunctionProtoType>(Val: rbase);
  // Track whether the merged type is still exactly representable by lhs
  // (resp. rhs), so we can return the sugared input instead of rebuilding.
  bool allLTypes = true;
  bool allRTypes = true;

  // Check return type
  QualType retType;
  if (OfBlockPointer) {
    QualType RHS = rbase->getReturnType();
    QualType LHS = lbase->getReturnType();
    bool UnqualifiedResult = Unqualified;
    if (!UnqualifiedResult)
      UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers());
    retType = mergeTypes(LHS, RHS, OfBlockPointer: true, Unqualified: UnqualifiedResult, BlockReturnType: true);
  }
  else
    retType = mergeTypes(lbase->getReturnType(), rbase->getReturnType(), OfBlockPointer: false,
                         Unqualified);
  if (retType.isNull())
    return {};

  if (Unqualified)
    retType = retType.getUnqualifiedType();

  // Compare the merged return type against each input's canonical return
  // type to decide whether either input's sugar can still be reused.
  CanQualType LRetType = getCanonicalType(T: lbase->getReturnType());
  CanQualType RRetType = getCanonicalType(T: rbase->getReturnType());
  if (Unqualified) {
    LRetType = LRetType.getUnqualifiedType();
    RRetType = RRetType.getUnqualifiedType();
  }

  if (getCanonicalType(T: retType) != LRetType)
    allLTypes = false;
  if (getCanonicalType(T: retType) != RRetType)
    allRTypes = false;

  // FIXME: double check this
  // FIXME: should we error if lbase->getRegParmAttr() != 0 &&
  //                           rbase->getRegParmAttr() != 0 &&
  //                           lbase->getRegParmAttr() != rbase->getRegParmAttr()?
  FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo();
  FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo();

  // Compatible functions must have compatible calling conventions
  if (lbaseInfo.getCC() != rbaseInfo.getCC())
    return {};

  // Regparm is part of the calling convention.
  if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm())
    return {};
  if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm())
    return {};

  if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult())
    return {};
  if (lbaseInfo.getNoCallerSavedRegs() != rbaseInfo.getNoCallerSavedRegs())
    return {};
  if (lbaseInfo.getNoCfCheck() != rbaseInfo.getNoCfCheck())
    return {};

  // When merging declarations, it's common for supplemental information like
  // attributes to only be present in one of the declarations, and we generally
  // want type merging to preserve the union of information.  So a merged
  // function type should be noreturn if it was noreturn in *either* operand
  // type.
  //
  // But for the conditional operator, this is backwards.  The result of the
  // operator could be either operand, and its type should conservatively
  // reflect that.  So a function type in a composite type is noreturn only
  // if it's noreturn in *both* operand types.
  //
  // Arguably, noreturn is a kind of subtype, and the conditional operator
  // ought to produce the most specific common supertype of its operand types.
  // That would differ from this rule in contravariant positions.  However,
  // neither C nor C++ generally uses this kind of subtype reasoning.  Also,
  // as a practical matter, it would only affect C code that does abstraction of
  // higher-order functions (taking noreturn callbacks!), which is uncommon to
  // say the least.  So we use the simpler rule.
  bool NoReturn = IsConditionalOperator
                      ? lbaseInfo.getNoReturn() && rbaseInfo.getNoReturn()
                      : lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn();
  if (lbaseInfo.getNoReturn() != NoReturn)
    allLTypes = false;
  if (rbaseInfo.getNoReturn() != NoReturn)
    allRTypes = false;

  FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(noReturn: NoReturn);

  std::optional<FunctionEffectSet> MergedFX;

  if (lproto && rproto) { // two C99 style function prototypes
    assert((AllowCXX ||
            (!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec())) &&
           "C++ shouldn't be here");
    // Compatible functions must have the same number of parameters
    if (lproto->getNumParams() != rproto->getNumParams())
      return {};

    // Variadic and non-variadic functions aren't compatible
    if (lproto->isVariadic() != rproto->isVariadic())
      return {};

    if (lproto->getMethodQuals() != rproto->getMethodQuals())
      return {};

    // Function protos with different 'cfi_salt' values aren't compatible.
    if (lproto->getExtraAttributeInfo().CFISalt !=
        rproto->getExtraAttributeInfo().CFISalt)
      return {};

    // Function effects are handled similarly to noreturn, see above.
    FunctionEffectsRef LHSFX = lproto->getFunctionEffects();
    FunctionEffectsRef RHSFX = rproto->getFunctionEffects();
    if (LHSFX != RHSFX) {
      if (IsConditionalOperator)
        MergedFX = FunctionEffectSet::getIntersection(LHS: LHSFX, RHS: RHSFX);
      else {
        FunctionEffectSet::Conflicts Errs;
        MergedFX = FunctionEffectSet::getUnion(LHS: LHSFX, RHS: RHSFX, Errs);
        // Here we're discarding a possible error due to conflicts in the effect
        // sets. But we're not in a context where we can report it. The
        // operation does however guarantee maintenance of invariants.
      }
      if (*MergedFX != LHSFX)
        allLTypes = false;
      if (*MergedFX != RHSFX)
        allRTypes = false;
    }

    SmallVector<FunctionProtoType::ExtParameterInfo, 4> newParamInfos;
    bool canUseLeft, canUseRight;
    if (!mergeExtParameterInfo(FirstFnType: lproto, SecondFnType: rproto, CanUseFirst&: canUseLeft, CanUseSecond&: canUseRight,
                               NewParamInfos&: newParamInfos))
      return {};

    if (!canUseLeft)
      allLTypes = false;
    if (!canUseRight)
      allRTypes = false;

    // Check parameter type compatibility
    SmallVector<QualType, 10> types;
    for (unsigned i = 0, n = lproto->getNumParams(); i < n; i++) {
      QualType lParamType = lproto->getParamType(i).getUnqualifiedType();
      QualType rParamType = rproto->getParamType(i).getUnqualifiedType();
      QualType paramType = mergeFunctionParameterTypes(
          lhs: lParamType, rhs: rParamType, OfBlockPointer, Unqualified);
      if (paramType.isNull())
        return {};

      if (Unqualified)
        paramType = paramType.getUnqualifiedType();

      types.push_back(Elt: paramType);
      if (Unqualified) {
        lParamType = lParamType.getUnqualifiedType();
        rParamType = rParamType.getUnqualifiedType();
      }

      if (getCanonicalType(T: paramType) != getCanonicalType(T: lParamType))
        allLTypes = false;
      if (getCanonicalType(T: paramType) != getCanonicalType(T: rParamType))
        allRTypes = false;
    }

    // Prefer returning an existing sugared type over building a new one.
    if (allLTypes) return lhs;
    if (allRTypes) return rhs;

    FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo();
    EPI.ExtInfo = einfo;
    EPI.ExtParameterInfos =
        newParamInfos.empty() ? nullptr : newParamInfos.data();
    if (MergedFX)
      EPI.FunctionEffects = *MergedFX;
    return getFunctionType(ResultTy: retType, Args: types, EPI);
  }

  // At most one side has a prototype; the prototyped side carries strictly
  // more information, so the other side's sugar can't represent the merge.
  if (lproto) allRTypes = false;
  if (rproto) allLTypes = false;

  const FunctionProtoType *proto = lproto ? lproto : rproto;
  if (proto) {
    assert((AllowCXX || !proto->hasExceptionSpec()) && "C++ shouldn't be here");
    if (proto->isVariadic())
      return {};
    // Check that the types are compatible with the types that
    // would result from default argument promotions (C99 6.7.5.3p15).
    // The only types actually affected are promotable integer
    // types and floats, which would be passed as a different
    // type depending on whether the prototype is visible.
    for (unsigned i = 0, n = proto->getNumParams(); i < n; ++i) {
      QualType paramTy = proto->getParamType(i);

      // Look at the converted type of enum types, since that is the type used
      // to pass enum values.
      if (const auto *ED = paramTy->getAsEnumDecl()) {
        paramTy = ED->getIntegerType();
        if (paramTy.isNull())
          return {};
      }

      if (isPromotableIntegerType(T: paramTy) ||
          getCanonicalType(T: paramTy).getUnqualifiedType() == FloatTy)
        return {};
    }

    if (allLTypes) return lhs;
    if (allRTypes) return rhs;

    FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo();
    EPI.ExtInfo = einfo;
    if (MergedFX)
      EPI.FunctionEffects = *MergedFX;
    return getFunctionType(ResultTy: retType, Args: proto->getParamTypes(), EPI);
  }

  // Neither side has a prototype: merge as a no-proto function type.
  if (allLTypes) return lhs;
  if (allRTypes) return rhs;
  return getFunctionNoProtoType(ResultTy: retType, Info: einfo);
}
11715
11716/// Given that we have an enum type and a non-enum type, try to merge them.
11717static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET,
11718 QualType other, bool isBlockReturnType) {
11719 // C99 6.7.2.2p4: Each enumerated type shall be compatible with char,
11720 // a signed integer type, or an unsigned integer type.
11721 // Compatibility is based on the underlying type, not the promotion
11722 // type.
11723 QualType underlyingType =
11724 ET->getDecl()->getDefinitionOrSelf()->getIntegerType();
11725 if (underlyingType.isNull())
11726 return {};
11727 if (Context.hasSameType(T1: underlyingType, T2: other))
11728 return other;
11729
11730 // In block return types, we're more permissive and accept any
11731 // integral type of the same size.
11732 if (isBlockReturnType && other->isIntegerType() &&
11733 Context.getTypeSize(T: underlyingType) == Context.getTypeSize(T: other))
11734 return other;
11735
11736 return {};
11737}
11738
11739QualType ASTContext::mergeTagDefinitions(QualType LHS, QualType RHS) {
11740 // C17 and earlier and C++ disallow two tag definitions within the same TU
11741 // from being compatible.
11742 if (LangOpts.CPlusPlus || !LangOpts.C23)
11743 return {};
11744
11745 // Nameless tags are comparable only within outer definitions. At the top
11746 // level they are not comparable.
11747 const TagDecl *LTagD = LHS->castAsTagDecl(), *RTagD = RHS->castAsTagDecl();
11748 if (!LTagD->getIdentifier() || !RTagD->getIdentifier())
11749 return {};
11750
11751 // C23, on the other hand, requires the members to be "the same enough", so
11752 // we use a structural equivalence check.
11753 StructuralEquivalenceContext::NonEquivalentDeclSet NonEquivalentDecls;
11754 StructuralEquivalenceContext Ctx(
11755 getLangOpts(), *this, *this, NonEquivalentDecls,
11756 StructuralEquivalenceKind::Default, /*StrictTypeSpelling=*/false,
11757 /*Complain=*/false, /*ErrorOnTagTypeMismatch=*/true);
11758 return Ctx.IsEquivalent(T1: LHS, T2: RHS) ? LHS : QualType{};
11759}
11760
/// Try to merge LHS and RHS when at least one side is an
/// OverflowBehaviorType (OBT).
///
/// Returns std::nullopt when neither side is an OBT (the caller proceeds
/// with the ordinary merge), a null QualType when the types are
/// incompatible, and the merged type otherwise.
std::optional<QualType> ASTContext::tryMergeOverflowBehaviorTypes(
    QualType LHS, QualType RHS, bool OfBlockPointer, bool Unqualified,
    bool BlockReturnType, bool IsConditionalOperator) {
  const auto *LHSOBT = LHS->getAs<OverflowBehaviorType>();
  const auto *RHSOBT = RHS->getAs<OverflowBehaviorType>();

  if (!LHSOBT && !RHSOBT)
    return std::nullopt;

  if (LHSOBT) {
    if (RHSOBT) {
      // Both sides are OBTs: mismatched behavior kinds never merge.
      if (LHSOBT->getBehaviorKind() != RHSOBT->getBehaviorKind())
        return QualType();

      QualType MergedUnderlying = mergeTypes(
          LHSOBT->getUnderlyingType(), RHSOBT->getUnderlyingType(),
          OfBlockPointer, Unqualified, BlockReturnType, IsConditionalOperator);

      if (MergedUnderlying.isNull())
        return QualType();

      // Canonically identical OBTs: preserve sugar where the spellings
      // agree, otherwise fall back to the canonical underlying type.
      if (getCanonicalType(T: LHSOBT) == getCanonicalType(T: RHSOBT)) {
        if (LHSOBT->getUnderlyingType() == RHSOBT->getUnderlyingType())
          return getCommonSugaredType(X: LHS, Y: RHS);
        return getOverflowBehaviorType(
            Kind: LHSOBT->getBehaviorKind(),
            Underlying: getCanonicalType(T: LHSOBT->getUnderlyingType()));
      }

      // For different underlying types that successfully merge, wrap the
      // merged underlying type with the common overflow behavior
      return getOverflowBehaviorType(Kind: LHSOBT->getBehaviorKind(),
                                     Underlying: MergedUnderlying);
    }
    // Only LHS is an OBT: merge its underlying type against RHS directly.
    return mergeTypes(LHSOBT->getUnderlyingType(), RHS, OfBlockPointer,
                      Unqualified, BlockReturnType, IsConditionalOperator);
  }

  // Only RHS is an OBT: merge LHS against its underlying type.
  return mergeTypes(LHS, RHSOBT->getUnderlyingType(), OfBlockPointer,
                    Unqualified, BlockReturnType, IsConditionalOperator);
}
11802
/// Compute the composite type of LHS and RHS (C99 6.2.7), returning a null
/// QualType when the two types are not compatible.
///
/// Sugar is preserved where possible: when one of the inputs already spells
/// the composite type, that input is returned rather than a rebuilt type.
/// Qualifier mismatches, differing canonical type classes, and per-class
/// structural differences each reject the merge below.
QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, bool OfBlockPointer,
                                bool Unqualified, bool BlockReturnType,
                                bool IsConditionalOperator) {
  // For C++ we will not reach this code with reference types (see below),
  // for OpenMP variant call overloading we might.
  //
  // C++ [expr]: If an expression initially has the type "reference to T", the
  // type is adjusted to "T" prior to any further analysis, the expression
  // designates the object or function denoted by the reference, and the
  // expression is an lvalue unless the reference is an rvalue reference and
  // the expression is a function call (possibly inside parentheses).
  auto *LHSRefTy = LHS->getAs<ReferenceType>();
  auto *RHSRefTy = RHS->getAs<ReferenceType>();
  if (LangOpts.OpenMP && LHSRefTy && RHSRefTy &&
      LHS->getTypeClass() == RHS->getTypeClass())
    return mergeTypes(LHS: LHSRefTy->getPointeeType(), RHS: RHSRefTy->getPointeeType(),
                      OfBlockPointer, Unqualified, BlockReturnType);
  if (LHSRefTy || RHSRefTy)
    return {};

  // Overflow-behavior types are handled up front; std::nullopt means
  // neither side was one and the ordinary merge continues below.
  if (std::optional<QualType> MergedOBT =
          tryMergeOverflowBehaviorTypes(LHS, RHS, OfBlockPointer, Unqualified,
                                        BlockReturnType, IsConditionalOperator))
    return *MergedOBT;

  if (Unqualified) {
    LHS = LHS.getUnqualifiedType();
    RHS = RHS.getUnqualifiedType();
  }

  QualType LHSCan = getCanonicalType(T: LHS),
           RHSCan = getCanonicalType(T: RHS);

  // If two types are identical, they are compatible.
  if (LHSCan == RHSCan)
    return LHS;

  // If the qualifiers are different, the types aren't compatible... mostly.
  Qualifiers LQuals = LHSCan.getLocalQualifiers();
  Qualifiers RQuals = RHSCan.getLocalQualifiers();
  if (LQuals != RQuals) {
    // If any of these qualifiers are different, we have a type
    // mismatch.
    if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() ||
        LQuals.getAddressSpace() != RQuals.getAddressSpace() ||
        LQuals.getObjCLifetime() != RQuals.getObjCLifetime() ||
        !LQuals.getPointerAuth().isEquivalent(Other: RQuals.getPointerAuth()) ||
        LQuals.hasUnaligned() != RQuals.hasUnaligned())
      return {};

    // Exactly one GC qualifier difference is allowed: __strong is
    // okay if the other type has no GC qualifier but is an Objective
    // C object pointer (i.e. implicitly strong by default).  We fix
    // this by pretending that the unqualified type was actually
    // qualified __strong.
    Qualifiers::GC GC_L = LQuals.getObjCGCAttr();
    Qualifiers::GC GC_R = RQuals.getObjCGCAttr();
    assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements");

    if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak)
      return {};

    if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) {
      return mergeTypes(LHS, RHS: getObjCGCQualType(T: RHS, GCAttr: Qualifiers::Strong));
    }
    if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) {
      return mergeTypes(LHS: getObjCGCQualType(T: LHS, GCAttr: Qualifiers::Strong), RHS);
    }
    return {};
  }

  // Okay, qualifiers are equal.

  Type::TypeClass LHSClass = LHSCan->getTypeClass();
  Type::TypeClass RHSClass = RHSCan->getTypeClass();

  // We want to consider the two function types to be the same for these
  // comparisons, just force one to the other.
  if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto;
  if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto;

  // Same as above for arrays
  if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray)
    LHSClass = Type::ConstantArray;
  if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray)
    RHSClass = Type::ConstantArray;

  // ObjCInterfaces are just specialized ObjCObjects.
  if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject;
  if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject;

  // Canonicalize ExtVector -> Vector.
  if (LHSClass == Type::ExtVector) LHSClass = Type::Vector;
  if (RHSClass == Type::ExtVector) RHSClass = Type::Vector;

  // If the canonical type classes don't match.
  if (LHSClass != RHSClass) {
    // Note that we only have special rules for turning block enum
    // returns into block int returns, not vice-versa.
    if (const auto *ETy = LHS->getAsCanonical<EnumType>()) {
      return mergeEnumWithInteger(Context&: *this, ET: ETy, other: RHS, isBlockReturnType: false);
    }
    if (const EnumType *ETy = RHS->getAsCanonical<EnumType>()) {
      return mergeEnumWithInteger(Context&: *this, ET: ETy, other: LHS, isBlockReturnType: BlockReturnType);
    }
    // allow block pointer type to match an 'id' type.
    if (OfBlockPointer && !BlockReturnType) {
       if (LHS->isObjCIdType() && RHS->isBlockPointerType())
         return LHS;
      if (RHS->isObjCIdType() && LHS->isBlockPointerType())
        return RHS;
    }
    // Allow __auto_type to match anything; it merges to the type with more
    // information.
    if (const auto *AT = LHS->getAs<AutoType>()) {
      if (!AT->isDeduced() && AT->isGNUAutoType())
        return RHS;
    }
    if (const auto *AT = RHS->getAs<AutoType>()) {
      if (!AT->isDeduced() && AT->isGNUAutoType())
        return LHS;
    }
    return {};
  }

  // The canonical type classes match.
  switch (LHSClass) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");

  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
  case Type::LValueReference:
  case Type::RValueReference:
  case Type::MemberPointer:
    llvm_unreachable("C++ should never be in mergeTypes");

  case Type::ObjCInterface:
  case Type::IncompleteArray:
  case Type::VariableArray:
  case Type::FunctionProto:
  case Type::ExtVector:
  case Type::OverflowBehavior:
    llvm_unreachable("Types are eliminated above");

  case Type::Pointer:
  {
    // Merge two pointer types, while trying to preserve typedef info
    QualType LHSPointee = LHS->castAs<PointerType>()->getPointeeType();
    QualType RHSPointee = RHS->castAs<PointerType>()->getPointeeType();
    if (Unqualified) {
      LHSPointee = LHSPointee.getUnqualifiedType();
      RHSPointee = RHSPointee.getUnqualifiedType();
    }
    QualType ResultType = mergeTypes(LHS: LHSPointee, RHS: RHSPointee, OfBlockPointer: false,
                                     Unqualified);
    if (ResultType.isNull())
      return {};
    if (getCanonicalType(T: LHSPointee) == getCanonicalType(T: ResultType))
      return LHS;
    if (getCanonicalType(T: RHSPointee) == getCanonicalType(T: ResultType))
      return RHS;
    return getPointerType(T: ResultType);
  }
  case Type::BlockPointer:
  {
    // Merge two block pointer types, while trying to preserve typedef info
    QualType LHSPointee = LHS->castAs<BlockPointerType>()->getPointeeType();
    QualType RHSPointee = RHS->castAs<BlockPointerType>()->getPointeeType();
    if (Unqualified) {
      LHSPointee = LHSPointee.getUnqualifiedType();
      RHSPointee = RHSPointee.getUnqualifiedType();
    }
    if (getLangOpts().OpenCL) {
      Qualifiers LHSPteeQual = LHSPointee.getQualifiers();
      Qualifiers RHSPteeQual = RHSPointee.getQualifiers();
      // Blocks can't be an expression in a ternary operator (OpenCL v2.0
      // 6.12.5) thus the following check is asymmetric.
      if (!LHSPteeQual.isAddressSpaceSupersetOf(other: RHSPteeQual, Ctx: *this))
        return {};
      LHSPteeQual.removeAddressSpace();
      RHSPteeQual.removeAddressSpace();
      LHSPointee =
          QualType(LHSPointee.getTypePtr(), LHSPteeQual.getAsOpaqueValue());
      RHSPointee =
          QualType(RHSPointee.getTypePtr(), RHSPteeQual.getAsOpaqueValue());
    }
    QualType ResultType = mergeTypes(LHS: LHSPointee, RHS: RHSPointee, OfBlockPointer,
                                     Unqualified);
    if (ResultType.isNull())
      return {};
    if (getCanonicalType(T: LHSPointee) == getCanonicalType(T: ResultType))
      return LHS;
    if (getCanonicalType(T: RHSPointee) == getCanonicalType(T: ResultType))
      return RHS;
    return getBlockPointerType(T: ResultType);
  }
  case Type::Atomic:
  {
    // Merge two atomic types by merging their value types, while trying to
    // preserve typedef info.
    QualType LHSValue = LHS->castAs<AtomicType>()->getValueType();
    QualType RHSValue = RHS->castAs<AtomicType>()->getValueType();
    if (Unqualified) {
      LHSValue = LHSValue.getUnqualifiedType();
      RHSValue = RHSValue.getUnqualifiedType();
    }
    QualType ResultType = mergeTypes(LHS: LHSValue, RHS: RHSValue, OfBlockPointer: false,
                                     Unqualified);
    if (ResultType.isNull())
      return {};
    if (getCanonicalType(T: LHSValue) == getCanonicalType(T: ResultType))
      return LHS;
    if (getCanonicalType(T: RHSValue) == getCanonicalType(T: ResultType))
      return RHS;
    return getAtomicType(T: ResultType);
  }
  case Type::ConstantArray:
  {
    const ConstantArrayType* LCAT = getAsConstantArrayType(T: LHS);
    const ConstantArrayType* RCAT = getAsConstantArrayType(T: RHS);
    if (LCAT && RCAT && RCAT->getZExtSize() != LCAT->getZExtSize())
      return {};

    QualType LHSElem = getAsArrayType(T: LHS)->getElementType();
    QualType RHSElem = getAsArrayType(T: RHS)->getElementType();
    if (Unqualified) {
      LHSElem = LHSElem.getUnqualifiedType();
      RHSElem = RHSElem.getUnqualifiedType();
    }

    QualType ResultType = mergeTypes(LHS: LHSElem, RHS: RHSElem, OfBlockPointer: false, Unqualified);
    if (ResultType.isNull())
      return {};

    const VariableArrayType* LVAT = getAsVariableArrayType(T: LHS);
    const VariableArrayType* RVAT = getAsVariableArrayType(T: RHS);

    // If either side is a variable array, and both are complete, check whether
    // the current dimension is definite.
    if (LVAT || RVAT) {
      // Returns (true, size) when the dimension is a known constant, either
      // from a constant array or a VLA whose size expression folds to one.
      auto SizeFetch = [this](const VariableArrayType* VAT,
                              const ConstantArrayType* CAT)
          -> std::pair<bool,llvm::APInt> {
        if (VAT) {
          std::optional<llvm::APSInt> TheInt;
          Expr *E = VAT->getSizeExpr();
          if (E && (TheInt = E->getIntegerConstantExpr(Ctx: *this)))
            return std::make_pair(x: true, y&: *TheInt);
          return std::make_pair(x: false, y: llvm::APSInt());
        }
        if (CAT)
          return std::make_pair(x: true, y: CAT->getSize());
        return std::make_pair(x: false, y: llvm::APInt());
      };

      bool HaveLSize, HaveRSize;
      llvm::APInt LSize, RSize;
      std::tie(args&: HaveLSize, args&: LSize) = SizeFetch(LVAT, LCAT);
      std::tie(args&: HaveRSize, args&: RSize) = SizeFetch(RVAT, RCAT);
      if (HaveLSize && HaveRSize && !llvm::APInt::isSameValue(I1: LSize, I2: RSize))
        return {}; // Definite, but unequal, array dimension
    }

    if (LCAT && getCanonicalType(T: LHSElem) == getCanonicalType(T: ResultType))
      return LHS;
    if (RCAT && getCanonicalType(T: RHSElem) == getCanonicalType(T: ResultType))
      return RHS;
    if (LCAT)
      return getConstantArrayType(EltTy: ResultType, ArySizeIn: LCAT->getSize(),
                                  SizeExpr: LCAT->getSizeExpr(), ASM: ArraySizeModifier(), IndexTypeQuals: 0);
    if (RCAT)
      return getConstantArrayType(EltTy: ResultType, ArySizeIn: RCAT->getSize(),
                                  SizeExpr: RCAT->getSizeExpr(), ASM: ArraySizeModifier(), IndexTypeQuals: 0);
    if (LVAT && getCanonicalType(T: LHSElem) == getCanonicalType(T: ResultType))
      return LHS;
    if (RVAT && getCanonicalType(T: RHSElem) == getCanonicalType(T: ResultType))
      return RHS;
    if (LVAT) {
      // FIXME: This isn't correct! But tricky to implement because
      // the array's size has to be the size of LHS, but the type
      // has to be different.
      return LHS;
    }
    if (RVAT) {
      // FIXME: This isn't correct! But tricky to implement because
      // the array's size has to be the size of RHS, but the type
      // has to be different.
      return RHS;
    }
    if (getCanonicalType(T: LHSElem) == getCanonicalType(T: ResultType)) return LHS;
    if (getCanonicalType(T: RHSElem) == getCanonicalType(T: ResultType)) return RHS;
    return getIncompleteArrayType(elementType: ResultType, ASM: ArraySizeModifier(), elementTypeQuals: 0);
  }
  case Type::FunctionNoProto:
    return mergeFunctionTypes(lhs: LHS, rhs: RHS, OfBlockPointer, Unqualified,
                              /*AllowCXX=*/false, IsConditionalOperator);
  case Type::Record:
  case Type::Enum:
    return mergeTagDefinitions(LHS, RHS);
  case Type::Builtin:
    // Only exactly equal builtin types are compatible, which is tested above.
    return {};
  case Type::Complex:
    // Distinct complex types are incompatible.
    return {};
  case Type::Vector:
    // FIXME: The merged type should be an ExtVector!
    if (areCompatVectorTypes(LHS: LHSCan->castAs<VectorType>(),
                             RHS: RHSCan->castAs<VectorType>()))
      return LHS;
    return {};
  case Type::ConstantMatrix:
    if (areCompatMatrixTypes(LHS: LHSCan->castAs<ConstantMatrixType>(),
                             RHS: RHSCan->castAs<ConstantMatrixType>()))
      return LHS;
    return {};
  case Type::ObjCObject: {
    // Check if the types are assignment compatible.
    // FIXME: This should be type compatibility, e.g. whether
    // "LHS x; RHS x;" at global scope is legal.
    if (canAssignObjCInterfaces(LHS: LHS->castAs<ObjCObjectType>(),
                                RHS: RHS->castAs<ObjCObjectType>()))
      return LHS;
    return {};
  }
  case Type::ObjCObjectPointer:
    if (OfBlockPointer) {
      if (canAssignObjCInterfacesInBlockPointer(
              LHSOPT: LHS->castAs<ObjCObjectPointerType>(),
              RHSOPT: RHS->castAs<ObjCObjectPointerType>(), BlockReturnType))
        return LHS;
      return {};
    }
    if (canAssignObjCInterfaces(LHSOPT: LHS->castAs<ObjCObjectPointerType>(),
                                RHSOPT: RHS->castAs<ObjCObjectPointerType>()))
      return LHS;
    return {};
  case Type::Pipe:
    assert(LHS != RHS &&
           "Equivalent pipe types should have already been handled!");
    return {};
  case Type::ArrayParameter:
    assert(LHS != RHS &&
           "Equivalent ArrayParameter types should have already been handled!");
    return {};
  case Type::BitInt: {
    // Merge two bit-precise int types, while trying to preserve typedef info.
    bool LHSUnsigned = LHS->castAs<BitIntType>()->isUnsigned();
    bool RHSUnsigned = RHS->castAs<BitIntType>()->isUnsigned();
    unsigned LHSBits = LHS->castAs<BitIntType>()->getNumBits();
    unsigned RHSBits = RHS->castAs<BitIntType>()->getNumBits();

    // Like unsigned/int, shouldn't have a type if they don't match.
    if (LHSUnsigned != RHSUnsigned)
      return {};

    if (LHSBits != RHSBits)
      return {};
    return LHS;
  }
  case Type::HLSLAttributedResource: {
    const HLSLAttributedResourceType *LHSTy =
        LHS->castAs<HLSLAttributedResourceType>();
    const HLSLAttributedResourceType *RHSTy =
        RHS->castAs<HLSLAttributedResourceType>();
    assert(LHSTy->getWrappedType() == RHSTy->getWrappedType() &&
           LHSTy->getWrappedType()->isHLSLResourceType() &&
           "HLSLAttributedResourceType should always wrap __hlsl_resource_t");

    if (LHSTy->getAttrs() == RHSTy->getAttrs() &&
        LHSTy->getContainedType() == RHSTy->getContainedType())
      return LHS;
    return {};
  }
  case Type::HLSLInlineSpirv:
    const HLSLInlineSpirvType *LHSTy = LHS->castAs<HLSLInlineSpirvType>();
    const HLSLInlineSpirvType *RHSTy = RHS->castAs<HLSLInlineSpirvType>();

    if (LHSTy->getOpcode() == RHSTy->getOpcode() &&
        LHSTy->getSize() == RHSTy->getSize() &&
        LHSTy->getAlignment() == RHSTy->getAlignment()) {
      for (size_t I = 0; I < LHSTy->getOperands().size(); I++)
        if (LHSTy->getOperands()[I] != RHSTy->getOperands()[I])
          return {};

      return LHS;
    }
    return {};
  }

  llvm_unreachable("Invalid Type::Class!");
}
12200
/// Merge the ExtParameterInfo lists of two function prototypes.
///
/// On success returns true and fills \p NewParamInfos with the merged infos
/// (left empty when no parameter needs any info at all). \p CanUseFirst and
/// \p CanUseSecond report whether each input type's own infos already match
/// the merged result, so its sugar can be reused. Returns false when the
/// infos cannot be reconciled.
bool ASTContext::mergeExtParameterInfo(
    const FunctionProtoType *FirstFnType, const FunctionProtoType *SecondFnType,
    bool &CanUseFirst, bool &CanUseSecond,
    SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &NewParamInfos) {
  assert(NewParamInfos.empty() && "param info list not empty");
  CanUseFirst = CanUseSecond = true;
  bool FirstHasInfo = FirstFnType->hasExtParameterInfos();
  bool SecondHasInfo = SecondFnType->hasExtParameterInfos();

  // Fast path: if neither type has ext parameter infos, there is nothing to
  // merge.
  if (!FirstHasInfo && !SecondHasInfo)
    return true;

  bool NeedParamInfo = false;
  size_t E = FirstHasInfo ? FirstFnType->getExtParameterInfos().size()
                          : SecondFnType->getExtParameterInfos().size();

  for (size_t I = 0; I < E; ++I) {
    // A missing info list is equivalent to all-default-constructed infos.
    FunctionProtoType::ExtParameterInfo FirstParam, SecondParam;
    if (FirstHasInfo)
      FirstParam = FirstFnType->getExtParameterInfo(I);
    if (SecondHasInfo)
      SecondParam = SecondFnType->getExtParameterInfo(I);

    // Cannot merge unless everything except the noescape flag matches.
    if (FirstParam.withIsNoEscape(NoEscape: false) != SecondParam.withIsNoEscape(NoEscape: false))
      return false;

    // noescape merges like an intersection: the merged parameter is noescape
    // only if it is noescape in both input types.
    bool FirstNoEscape = FirstParam.isNoEscape();
    bool SecondNoEscape = SecondParam.isNoEscape();
    bool IsNoEscape = FirstNoEscape && SecondNoEscape;
    NewParamInfos.push_back(Elt: FirstParam.withIsNoEscape(NoEscape: IsNoEscape));
    if (NewParamInfos.back().getOpaqueValue())
      NeedParamInfo = true;
    if (FirstNoEscape != IsNoEscape)
      CanUseFirst = false;
    if (SecondNoEscape != IsNoEscape)
      CanUseSecond = false;
  }

  // Drop the list entirely if every merged info turned out to be the default.
  if (!NeedParamInfo)
    NewParamInfos.clear();

  return true;
}
12247
12248void ASTContext::ResetObjCLayout(const ObjCInterfaceDecl *D) {
12249 if (auto It = ObjCLayouts.find(Val: D); It != ObjCLayouts.end()) {
12250 It->second = nullptr;
12251 for (auto *SubClass : ObjCSubClasses.lookup(Val: D))
12252 ResetObjCLayout(D: SubClass);
12253 }
12254}
12255
12256/// mergeObjCGCQualifiers - This routine merges ObjC's GC attribute of 'LHS' and
12257/// 'RHS' attributes and returns the merged version; including for function
12258/// return types.
12259QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) {
12260 QualType LHSCan = getCanonicalType(T: LHS),
12261 RHSCan = getCanonicalType(T: RHS);
12262 // If two types are identical, they are compatible.
12263 if (LHSCan == RHSCan)
12264 return LHS;
12265 if (RHSCan->isFunctionType()) {
12266 if (!LHSCan->isFunctionType())
12267 return {};
12268 QualType OldReturnType =
12269 cast<FunctionType>(Val: RHSCan.getTypePtr())->getReturnType();
12270 QualType NewReturnType =
12271 cast<FunctionType>(Val: LHSCan.getTypePtr())->getReturnType();
12272 QualType ResReturnType =
12273 mergeObjCGCQualifiers(LHS: NewReturnType, RHS: OldReturnType);
12274 if (ResReturnType.isNull())
12275 return {};
12276 if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) {
12277 // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo();
12278 // In either case, use OldReturnType to build the new function type.
12279 const auto *F = LHS->castAs<FunctionType>();
12280 if (const auto *FPT = cast<FunctionProtoType>(Val: F)) {
12281 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
12282 EPI.ExtInfo = getFunctionExtInfo(t: LHS);
12283 QualType ResultType =
12284 getFunctionType(ResultTy: OldReturnType, Args: FPT->getParamTypes(), EPI);
12285 return ResultType;
12286 }
12287 }
12288 return {};
12289 }
12290
12291 // If the qualifiers are different, the types can still be merged.
12292 Qualifiers LQuals = LHSCan.getLocalQualifiers();
12293 Qualifiers RQuals = RHSCan.getLocalQualifiers();
12294 if (LQuals != RQuals) {
12295 // If any of these qualifiers are different, we have a type mismatch.
12296 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() ||
12297 LQuals.getAddressSpace() != RQuals.getAddressSpace())
12298 return {};
12299
12300 // Exactly one GC qualifier difference is allowed: __strong is
12301 // okay if the other type has no GC qualifier but is an Objective
12302 // C object pointer (i.e. implicitly strong by default). We fix
12303 // this by pretending that the unqualified type was actually
12304 // qualified __strong.
12305 Qualifiers::GC GC_L = LQuals.getObjCGCAttr();
12306 Qualifiers::GC GC_R = RQuals.getObjCGCAttr();
12307 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements");
12308
12309 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak)
12310 return {};
12311
12312 if (GC_L == Qualifiers::Strong)
12313 return LHS;
12314 if (GC_R == Qualifiers::Strong)
12315 return RHS;
12316 return {};
12317 }
12318
12319 if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) {
12320 QualType LHSBaseQT = LHS->castAs<ObjCObjectPointerType>()->getPointeeType();
12321 QualType RHSBaseQT = RHS->castAs<ObjCObjectPointerType>()->getPointeeType();
12322 QualType ResQT = mergeObjCGCQualifiers(LHS: LHSBaseQT, RHS: RHSBaseQT);
12323 if (ResQT == LHSBaseQT)
12324 return LHS;
12325 if (ResQT == RHSBaseQT)
12326 return RHS;
12327 }
12328 return {};
12329}
12330
12331//===----------------------------------------------------------------------===//
12332// Integer Predicates
12333//===----------------------------------------------------------------------===//
12334
12335unsigned ASTContext::getIntWidth(QualType T) const {
12336 if (const auto *ED = T->getAsEnumDecl())
12337 T = ED->getIntegerType();
12338 if (T->isBooleanType())
12339 return 1;
12340 if (const auto *EIT = T->getAs<BitIntType>())
12341 return EIT->getNumBits();
12342 // For builtin types, just use the standard type sizing method
12343 return (unsigned)getTypeSize(T);
12344}
12345
/// Return the unsigned counterpart of the given integer-like type.
/// Vectors are mapped element-wise, _BitInt keeps its width, overflow
/// behavior types are rebuilt over an unsigned underlying type, and enums
/// go through their underlying integer type. Each signed builtin (including
/// fixed-point _Accum/_Fract kinds) maps to its unsigned twin; types that
/// are already unsigned are returned unchanged.
QualType ASTContext::getCorrespondingUnsignedType(QualType T) const {
  assert((T->hasIntegerRepresentation() || T->isEnumeralType() ||
          T->isFixedPointType()) &&
         "Unexpected type");

  // Turn <4 x signed int> -> <4 x unsigned int>
  if (const auto *VTy = T->getAs<VectorType>())
    return getVectorType(vecType: getCorrespondingUnsignedType(T: VTy->getElementType()),
                         NumElts: VTy->getNumElements(), VecKind: VTy->getVectorKind());

  // For _BitInt, return an unsigned _BitInt with same width.
  if (const auto *EITy = T->getAs<BitIntType>())
    return getBitIntType(/*Unsigned=*/IsUnsigned: true, NumBits: EITy->getNumBits());

  // For the overflow behavior types, construct a new unsigned variant
  if (const auto *OBT = T->getAs<OverflowBehaviorType>())
    return getOverflowBehaviorType(
        Kind: OBT->getBehaviorKind(),
        Underlying: getCorrespondingUnsignedType(T: OBT->getUnderlyingType()));

  // For enums, get the underlying integer type of the enum, and let the general
  // integer type signchanging code handle it.
  if (const auto *ED = T->getAsEnumDecl())
    T = ED->getIntegerType();

  switch (T->castAs<BuiltinType>()->getKind()) {
  case BuiltinType::Char_U:
    // Plain `char` is mapped to `unsigned char` even if it's already unsigned
  case BuiltinType::Char_S:
  case BuiltinType::SChar:
  case BuiltinType::Char8:
    return UnsignedCharTy;
  case BuiltinType::Short:
    return UnsignedShortTy;
  case BuiltinType::Int:
    return UnsignedIntTy;
  case BuiltinType::Long:
    return UnsignedLongTy;
  case BuiltinType::LongLong:
    return UnsignedLongLongTy;
  case BuiltinType::Int128:
    return UnsignedInt128Ty;
  // wchar_t is special. It is either signed or not, but when it's signed,
  // there's no matching "unsigned wchar_t". Therefore we return the unsigned
  // version of its underlying type instead.
  case BuiltinType::WChar_S:
    return getUnsignedWCharType();

  // Fixed-point (_Accum/_Fract) kinds, saturating and not, map pairwise to
  // their unsigned counterparts.
  case BuiltinType::ShortAccum:
    return UnsignedShortAccumTy;
  case BuiltinType::Accum:
    return UnsignedAccumTy;
  case BuiltinType::LongAccum:
    return UnsignedLongAccumTy;
  case BuiltinType::SatShortAccum:
    return SatUnsignedShortAccumTy;
  case BuiltinType::SatAccum:
    return SatUnsignedAccumTy;
  case BuiltinType::SatLongAccum:
    return SatUnsignedLongAccumTy;
  case BuiltinType::ShortFract:
    return UnsignedShortFractTy;
  case BuiltinType::Fract:
    return UnsignedFractTy;
  case BuiltinType::LongFract:
    return UnsignedLongFractTy;
  case BuiltinType::SatShortFract:
    return SatUnsignedShortFractTy;
  case BuiltinType::SatFract:
    return SatUnsignedFractTy;
  case BuiltinType::SatLongFract:
    return SatUnsignedLongFractTy;
  default:
    // Anything not listed above must already be unsigned.
    assert((T->hasUnsignedIntegerRepresentation() ||
            T->isUnsignedFixedPointType()) &&
           "Unexpected signed integer or fixed point type");
    return T;
  }
}
12425
/// Return the signed counterpart of the given integer-like type.
/// Mirror image of getCorrespondingUnsignedType: vectors are mapped
/// element-wise, _BitInt keeps its width, enums go through their underlying
/// integer type, and each unsigned builtin (including fixed-point kinds)
/// maps to its signed twin; already-signed types are returned unchanged.
QualType ASTContext::getCorrespondingSignedType(QualType T) const {
  assert((T->hasIntegerRepresentation() || T->isEnumeralType() ||
          T->isFixedPointType()) &&
         "Unexpected type");

  // Turn <4 x unsigned int> -> <4 x signed int>
  if (const auto *VTy = T->getAs<VectorType>())
    return getVectorType(vecType: getCorrespondingSignedType(T: VTy->getElementType()),
                         NumElts: VTy->getNumElements(), VecKind: VTy->getVectorKind());

  // For _BitInt, return a signed _BitInt with same width.
  if (const auto *EITy = T->getAs<BitIntType>())
    return getBitIntType(/*Unsigned=*/IsUnsigned: false, NumBits: EITy->getNumBits());

  // For enums, get the underlying integer type of the enum, and let the general
  // integer type signchanging code handle it.
  if (const auto *ED = T->getAsEnumDecl())
    T = ED->getIntegerType();

  switch (T->castAs<BuiltinType>()->getKind()) {
  case BuiltinType::Char_S:
    // Plain `char` is mapped to `signed char` even if it's already signed
  case BuiltinType::Char_U:
  case BuiltinType::UChar:
  case BuiltinType::Char8:
    return SignedCharTy;
  case BuiltinType::UShort:
    return ShortTy;
  case BuiltinType::UInt:
    return IntTy;
  case BuiltinType::ULong:
    return LongTy;
  case BuiltinType::ULongLong:
    return LongLongTy;
  case BuiltinType::UInt128:
    return Int128Ty;
  // wchar_t is special. It is either unsigned or not, but when it's unsigned,
  // there's no matching "signed wchar_t". Therefore we return the signed
  // version of its underlying type instead.
  case BuiltinType::WChar_U:
    return getSignedWCharType();

  // Fixed-point (_Accum/_Fract) kinds, saturating and not, map pairwise to
  // their signed counterparts.
  case BuiltinType::UShortAccum:
    return ShortAccumTy;
  case BuiltinType::UAccum:
    return AccumTy;
  case BuiltinType::ULongAccum:
    return LongAccumTy;
  case BuiltinType::SatUShortAccum:
    return SatShortAccumTy;
  case BuiltinType::SatUAccum:
    return SatAccumTy;
  case BuiltinType::SatULongAccum:
    return SatLongAccumTy;
  case BuiltinType::UShortFract:
    return ShortFractTy;
  case BuiltinType::UFract:
    return FractTy;
  case BuiltinType::ULongFract:
    return LongFractTy;
  case BuiltinType::SatUShortFract:
    return SatShortFractTy;
  case BuiltinType::SatUFract:
    return SatFractTy;
  case BuiltinType::SatULongFract:
    return SatLongFractTy;
  default:
    // Anything not listed above must already be signed.
    assert(
        (T->hasSignedIntegerRepresentation() || T->isSignedFixedPointType()) &&
        "Unexpected signed integer or fixed point type");
    return T;
  }
}
12499
// Out-of-line defaulted destructor definition for ASTMutationListener.
ASTMutationListener::~ASTMutationListener() = default;
12501
// Default no-op implementation of the DeducedReturnType notification;
// concrete listeners override this to observe deduced return types.
void ASTMutationListener::DeducedReturnType(const FunctionDecl *FD,
                                            QualType ReturnType) {}
12504
12505//===----------------------------------------------------------------------===//
12506// Builtin Type Computation
12507//===----------------------------------------------------------------------===//
12508
/// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the
/// pointer over the consumed characters. This returns the resultant type. If
/// AllowTypeModifiers is false then modifier like * are not parsed, just basic
/// types. This allows "v2i*" to be parsed as a pointer to a v2i instead of
/// a vector of "i*".
///
/// RequiresICE is filled in on return to indicate whether the value is required
/// to be an Integer Constant Expression.
///
/// A descriptor has the shape: [prefix modifiers] base-type [suffix
/// modifiers]. Prefix modifiers set signedness ('S'/'U'), width ('L', plus
/// the target-dependent 'N'/'W'/'Z'/'O'), or flag an ICE requirement ('I').
/// Suffix modifiers (parsed only when AllowTypeModifiers is true) wrap the
/// base type in pointers/references ('*'/'&', optionally followed by an
/// address-space number) or add qualifiers ('C' const, 'D' volatile,
/// 'R' restrict).
static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
                                  ASTContext::GetBuiltinTypeError &Error,
                                  bool &RequiresICE,
                                  bool AllowTypeModifiers) {
  // Modifiers.
  int HowLong = 0;   // Count of 'long' width steps (0 = int ... 3 = __int128).
  bool Signed = false, Unsigned = false;
  RequiresICE = false;

  // Read the prefixed modifiers first.
  bool Done = false;
#ifndef NDEBUG
  // Asserts-only flag: at most one of 'N', 'W', 'Z', 'O' may appear.
  bool IsSpecial = false;
#endif
  while (!Done) {
    switch (*Str++) {
    default: Done = true; --Str; break;
    case 'I':
      RequiresICE = true;
      break;
    case 'S':
      assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!");
      assert(!Signed && "Can't use 'S' modifier multiple times!");
      Signed = true;
      break;
    case 'U':
      assert(!Signed && "Can't use both 'S' and 'U' modifiers!");
      assert(!Unsigned && "Can't use 'U' modifier multiple times!");
      Unsigned = true;
      break;
    case 'L':
      assert(!IsSpecial && "Can't use 'L' with 'W', 'N', 'Z' or 'O' modifiers");
      assert(HowLong <= 2 && "Can't have LLLL modifier");
      ++HowLong;
      break;
    case 'N':
      // 'N' behaves like 'L' for all non LP64 targets and 'int' otherwise.
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'N' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      if (Context.getTargetInfo().getLongWidth() == 32)
        ++HowLong;
      break;
    case 'W':
      // This modifier represents int64 type.
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'W' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      switch (Context.getTargetInfo().getInt64Type()) {
      default:
        llvm_unreachable("Unexpected integer type");
      case TargetInfo::SignedLong:
        HowLong = 1;
        break;
      case TargetInfo::SignedLongLong:
        HowLong = 2;
        break;
      }
      break;
    case 'Z':
      // This modifier represents int32 type.
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'Z' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      switch (Context.getTargetInfo().getIntTypeByWidth(BitWidth: 32, IsSigned: true)) {
      default:
        llvm_unreachable("Unexpected integer type");
      case TargetInfo::SignedInt:
        HowLong = 0;
        break;
      case TargetInfo::SignedLong:
        HowLong = 1;
        break;
      case TargetInfo::SignedLongLong:
        HowLong = 2;
        break;
      }
      break;
    case 'O':
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'O' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      // 'O' means 'long' under OpenCL and 'long long' otherwise.
      if (Context.getLangOpts().OpenCL)
        HowLong = 1;
      else
        HowLong = 2;
      break;
    }
  }

  QualType Type;

  // Read the base type.
  switch (*Str++) {
  default:
    llvm_unreachable("Unknown builtin type letter!");
  case 'x':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'x'!");
    Type = Context.Float16Ty;
    break;
  case 'y':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'y'!");
    Type = Context.BFloat16Ty;
    break;
  case 'v':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'v'!");
    Type = Context.VoidTy;
    break;
  case 'h':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'h'!");
    Type = Context.HalfTy;
    break;
  case 'f':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'f'!");
    Type = Context.FloatTy;
    break;
  case 'd':
    // 'd' scales with 'L': double, long double ('Ld'), __float128 ('LLd').
    assert(HowLong < 3 && !Signed && !Unsigned &&
           "Bad modifiers used with 'd'!");
    if (HowLong == 1)
      Type = Context.LongDoubleTy;
    else if (HowLong == 2)
      Type = Context.Float128Ty;
    else
      Type = Context.DoubleTy;
    break;
  case 's':
    assert(HowLong == 0 && "Bad modifiers used with 's'!");
    if (Unsigned)
      Type = Context.UnsignedShortTy;
    else
      Type = Context.ShortTy;
    break;
  case 'i':
    // 'i' scales with 'L': int, long, long long, __int128.
    if (HowLong == 3)
      Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty;
    else if (HowLong == 2)
      Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy;
    else if (HowLong == 1)
      Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy;
    else
      Type = Unsigned ? Context.UnsignedIntTy : Context.IntTy;
    break;
  case 'c':
    assert(HowLong == 0 && "Bad modifiers used with 'c'!");
    if (Signed)
      Type = Context.SignedCharTy;
    else if (Unsigned)
      Type = Context.UnsignedCharTy;
    else
      Type = Context.CharTy;
    break;
  case 'b': // boolean
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!");
    Type = Context.BoolTy;
    break;
  case 'z': // size_t.
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!");
    Type = Context.getSizeType();
    break;
  case 'w': // wchar_t.
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'w'!");
    Type = Context.getWideCharType();
    break;
  case 'F':
    Type = Context.getCFConstantStringType();
    break;
  case 'G':
    Type = Context.getObjCIdType();
    break;
  case 'H':
    Type = Context.getObjCSelType();
    break;
  case 'M':
    Type = Context.getObjCSuperType();
    break;
  case 'a':
    Type = Context.getBuiltinVaListType();
    assert(!Type.isNull() && "builtin va list type not initialized!");
    break;
  case 'A':
    // This is a "reference" to a va_list; however, what exactly
    // this means depends on how va_list is defined. There are two
    // different kinds of va_list: ones passed by value, and ones
    // passed by reference. An example of a by-value va_list is
    // x86, where va_list is a char*. An example of by-ref va_list
    // is x86-64, where va_list is a __va_list_tag[1]. For x86,
    // we want this argument to be a char*&; for x86-64, we want
    // it to be a __va_list_tag*.
    Type = Context.getBuiltinVaListType();
    assert(!Type.isNull() && "builtin va list type not initialized!");
    if (Type->isArrayType())
      Type = Context.getArrayDecayedType(Ty: Type);
    else
      Type = Context.getLValueReferenceType(T: Type);
    break;
  case 'q': {
    // Scalable vector: 'q' <element-count> <element-type>.
    char *End;
    unsigned NumElements = strtoul(nptr: Str, endptr: &End, base: 10);
    assert(End != Str && "Missing vector size");
    Str = End;

    QualType ElementType = DecodeTypeFromStr(Str, Context, Error,
                                             RequiresICE, AllowTypeModifiers: false);
    assert(!RequiresICE && "Can't require vector ICE");

    Type = Context.getScalableVectorType(EltTy: ElementType, NumElts: NumElements);
    break;
  }
  case 'Q': {
    // Target-specific builtin types, selected by a second letter.
    switch (*Str++) {
    case 'a': {
      Type = Context.SveCountTy;
      break;
    }
    case 'b': {
      Type = Context.AMDGPUBufferRsrcTy;
      break;
    }
    case 't': {
      Type = Context.AMDGPUTextureTy;
      break;
    }
    case 'r': {
      Type = Context.HLSLResourceTy;
      break;
    }
    default:
      llvm_unreachable("Unexpected target builtin type");
    }
    break;
  }
  case 'V': {
    // Fixed-width vector: 'V' <element-count> <element-type>.
    char *End;
    unsigned NumElements = strtoul(nptr: Str, endptr: &End, base: 10);
    assert(End != Str && "Missing vector size");
    Str = End;

    QualType ElementType = DecodeTypeFromStr(Str, Context, Error,
                                             RequiresICE, AllowTypeModifiers: false);
    assert(!RequiresICE && "Can't require vector ICE");

    // TODO: No way to make AltiVec vectors in builtins yet.
    Type = Context.getVectorType(vecType: ElementType, NumElts: NumElements, VecKind: VectorKind::Generic);
    break;
  }
  case 'E': {
    // ext_vector_type: 'E' <element-count> <element-type>.
    char *End;

    unsigned NumElements = strtoul(nptr: Str, endptr: &End, base: 10);
    assert(End != Str && "Missing vector size");

    Str = End;

    QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE,
                                             AllowTypeModifiers: false);
    Type = Context.getExtVectorType(vecType: ElementType, NumElts: NumElements);
    break;
  }
  case 'X': {
    // _Complex of the following element type.
    QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE,
                                             AllowTypeModifiers: false);
    assert(!RequiresICE && "Can't require complex ICE");
    Type = Context.getComplexType(T: ElementType);
    break;
  }
  case 'Y':
    Type = Context.getPointerDiffType();
    break;
  case 'P':
    // FILE* requires <stdio.h> to have been seen; report a decode error
    // through the out-parameter rather than asserting.
    Type = Context.getFILEType();
    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_stdio;
      return {};
    }
    break;
  case 'J':
    // jmp_buf ('J') or sigjmp_buf ('SJ'); needs <setjmp.h> to have been seen.
    if (Signed)
      Type = Context.getsigjmp_bufType();
    else
      Type = Context.getjmp_bufType();

    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_setjmp;
      return {};
    }
    break;
  case 'K':
    // ucontext_t; needs <ucontext.h> to have been seen.
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'K'!");
    Type = Context.getucontext_tType();

    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_ucontext;
      return {};
    }
    break;
  case 'p':
    Type = Context.getProcessIDType();
    break;
  case 'm':
    Type = Context.MFloat8Ty;
    break;
  }

  // If there are modifiers and if we're allowed to parse them, go for it.
  Done = !AllowTypeModifiers;
  while (!Done) {
    switch (char c = *Str++) {
    default: Done = true; --Str; break;
    case '*':
    case '&': {
      // Both pointers and references can have their pointee types
      // qualified with an address space.
      char *End;
      unsigned AddrSpace = strtoul(nptr: Str, endptr: &End, base: 10);
      if (End != Str) {
        // Note AddrSpace == 0 is not the same as an unspecified address space.
        Type = Context.getAddrSpaceQualType(
            T: Type,
            AddressSpace: Context.getLangASForBuiltinAddressSpace(AS: AddrSpace));
        Str = End;
      }
      if (c == '*')
        Type = Context.getPointerType(T: Type);
      else
        Type = Context.getLValueReferenceType(T: Type);
      break;
    }
    // FIXME: There's no way to have a built-in with an rvalue ref arg.
    case 'C':
      Type = Type.withConst();
      break;
    case 'D':
      Type = Context.getVolatileType(T: Type);
      break;
    case 'R':
      Type = Type.withRestrict();
      break;
    }
  }

  assert((!RequiresICE || Type->isIntegralOrEnumerationType()) &&
         "Integer constant 'I' type must be an integer");

  return Type;
}
12876
12877// On some targets such as PowerPC, some of the builtins are defined with custom
12878// type descriptors for target-dependent types. These descriptors are decoded in
12879// other functions, but it may be useful to be able to fall back to default
12880// descriptor decoding to define builtins mixing target-dependent and target-
12881// independent types. This function allows decoding one type descriptor with
12882// default decoding.
QualType ASTContext::DecodeTypeStr(const char *&Str, const ASTContext &Context,
                                   GetBuiltinTypeError &Error, bool &RequireICE,
                                   bool AllowTypeModifiers) const {
  // Thin public wrapper over the file-local DecodeTypeFromStr, exposing the
  // default descriptor decoding to callers outside this file (see comment
  // above about targets with custom descriptors).
  return DecodeTypeFromStr(Str, Context, Error, RequiresICE&: RequireICE, AllowTypeModifiers);
}
12888
/// GetBuiltinType - Return the type for the specified builtin.
///
/// \param Id the builtin ID whose type string is decoded.
/// \param Error receives a GetBuiltinTypeError code when decoding fails
///        (e.g. a required library type such as FILE or jmp_buf is missing);
///        set to GE_None on success.
/// \param IntegerConstantArgs if non-null, receives a bitmask in which bit N
///        is set when argument N is required to be an integer constant
///        expression.
QualType ASTContext::GetBuiltinType(unsigned Id,
                                    GetBuiltinTypeError &Error,
                                    unsigned *IntegerConstantArgs) const {
  const char *TypeStr = BuiltinInfo.getTypeString(ID: Id);
  if (TypeStr[0] == '\0') {
    Error = GE_Missing_type;
    return {};
  }

  SmallVector<QualType, 8> ArgTypes;

  bool RequiresICE = false;
  Error = GE_None;
  // The first descriptor in the type string is the return type.
  QualType ResType = DecodeTypeFromStr(Str&: TypeStr, Context: *this, Error,
                                       RequiresICE, AllowTypeModifiers: true);
  if (Error != GE_None)
    return {};

  assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE");

  // The remaining descriptors are the parameter types, terminated by the end
  // of the string or by '.' (which marks a variadic builtin).
  while (TypeStr[0] && TypeStr[0] != '.') {
    QualType Ty = DecodeTypeFromStr(Str&: TypeStr, Context: *this, Error, RequiresICE, AllowTypeModifiers: true);
    if (Error != GE_None)
      return {};

    // If this argument is required to be an IntegerConstantExpression and the
    // caller cares, fill in the bitmask we return.
    if (RequiresICE && IntegerConstantArgs)
      *IntegerConstantArgs |= 1 << ArgTypes.size();

    // Do array -> pointer decay. The builtin should use the decayed type.
    if (Ty->isArrayType())
      Ty = getArrayDecayedType(Ty);

    ArgTypes.push_back(Elt: Ty);
  }

  if (Id == Builtin::BI__GetExceptionInfo)
    return {};

  assert((TypeStr[0] != '.' || TypeStr[1] == 0) &&
         "'.' should only occur at end of builtin type list!");

  bool Variadic = (TypeStr[0] == '.');

  FunctionType::ExtInfo EI(Target->getDefaultCallingConv());
  if (BuiltinInfo.isNoReturn(ID: Id))
    EI = EI.withNoReturn(noReturn: true);

  // We really shouldn't be making a no-proto type here.
  if (ArgTypes.empty() && Variadic && !getLangOpts().requiresStrictPrototypes())
    return getFunctionNoProtoType(ResultTy: ResType, Info: EI);

  FunctionProtoType::ExtProtoInfo EPI;
  EPI.ExtInfo = EI;
  EPI.Variadic = Variadic;
  // In C++, nothrow builtins carry an exception specification (noexcept in
  // C++11 and later, throw() before that).
  if (getLangOpts().CPlusPlus && BuiltinInfo.isNoThrow(ID: Id))
    EPI.ExceptionSpec.Type =
        getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone;

  return getFunctionType(ResultTy: ResType, Args: ArgTypes, EPI);
}
12952
12953static GVALinkage basicGVALinkageForFunction(const ASTContext &Context,
12954 const FunctionDecl *FD) {
12955 if (!FD->isExternallyVisible())
12956 return GVA_Internal;
12957
12958 // Non-user-provided functions get emitted as weak definitions with every
12959 // use, no matter whether they've been explicitly instantiated etc.
12960 if (!FD->isUserProvided())
12961 return GVA_DiscardableODR;
12962
12963 GVALinkage External;
12964 switch (FD->getTemplateSpecializationKind()) {
12965 case TSK_Undeclared:
12966 case TSK_ExplicitSpecialization:
12967 External = GVA_StrongExternal;
12968 break;
12969
12970 case TSK_ExplicitInstantiationDefinition:
12971 return GVA_StrongODR;
12972
12973 // C++11 [temp.explicit]p10:
12974 // [ Note: The intent is that an inline function that is the subject of
12975 // an explicit instantiation declaration will still be implicitly
12976 // instantiated when used so that the body can be considered for
12977 // inlining, but that no out-of-line copy of the inline function would be
12978 // generated in the translation unit. -- end note ]
12979 case TSK_ExplicitInstantiationDeclaration:
12980 return GVA_AvailableExternally;
12981
12982 case TSK_ImplicitInstantiation:
12983 External = GVA_DiscardableODR;
12984 break;
12985 }
12986
12987 if (!FD->isInlined())
12988 return External;
12989
12990 if ((!Context.getLangOpts().CPlusPlus &&
12991 !Context.getTargetInfo().getCXXABI().isMicrosoft() &&
12992 !FD->hasAttr<DLLExportAttr>()) ||
12993 FD->hasAttr<GNUInlineAttr>()) {
12994 // FIXME: This doesn't match gcc's behavior for dllexport inline functions.
12995
12996 // GNU or C99 inline semantics. Determine whether this symbol should be
12997 // externally visible.
12998 if (FD->isInlineDefinitionExternallyVisible())
12999 return External;
13000
13001 // C99 inline semantics, where the symbol is not externally visible.
13002 return GVA_AvailableExternally;
13003 }
13004
13005 // Functions specified with extern and inline in -fms-compatibility mode
13006 // forcibly get emitted. While the body of the function cannot be later
13007 // replaced, the function definition cannot be discarded.
13008 if (FD->isMSExternInline())
13009 return GVA_StrongODR;
13010
13011 if (Context.getTargetInfo().getCXXABI().isMicrosoft() &&
13012 isa<CXXConstructorDecl>(Val: FD) &&
13013 cast<CXXConstructorDecl>(Val: FD)->isInheritingConstructor())
13014 // Our approach to inheriting constructors is fundamentally different from
13015 // that used by the MS ABI, so keep our inheriting constructor thunks
13016 // internal rather than trying to pick an unambiguous mangling for them.
13017 return GVA_Internal;
13018
13019 return GVA_DiscardableODR;
13020}
13021
13022static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context,
13023 const Decl *D, GVALinkage L) {
13024 // See http://msdn.microsoft.com/en-us/library/xa0d9ste.aspx
13025 // dllexport/dllimport on inline functions.
13026 if (D->hasAttr<DLLImportAttr>()) {
13027 if (L == GVA_DiscardableODR || L == GVA_StrongODR)
13028 return GVA_AvailableExternally;
13029 } else if (D->hasAttr<DLLExportAttr>()) {
13030 if (L == GVA_DiscardableODR)
13031 return GVA_StrongODR;
13032 } else if (Context.getLangOpts().CUDA && Context.getLangOpts().CUDAIsDevice) {
13033 // Device-side functions with __global__ attribute must always be
13034 // visible externally so they can be launched from host.
13035 if (D->hasAttr<CUDAGlobalAttr>() &&
13036 (L == GVA_DiscardableODR || L == GVA_Internal))
13037 return GVA_StrongODR;
13038 // Single source offloading languages like CUDA/HIP need to be able to
13039 // access static device variables from host code of the same compilation
13040 // unit. This is done by externalizing the static variable with a shared
13041 // name between the host and device compilation which is the same for the
13042 // same compilation unit whereas different among different compilation
13043 // units.
13044 if (Context.shouldExternalize(D))
13045 return GVA_StrongExternal;
13046 }
13047 return L;
13048}
13049
13050/// Adjust the GVALinkage for a declaration based on what an external AST source
13051/// knows about whether there can be other definitions of this declaration.
13052static GVALinkage
13053adjustGVALinkageForExternalDefinitionKind(const ASTContext &Ctx, const Decl *D,
13054 GVALinkage L) {
13055 ExternalASTSource *Source = Ctx.getExternalSource();
13056 if (!Source)
13057 return L;
13058
13059 switch (Source->hasExternalDefinitions(D)) {
13060 case ExternalASTSource::EK_Never:
13061 // Other translation units rely on us to provide the definition.
13062 if (L == GVA_DiscardableODR)
13063 return GVA_StrongODR;
13064 break;
13065
13066 case ExternalASTSource::EK_Always:
13067 return GVA_AvailableExternally;
13068
13069 case ExternalASTSource::EK_ReplyHazy:
13070 break;
13071 }
13072 return L;
13073}
13074
13075GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const {
13076 return adjustGVALinkageForExternalDefinitionKind(Ctx: *this, D: FD,
13077 L: adjustGVALinkageForAttributes(Context: *this, D: FD,
13078 L: basicGVALinkageForFunction(Context: *this, FD)));
13079}
13080
/// Compute the GVA linkage of variable \p VD from its declaration properties
/// alone; attribute- and external-AST-source-based adjustments are layered on
/// top by GetGVALinkageForVariable.
static GVALinkage basicGVALinkageForVariable(const ASTContext &Context,
                                             const VarDecl *VD) {
  // As an extension for interactive REPLs, make sure constant variables are
  // only emitted once instead of LinkageComputer::getLVForNamespaceScopeDecl
  // marking them as internal.
  if (Context.getLangOpts().CPlusPlus &&
      Context.getLangOpts().IncrementalExtensions &&
      VD->getType().isConstQualified() &&
      !VD->getType().isVolatileQualified() && !VD->isInline() &&
      !isa<VarTemplateSpecializationDecl>(Val: VD) && !VD->getDescribedVarTemplate())
    return GVA_DiscardableODR;

  if (!VD->isExternallyVisible())
    return GVA_Internal;

  if (VD->isStaticLocal()) {
    // Walk up to the nearest enclosing FunctionDecl, skipping intermediate
    // lexical scopes.
    const DeclContext *LexicalContext = VD->getParentFunctionOrMethod();
    while (LexicalContext && !isa<FunctionDecl>(Val: LexicalContext))
      LexicalContext = LexicalContext->getLexicalParent();

    // ObjC Blocks can create local variables that don't have a FunctionDecl
    // LexicalContext.
    if (!LexicalContext)
      return GVA_DiscardableODR;

    // Otherwise, let the static local variable inherit its linkage from the
    // nearest enclosing function.
    auto StaticLocalLinkage =
        Context.GetGVALinkageForFunction(FD: cast<FunctionDecl>(Val: LexicalContext));

    // Itanium ABI 5.2.2: "Each COMDAT group [for a static local variable] must
    // be emitted in any object with references to the symbol for the object it
    // contains, whether inline or out-of-line."
    // Similar behavior is observed with MSVC. An alternative ABI could use
    // StrongODR/AvailableExternally to match the function, but none are
    // known/supported currently.
    if (StaticLocalLinkage == GVA_StrongODR ||
        StaticLocalLinkage == GVA_AvailableExternally)
      return GVA_DiscardableODR;
    return StaticLocalLinkage;
  }

  // MSVC treats in-class initialized static data members as definitions.
  // By giving them non-strong linkage, out-of-line definitions won't
  // cause link errors.
  if (Context.isMSStaticDataMemberInlineDefinition(VD))
    return GVA_DiscardableODR;

  // Most non-template variables have strong linkage; inline variables are
  // linkonce_odr or (occasionally, for compatibility) weak_odr.
  GVALinkage StrongLinkage;
  switch (Context.getInlineVariableDefinitionKind(VD)) {
  case ASTContext::InlineVariableDefinitionKind::None:
    StrongLinkage = GVA_StrongExternal;
    break;
  case ASTContext::InlineVariableDefinitionKind::Weak:
  case ASTContext::InlineVariableDefinitionKind::WeakUnknown:
    StrongLinkage = GVA_DiscardableODR;
    break;
  case ASTContext::InlineVariableDefinitionKind::Strong:
    StrongLinkage = GVA_StrongODR;
    break;
  }

  // Template specializations/instantiations may override the baseline.
  switch (VD->getTemplateSpecializationKind()) {
  case TSK_Undeclared:
    return StrongLinkage;

  case TSK_ExplicitSpecialization:
    // Under the Microsoft ABI, explicit specializations of static data
    // members get StrongODR linkage.
    return Context.getTargetInfo().getCXXABI().isMicrosoft() &&
                   VD->isStaticDataMember()
               ? GVA_StrongODR
               : StrongLinkage;

  case TSK_ExplicitInstantiationDefinition:
    return GVA_StrongODR;

  case TSK_ExplicitInstantiationDeclaration:
    return GVA_AvailableExternally;

  case TSK_ImplicitInstantiation:
    return GVA_DiscardableODR;
  }

  llvm_unreachable("Invalid Linkage!");
}
13167
13168GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) const {
13169 return adjustGVALinkageForExternalDefinitionKind(Ctx: *this, D: VD,
13170 L: adjustGVALinkageForAttributes(Context: *this, D: VD,
13171 L: basicGVALinkageForVariable(Context: *this, VD)));
13172}
13173
// Determine whether \p D must be emitted to the output even if it is never
// referenced: returns true when emission is mandatory, false when it may be
// deferred (or skipped entirely). Only file-scope variables, functions, and a
// handful of pragma/OpenMP/import declarations can require emission.
bool ASTContext::DeclMustBeEmitted(const Decl *D) {
  if (const auto *VD = dyn_cast<VarDecl>(Val: D)) {
    if (!VD->isFileVarDecl())
      return false;
    // Global named register variables (GNU extension) are never emitted.
    if (VD->getStorageClass() == SC_Register)
      return false;
    // Templates and partial specializations are patterns, not objects.
    if (VD->getDescribedVarTemplate() ||
        isa<VarTemplatePartialSpecializationDecl>(Val: VD))
      return false;
  } else if (const auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
    // We never need to emit an uninstantiated function template.
    if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
      return false;
  } else if (isa<PragmaCommentDecl>(Val: D))
    return true;
  else if (isa<PragmaDetectMismatchDecl>(Val: D))
    return true;
  else if (isa<OMPRequiresDecl>(Val: D))
    return true;
  else if (isa<OMPThreadPrivateDecl>(Val: D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<OMPAllocateDecl>(Val: D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<OMPDeclareReductionDecl>(Val: D) || isa<OMPDeclareMapperDecl>(Val: D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<ImportDecl>(Val: D))
    return true;
  else
    return false;

  // If this is a member of a class template, we do not need to emit it.
  if (D->getDeclContext()->isDependentContext())
    return false;

  // Weak references don't produce any output by themselves.
  if (D->hasAttr<WeakRefAttr>())
    return false;

  // SYCL device compilation requires that functions defined with the
  // sycl_kernel_entry_point or sycl_external attributes be emitted. All
  // other entities are emitted only if they are used by a function
  // defined with one of those attributes.
  if (LangOpts.SYCLIsDevice)
    return isa<FunctionDecl>(Val: D) && (D->hasAttr<SYCLKernelEntryPointAttr>() ||
                                     D->hasAttr<SYCLExternalAttr>());

  // Aliases and used decls are required.
  if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>())
    return true;

  if (const auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
    // Forward declarations aren't required.
    if (!FD->doesThisDeclarationHaveABody())
      return FD->doesDeclarationForceExternallyVisibleDefinition();

    // Constructors and destructors are required.
    if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>())
      return true;

    // The key function for a class is required. This rule only comes
    // into play when inline functions can be key functions, though.
    if (getTargetInfo().getCXXABI().canKeyFunctionBeInline()) {
      if (const auto *MD = dyn_cast<CXXMethodDecl>(Val: FD)) {
        const CXXRecordDecl *RD = MD->getParent();
        if (MD->isOutOfLine() && RD->isDynamicClass()) {
          const CXXMethodDecl *KeyFunc = getCurrentKeyFunction(RD);
          if (KeyFunc && KeyFunc->getCanonicalDecl() == MD->getCanonicalDecl())
            return true;
        }
      }
    }

    GVALinkage Linkage = GetGVALinkageForFunction(FD);

    // static, static inline, always_inline, and extern inline functions can
    // always be deferred. Normal inline functions can be deferred in C99/C++.
    // Implicit template instantiations can also be deferred in C++.
    return !isDiscardableGVALinkage(L: Linkage);
  }

  // Only a VarDecl can reach this point (see the initial dispatch above).
  const auto *VD = cast<VarDecl>(Val: D);
  assert(VD->isFileVarDecl() && "Expected file scoped var");

  // If the decl is marked as `declare target to`, it should be emitted for the
  // host and for the device.
  if (LangOpts.OpenMP &&
      OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
    return true;

  if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly &&
      !isMSStaticDataMemberInlineDefinition(VD))
    return false;

  // Another AST file owns the definition; do not emit it from here.
  if (VD->shouldEmitInExternalSource())
    return false;

  // Variables that can be needed in other TUs are required.
  auto Linkage = GetGVALinkageForVariable(VD);
  if (!isDiscardableGVALinkage(L: Linkage))
    return true;

  // We never need to emit a variable that is available in another TU.
  if (Linkage == GVA_AvailableExternally)
    return false;

  // Variables that have destruction with side-effects are required.
  if (VD->needsDestruction(Ctx: *this))
    return true;

  // Variables that have initialization with side-effects are required.
  if (VD->hasInitWithSideEffects())
    return true;

  // Likewise, variables with tuple-like bindings are required if their
  // bindings have side-effects.
  if (const auto *DD = dyn_cast<DecompositionDecl>(Val: VD)) {
    for (const auto *BD : DD->flat_bindings())
      if (const auto *BindingVD = BD->getHoldingVar())
        if (DeclMustBeEmitted(D: BindingVD))
          return true;
  }

  return false;
}
13299
13300void ASTContext::forEachMultiversionedFunctionVersion(
13301 const FunctionDecl *FD,
13302 llvm::function_ref<void(FunctionDecl *)> Pred) const {
13303 assert(FD->isMultiVersion() && "Only valid for multiversioned functions");
13304 llvm::SmallDenseSet<const FunctionDecl*, 4> SeenDecls;
13305 FD = FD->getMostRecentDecl();
13306 // FIXME: The order of traversal here matters and depends on the order of
13307 // lookup results, which happens to be (mostly) oldest-to-newest, but we
13308 // shouldn't rely on that.
13309 for (auto *CurDecl :
13310 FD->getDeclContext()->getRedeclContext()->lookup(Name: FD->getDeclName())) {
13311 FunctionDecl *CurFD = CurDecl->getAsFunction()->getMostRecentDecl();
13312 if (CurFD && hasSameType(T1: CurFD->getType(), T2: FD->getType()) &&
13313 SeenDecls.insert(V: CurFD).second) {
13314 Pred(CurFD);
13315 }
13316 }
13317}
13318
// Compute the calling convention used for a function declared without an
// explicit convention. C++ instance methods delegate to the C++ ABI object;
// otherwise the -fdefault-calling-conv language option is consulted, falling
// back to the target's default convention.
CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic,
                                                    bool IsCXXMethod) const {
  // Pass through to the C++ ABI object
  if (IsCXXMethod)
    return ABI->getDefaultMethodCallConv(isVariadic: IsVariadic);

  switch (LangOpts.getDefaultCallingConv()) {
  case LangOptions::DCC_None:
    break;
  case LangOptions::DCC_CDecl:
    return CC_C;
  case LangOptions::DCC_FastCall:
    // __fastcall is only used here when SSE2 is available and the function
    // is not variadic.
    if (getTargetInfo().hasFeature(Feature: "sse2") && !IsVariadic)
      return CC_X86FastCall;
    break;
  case LangOptions::DCC_StdCall:
    // __stdcall cannot be applied to variadic functions.
    if (!IsVariadic)
      return CC_X86StdCall;
    break;
  case LangOptions::DCC_VectorCall:
    // __vectorcall cannot be applied to variadic functions.
    if (!IsVariadic)
      return CC_X86VectorCall;
    break;
  case LangOptions::DCC_RegCall:
    // __regcall cannot be applied to variadic functions.
    if (!IsVariadic)
      return CC_X86RegCall;
    break;
  case LangOptions::DCC_RtdCall:
    // m68k rtd convention cannot be applied to variadic functions.
    if (!IsVariadic)
      return CC_M68kRTD;
    break;
  }
  // No (usable) default was requested: use the target's own default.
  return Target->getDefaultCallingConv();
}
13355
// Whether \p RD is a "nearly empty" class in the ABI sense; the decision is
// delegated entirely to the C++ ABI object.
bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const {
  // Pass through to the C++ ABI object
  return ABI->isNearlyEmpty(RD);
}
13360
13361VTableContextBase *ASTContext::getVTableContext() {
13362 if (!VTContext) {
13363 auto ABI = Target->getCXXABI();
13364 if (ABI.isMicrosoft())
13365 VTContext.reset(p: new MicrosoftVTableContext(*this));
13366 else {
13367 auto ComponentLayout = getLangOpts().RelativeCXXABIVTables
13368 ? ItaniumVTableContext::Relative
13369 : ItaniumVTableContext::Pointer;
13370 VTContext.reset(p: new ItaniumVTableContext(*this, ComponentLayout));
13371 }
13372 }
13373 return VTContext.get();
13374}
13375
// Create a fresh mangle context for the given target (defaulting to this
// context's target). The caller owns the returned object. Itanium-family
// ABIs share one mangler implementation; Microsoft has its own.
MangleContext *ASTContext::createMangleContext(const TargetInfo *T) {
  if (!T)
    T = Target;
  switch (T->getCXXABI().getKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::XL:
    return ItaniumMangleContext::create(Context&: *this, Diags&: getDiagnostics());
  case TargetCXXABI::Microsoft:
    return MicrosoftMangleContext::create(Context&: *this, Diags&: getDiagnostics());
  }
  llvm_unreachable("Unsupported ABI");
}
13396
// Create a mangle context for the *device* side of a heterogeneous
// compilation targeting \p T. For Itanium-family ABIs, lambdas are
// discriminated with the device-specific lambda mangling number rather than
// the host one. Microsoft device mangling is not supported (asserted below),
// though the switch still covers the case for exhaustiveness.
MangleContext *ASTContext::createDeviceMangleContext(const TargetInfo &T) {
  assert(T.getCXXABI().getKind() != TargetCXXABI::Microsoft &&
         "Device mangle context does not support Microsoft mangling.");
  switch (T.getCXXABI().getKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::XL:
    return ItaniumMangleContext::create(
        Context&: *this, Diags&: getDiagnostics(),
        Discriminator: [](ASTContext &, const NamedDecl *ND) -> UnsignedOrNone {
          // Use the device-side lambda numbering for lambda closure types.
          if (const auto *RD = dyn_cast<CXXRecordDecl>(Val: ND))
            return RD->getDeviceLambdaManglingNumber();
          return std::nullopt;
        },
        /*IsAux=*/true);
  case TargetCXXABI::Microsoft:
    return MicrosoftMangleContext::create(Context&: *this, Diags&: getDiagnostics(),
                                          /*IsAux=*/true);
  }
  llvm_unreachable("Unsupported ABI");
}
13425
// Create the mangle context used for the device side of a CUDA/HIP
// compilation whose aux target describes the device.
MangleContext *ASTContext::cudaNVInitDeviceMC() {
  // If the host and device have different C++ ABIs, mark it as the device
  // mangle context so that the mangling needs to retrieve the additional
  // device lambda mangling number instead of the regular host one.
  if (getAuxTargetInfo() && getTargetInfo().getCXXABI().isMicrosoft() &&
      getAuxTargetInfo()->getCXXABI().isItaniumFamily()) {
    return createDeviceMangleContext(T: *getAuxTargetInfo());
  }

  // Same ABI family on both sides: a regular mangle context suffices.
  return createMangleContext(T: getAuxTargetInfo());
}
13437
13438CXXABI::~CXXABI() = default;
13439
// Approximate the memory (in bytes) held by the ASTContext's side tables,
// summing the capacities of the various per-decl/per-type maps. Used for
// -print-stats style reporting; not an exact accounting.
size_t ASTContext::getSideTableAllocatedMemory() const {
  return ASTRecordLayouts.getMemorySize() +
         llvm::capacity_in_bytes(X: ObjCLayouts) +
         llvm::capacity_in_bytes(X: KeyFunctions) +
         llvm::capacity_in_bytes(X: ObjCImpls) +
         llvm::capacity_in_bytes(X: BlockVarCopyInits) +
         llvm::capacity_in_bytes(X: DeclAttrs) +
         llvm::capacity_in_bytes(X: TemplateOrInstantiation) +
         llvm::capacity_in_bytes(X: InstantiatedFromUsingDecl) +
         llvm::capacity_in_bytes(X: InstantiatedFromUsingShadowDecl) +
         llvm::capacity_in_bytes(X: InstantiatedFromUnnamedFieldDecl) +
         llvm::capacity_in_bytes(X: OverriddenMethods) +
         llvm::capacity_in_bytes(X: Types) +
         llvm::capacity_in_bytes(x: VariableArrayTypes);
}
13455
/// getIntTypeForBitwidth -
/// Returns the integer QualType with the specified bitwidth and signedness.
/// Returns a null QualType if the target has no appropriate type.
QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth,
                                           unsigned Signed) const {
  TargetInfo::IntType Ty = getTargetInfo().getIntTypeByWidth(BitWidth: DestWidth, IsSigned: Signed);
  CanQualType QualTy = getFromTargetType(Type: Ty);
  // 128-bit integers are not modeled as a TargetInfo::IntType; special-case
  // them to the __int128 builtin types.
  if (!QualTy && DestWidth == 128)
    return Signed ? Int128Ty : UnsignedInt128Ty;
  return QualTy;
}
13468
/// getRealTypeForBitwidth -
/// Returns the floating-point QualType with the specified bitwidth,
/// optionally constrained to an explicitly requested float mode.
/// Returns a null QualType if the target has no appropriate type.
QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth,
                                            FloatModeKind ExplicitType) const {
  FloatModeKind Ty =
      getTargetInfo().getRealTypeByWidth(BitWidth: DestWidth, ExplicitType);
  switch (Ty) {
  case FloatModeKind::Half:
    return HalfTy;
  case FloatModeKind::Float:
    return FloatTy;
  case FloatModeKind::Double:
    return DoubleTy;
  case FloatModeKind::LongDouble:
    return LongDoubleTy;
  case FloatModeKind::Float128:
    return Float128Ty;
  case FloatModeKind::Ibm128:
    return Ibm128Ty;
  case FloatModeKind::NoFloat:
    // The target has no floating-point type of this width.
    return {};
  }

  llvm_unreachable("Unhandled TargetInfo::RealType value");
}
13495
13496void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) {
13497 if (Number <= 1)
13498 return;
13499
13500 MangleNumbers[ND] = Number;
13501
13502 if (Listener)
13503 Listener->AddedManglingNumber(D: ND, Number);
13504}
13505
// Retrieve the mangling number recorded for \p ND, defaulting to 1 when none
// was recorded via setManglingNumber.
unsigned ASTContext::getManglingNumber(const NamedDecl *ND,
                                       bool ForAuxTarget) const {
  auto I = MangleNumbers.find(Key: ND);
  unsigned Res = I != MangleNumbers.end() ? I->second : 1;
  // CUDA/HIP host compilation encodes host and device mangling numbers
  // as lower and upper half of 32 bit integer.
  if (LangOpts.CUDA && !LangOpts.CUDAIsDevice) {
    Res = ForAuxTarget ? Res >> 16 : Res & 0xFFFF;
  } else {
    assert(!ForAuxTarget && "Only CUDA/HIP host compilation supports mangling "
                            "number for aux target");
  }
  // The selected half may be 0 after the CUDA/HIP split; clamp back up to the
  // default number 1.
  return Res > 1 ? Res : 1;
}
13520
13521void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) {
13522 if (Number <= 1)
13523 return;
13524
13525 StaticLocalNumbers[VD] = Number;
13526
13527 if (Listener)
13528 Listener->AddedStaticLocalNumbers(D: VD, Number);
13529}
13530
13531unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const {
13532 auto I = StaticLocalNumbers.find(Key: VD);
13533 return I != StaticLocalNumbers.end() ? I->second : 1;
13534}
13535
// Record whether \p FD is a C++ destroying operator delete. Membership is
// tracked on the canonical declaration; clearing the flag is not supported
// (the false case only asserts the flag was never set).
void ASTContext::setIsDestroyingOperatorDelete(const FunctionDecl *FD,
                                               bool IsDestroying) {
  if (!IsDestroying) {
    assert(!DestroyingOperatorDeletes.contains(FD->getCanonicalDecl()));
    return;
  }
  DestroyingOperatorDeletes.insert(V: FD->getCanonicalDecl());
}
13544
// Whether \p FD was recorded as a destroying operator delete (keyed on the
// canonical declaration).
bool ASTContext::isDestroyingOperatorDelete(const FunctionDecl *FD) const {
  return DestroyingOperatorDeletes.contains(V: FD->getCanonicalDecl());
}
13548
// Record whether \p FD is a type-aware operator new/delete. Membership is
// tracked on the canonical declaration; clearing the flag is not supported
// (the false case only asserts the flag was never set).
void ASTContext::setIsTypeAwareOperatorNewOrDelete(const FunctionDecl *FD,
                                                   bool IsTypeAware) {
  if (!IsTypeAware) {
    assert(!TypeAwareOperatorNewAndDeletes.contains(FD->getCanonicalDecl()));
    return;
  }
  TypeAwareOperatorNewAndDeletes.insert(V: FD->getCanonicalDecl());
}
13557
// Whether \p FD was recorded as a type-aware operator new/delete (keyed on
// the canonical declaration).
bool ASTContext::isTypeAwareOperatorNewOrDelete(const FunctionDecl *FD) const {
  return TypeAwareOperatorNewAndDeletes.contains(V: FD->getCanonicalDecl());
}
13561
13562void ASTContext::addOperatorDeleteForVDtor(const CXXDestructorDecl *Dtor,
13563 FunctionDecl *OperatorDelete,
13564 OperatorDeleteKind K) const {
13565 switch (K) {
13566 case OperatorDeleteKind::Regular:
13567 OperatorDeletesForVirtualDtor[Dtor->getCanonicalDecl()] = OperatorDelete;
13568 break;
13569 case OperatorDeleteKind::GlobalRegular:
13570 GlobalOperatorDeletesForVirtualDtor[Dtor->getCanonicalDecl()] =
13571 OperatorDelete;
13572 break;
13573 case OperatorDeleteKind::Array:
13574 ArrayOperatorDeletesForVirtualDtor[Dtor->getCanonicalDecl()] =
13575 OperatorDelete;
13576 break;
13577 case OperatorDeleteKind::ArrayGlobal:
13578 GlobalArrayOperatorDeletesForVirtualDtor[Dtor->getCanonicalDecl()] =
13579 OperatorDelete;
13580 break;
13581 }
13582}
13583
// Whether an operator delete of kind \p K has been recorded (via
// addOperatorDeleteForVDtor) for the canonical declaration of \p Dtor.
bool ASTContext::dtorHasOperatorDelete(const CXXDestructorDecl *Dtor,
                                       OperatorDeleteKind K) const {
  switch (K) {
  case OperatorDeleteKind::Regular:
    return OperatorDeletesForVirtualDtor.contains(Val: Dtor->getCanonicalDecl());
  case OperatorDeleteKind::GlobalRegular:
    return GlobalOperatorDeletesForVirtualDtor.contains(
        Val: Dtor->getCanonicalDecl());
  case OperatorDeleteKind::Array:
    return ArrayOperatorDeletesForVirtualDtor.contains(
        Val: Dtor->getCanonicalDecl());
  case OperatorDeleteKind::ArrayGlobal:
    return GlobalArrayOperatorDeletesForVirtualDtor.contains(
        Val: Dtor->getCanonicalDecl());
  }
  // Not reachable for valid kinds; keeps compilers without exhaustive-switch
  // analysis quiet.
  return false;
}
13601
13602FunctionDecl *
13603ASTContext::getOperatorDeleteForVDtor(const CXXDestructorDecl *Dtor,
13604 OperatorDeleteKind K) const {
13605 const CXXDestructorDecl *Canon = Dtor->getCanonicalDecl();
13606 switch (K) {
13607 case OperatorDeleteKind::Regular:
13608 if (OperatorDeletesForVirtualDtor.contains(Val: Canon))
13609 return OperatorDeletesForVirtualDtor[Canon];
13610 return nullptr;
13611 case OperatorDeleteKind::GlobalRegular:
13612 if (GlobalOperatorDeletesForVirtualDtor.contains(Val: Canon))
13613 return GlobalOperatorDeletesForVirtualDtor[Canon];
13614 return nullptr;
13615 case OperatorDeleteKind::Array:
13616 if (ArrayOperatorDeletesForVirtualDtor.contains(Val: Canon))
13617 return ArrayOperatorDeletesForVirtualDtor[Canon];
13618 return nullptr;
13619 case OperatorDeleteKind::ArrayGlobal:
13620 if (GlobalArrayOperatorDeletesForVirtualDtor.contains(Val: Canon))
13621 return GlobalArrayOperatorDeletesForVirtualDtor[Canon];
13622 return nullptr;
13623 }
13624 return nullptr;
13625}
13626
// Whether \p RD requires emission of a vector deleting destructor: either
// its virtual destructor is dllexport (matching MSVC), or the class was
// explicitly registered via setClassNeedsVectorDeletingDestructor. Always
// false on targets that do not emit vector deleting dtors.
bool ASTContext::classNeedsVectorDeletingDestructor(const CXXRecordDecl *RD) {
  if (!getTargetInfo().emitVectorDeletingDtors(getLangOpts()))
    return false;
  CXXDestructorDecl *Dtor = RD->getDestructor();
  // The compiler can't know if new[]/delete[] will be used outside of the DLL,
  // so just force vector deleting destructor emission if dllexport is present.
  // This matches MSVC behavior.
  if (Dtor && Dtor->isVirtual() && Dtor->hasAttr<DLLExportAttr>())
    return true;

  return RequireVectorDeletingDtor.count(V: RD);
}
13639
// Register \p RD as requiring a vector deleting destructor. A no-op on
// targets that never emit vector deleting dtors.
void ASTContext::setClassNeedsVectorDeletingDestructor(
    const CXXRecordDecl *RD) {
  if (!getTargetInfo().emitVectorDeletingDtors(getLangOpts()))
    return;
  RequireVectorDeletingDtor.insert(V: RD);
}
13646
13647MangleNumberingContext &
13648ASTContext::getManglingNumberContext(const DeclContext *DC) {
13649 assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
13650 std::unique_ptr<MangleNumberingContext> &MCtx = MangleNumberingContexts[DC];
13651 if (!MCtx)
13652 MCtx = createMangleNumberingContext();
13653 return *MCtx;
13654}
13655
// Lazily create/retrieve the extra mangling-number context associated with a
// specific declaration (rather than a DeclContext).
MangleNumberingContext &
ASTContext::getManglingNumberContext(NeedExtraManglingDecl_t, const Decl *D) {
  assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
  std::unique_ptr<MangleNumberingContext> &MCtx =
      ExtraMangleNumberingContexts[D];
  if (!MCtx)
    MCtx = createMangleNumberingContext();
  return *MCtx;
}
13665
// Construct a new mangling-number context; the concrete type is chosen by
// the C++ ABI object.
std::unique_ptr<MangleNumberingContext>
ASTContext::createMangleNumberingContext() const {
  return ABI->createMangleNumberingContext();
}
13670
// Look up the copy constructor recorded for \p RD as an exception object;
// delegated to the C++ ABI object, keyed on the first declaration.
const CXXConstructorDecl *
ASTContext::getCopyConstructorForExceptionObject(CXXRecordDecl *RD) {
  return ABI->getCopyConstructorForExceptionObject(
      cast<CXXRecordDecl>(Val: RD->getFirstDecl()));
}
13676
// Record \p CD as the copy constructor used when \p RD is thrown as an
// exception object; delegated to the C++ ABI object, keyed on the first
// declarations of both.
void ASTContext::addCopyConstructorForExceptionObject(CXXRecordDecl *RD,
                                                      CXXConstructorDecl *CD) {
  return ABI->addCopyConstructorForExceptionObject(
      cast<CXXRecordDecl>(Val: RD->getFirstDecl()),
      cast<CXXConstructorDecl>(Val: CD->getFirstDecl()));
}
13683
// Associate the typedef-name \p DD with the unnamed tag \p TD (pass-through
// to the C++ ABI object).
void ASTContext::addTypedefNameForUnnamedTagDecl(TagDecl *TD,
                                                 TypedefNameDecl *DD) {
  return ABI->addTypedefNameForUnnamedTagDecl(TD, DD);
}
13688
// Retrieve the typedef-name associated with the unnamed tag \p TD
// (pass-through to the C++ ABI object).
TypedefNameDecl *
ASTContext::getTypedefNameForUnnamedTagDecl(const TagDecl *TD) {
  return ABI->getTypedefNameForUnnamedTagDecl(TD);
}
13693
// Associate the declarator \p DD with the unnamed tag \p TD (pass-through to
// the C++ ABI object).
void ASTContext::addDeclaratorForUnnamedTagDecl(TagDecl *TD,
                                                DeclaratorDecl *DD) {
  return ABI->addDeclaratorForUnnamedTagDecl(TD, DD);
}
13698
// Retrieve the declarator associated with the unnamed tag \p TD
// (pass-through to the C++ ABI object).
DeclaratorDecl *ASTContext::getDeclaratorForUnnamedTagDecl(const TagDecl *TD) {
  return ABI->getDeclaratorForUnnamedTagDecl(TD);
}
13702
// Record the (possibly large) parameter index for \p D in the side table;
// paired with getParameterIndex below.
void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) {
  ParamIndices[D] = index;
}
13706
// Retrieve the parameter index previously recorded via setParameterIndex.
// Asserts if no entry exists for \p D.
unsigned ASTContext::getParameterIndex(const ParmVarDecl *D) const {
  ParameterIndexTable::const_iterator I = ParamIndices.find(Val: D);
  assert(I != ParamIndices.end() &&
         "ParmIndices lacks entry set by ParmVarDecl");
  return I->second;
}
13713
// Build the constant array type for a string literal of \p Length characters
// of element type \p EltTy (const-qualified in C++/-fconst-strings, and
// including room for the null terminator).
QualType ASTContext::getStringLiteralArrayType(QualType EltTy,
                                               unsigned Length) const {
  // A C++ string literal has a const-qualified element type (C++ 2.13.4p1).
  if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings)
    EltTy = EltTy.withConst();

  EltTy = adjustStringLiteralBaseType(Ty: EltTy);

  // Get an array type for the string, according to C99 6.4.5. This includes
  // the null terminator character.
  return getConstantArrayType(EltTy, ArySizeIn: llvm::APInt(32, Length + 1), SizeExpr: nullptr,
                              ASM: ArraySizeModifier::Normal, /*IndexTypeQuals*/ 0);
}
13727
// Return a uniqued ordinary StringLiteral for \p Key, creating and caching
// it on first request (used for predefined identifiers like __func__).
StringLiteral *
ASTContext::getPredefinedStringLiteralFromCache(StringRef Key) const {
  StringLiteral *&Result = StringLiteralCache[Key];
  if (!Result)
    Result = StringLiteral::Create(
        Ctx: *this, Str: Key, Kind: StringLiteralKind::Ordinary,
        /*Pascal*/ false, Ty: getStringLiteralArrayType(EltTy: CharTy, Length: Key.size()),
        Locs: SourceLocation());
  return Result;
}
13738
// Return the uniqued MSGuidDecl for the given GUID value, creating it on
// first request. GUID declarations are folded so that identical GUIDs share
// one declaration.
MSGuidDecl *
ASTContext::getMSGuidDecl(MSGuidDecl::Parts Parts) const {
  assert(MSGuidTagDecl && "building MS GUID without MS extensions?");

  llvm::FoldingSetNodeID ID;
  MSGuidDecl::Profile(ID, P: Parts);

  void *InsertPos;
  if (MSGuidDecl *Existing = MSGuidDecls.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;

  // GUID objects are const-qualified (see getMSGuidType()).
  QualType GUIDType = getMSGuidType().withConst();
  MSGuidDecl *New = MSGuidDecl::Create(C: *this, T: GUIDType, P: Parts);
  MSGuidDecls.InsertNode(N: New, InsertPos);
  return New;
}
13755
// Return the uniqued UnnamedGlobalConstantDecl for the (type, value) pair,
// creating it on first request.
UnnamedGlobalConstantDecl *
ASTContext::getUnnamedGlobalConstantDecl(QualType Ty,
                                         const APValue &APVal) const {
  llvm::FoldingSetNodeID ID;
  UnnamedGlobalConstantDecl::Profile(ID, Ty, APVal);

  void *InsertPos;
  if (UnnamedGlobalConstantDecl *Existing =
          UnnamedGlobalConstantDecls.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;

  UnnamedGlobalConstantDecl *New =
      UnnamedGlobalConstantDecl::Create(C: *this, T: Ty, APVal);
  UnnamedGlobalConstantDecls.InsertNode(N: New, InsertPos);
  return New;
}
13772
// Return the uniqued TemplateParamObjectDecl for a class-type non-type
// template parameter with value \p V, creating it on first request. The
// object's type is const-qualified per the standard.
TemplateParamObjectDecl *
ASTContext::getTemplateParamObjectDecl(QualType T, const APValue &V) const {
  assert(T->isRecordType() && "template param object of unexpected type");

  // C++ [temp.param]p8:
  //   [...] a static storage duration object of type 'const T' [...]
  T.addConst();

  llvm::FoldingSetNodeID ID;
  TemplateParamObjectDecl::Profile(ID, T, V);

  void *InsertPos;
  if (TemplateParamObjectDecl *Existing =
          TemplateParamObjectDecls.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;

  TemplateParamObjectDecl *New = TemplateParamObjectDecl::Create(C: *this, T, V);
  TemplateParamObjectDecls.InsertNode(N: New, InsertPos);
  return New;
}
13793
// Whether the atomic operation \p E would require a libcall that the
// deployment target does not provide: only relevant for old Darwin targets
// (iOS < 7, macOS < 10.9), where atomics that are not naturally sized/aligned
// or exceed the max inline width cannot be lowered.
bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const {
  const llvm::Triple &T = getTargetInfo().getTriple();
  if (!T.isOSDarwin())
    return false;

  // Newer Darwin releases provide the needed libcalls.
  if (!(T.isiOS() && T.isOSVersionLT(Major: 7)) &&
      !(T.isMacOSX() && T.isOSVersionLT(Major: 10, Minor: 9)))
    return false;

  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  CharUnits sizeChars = getTypeSizeInChars(T: AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getTypeAlignInChars(T: AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits = getTargetInfo().getMaxAtomicInlineWidth();
  // Unsupported if not naturally aligned to its size, or too wide to inline.
  return (Size != Align || toBits(CharSize: sizeChars) > MaxInlineWidthInBits);
}
13811
// Whether an Objective-C method declaration and its implementation match:
// same ObjC decl qualifiers, return type, parameter count/types, and
// variadic-ness. Unavailable/deprecated declarations never match.
bool
ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl,
                                const ObjCMethodDecl *MethodImpl) {
  // No point trying to match an unavailable/deprecated method.
  if (MethodDecl->hasAttr<UnavailableAttr>()
      || MethodDecl->hasAttr<DeprecatedAttr>())
    return false;
  if (MethodDecl->getObjCDeclQualifier() !=
      MethodImpl->getObjCDeclQualifier())
    return false;
  if (!hasSameType(T1: MethodDecl->getReturnType(), T2: MethodImpl->getReturnType()))
    return false;

  if (MethodDecl->param_size() != MethodImpl->param_size())
    return false;

  // Compare parameters pairwise: qualifiers and types must agree.
  for (ObjCMethodDecl::param_const_iterator IM = MethodImpl->param_begin(),
       IF = MethodDecl->param_begin(), EM = MethodImpl->param_end(),
       EF = MethodDecl->param_end();
       IM != EM && IF != EF; ++IM, ++IF) {
    const ParmVarDecl *DeclVar = (*IF);
    const ParmVarDecl *ImplVar = (*IM);
    if (ImplVar->getObjCDeclQualifier() != DeclVar->getObjCDeclQualifier())
      return false;
    if (!hasSameType(T1: DeclVar->getType(), T2: ImplVar->getType()))
      return false;
  }

  return (MethodDecl->isVariadic() == MethodImpl->isVariadic());
}
13842
13843uint64_t ASTContext::getTargetNullPointerValue(QualType QT) const {
13844 LangAS AS;
13845 if (QT->getUnqualifiedDesugaredType()->isNullPtrType())
13846 AS = LangAS::Default;
13847 else
13848 AS = QT->getPointeeType().getAddressSpace();
13849
13850 return getTargetInfo().getNullPointerValue(AddrSpace: AS);
13851}
13852
// Map a language-level address space to the target's numeric address space.
unsigned ASTContext::getTargetAddressSpace(LangAS AS) const {
  return getTargetInfo().getTargetAddressSpace(AS);
}
13856
// Structural equality of two expressions: identical pointers compare equal,
// null vs. non-null compare unequal, and otherwise the canonical profiles of
// the two expressions are compared.
bool ASTContext::hasSameExpr(const Expr *X, const Expr *Y) const {
  if (X == Y)
    return true;
  if (!X || !Y)
    return false;
  llvm::FoldingSetNodeID IDX, IDY;
  X->Profile(ID&: IDX, Context: *this, /*Canonical=*/true);
  Y->Profile(ID&: IDY, Context: *this, /*Canonical=*/true);
  return IDX == IDY;
}
13867
// Given two 'same' entities X and Y as inputs, the getCommon* helpers return
// another entity which is also the 'same' as the inputs, but which is closer
// to the canonical form of the inputs, each according to a given criterion.
// The getCommon*Checked variants are 'null inputs not-allowed' equivalents of
// the regular ones.
13874
13875static Decl *getCommonDecl(Decl *X, Decl *Y) {
13876 if (!declaresSameEntity(D1: X, D2: Y))
13877 return nullptr;
13878 for (const Decl *DX : X->redecls()) {
13879 // If we reach Y before reaching the first decl, that means X is older.
13880 if (DX == Y)
13881 return X;
13882 // If we reach the first decl, then Y is older.
13883 if (DX->isFirstDecl())
13884 return Y;
13885 }
13886 llvm_unreachable("Corrupt redecls chain");
13887}
13888
// Typed convenience wrapper over getCommonDecl(Decl*, Decl*); tolerates null
// inputs and casts the result back to T.
template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true>
static T *getCommonDecl(T *X, T *Y) {
  return cast_or_null<T>(
      getCommonDecl(X: const_cast<Decl *>(cast_or_null<Decl>(X)),
                    Y: const_cast<Decl *>(cast_or_null<Decl>(Y))));
}
13895
// As getCommonDecl<T>, but neither the inputs nor the result may be null.
template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true>
static T *getCommonDeclChecked(T *X, T *Y) {
  return cast<T>(getCommonDecl(X: const_cast<Decl *>(cast<Decl>(X)),
                               Y: const_cast<Decl *>(cast<Decl>(Y))));
}
13901
// Compute a common template name for X and Y: X itself if the two are
// representationally identical, otherwise their shared canonical name, or a
// null TemplateName if they do not have the same canonical form.
static TemplateName getCommonTemplateName(const ASTContext &Ctx, TemplateName X,
                                          TemplateName Y,
                                          bool IgnoreDeduced = false) {
  if (X.getAsVoidPointer() == Y.getAsVoidPointer())
    return X;
  // FIXME: There are cases here where we could find a common template name
  // with more sugar. For example one could be a SubstTemplateTemplate*
  // replacing the other.
  TemplateName CX = Ctx.getCanonicalTemplateName(Name: X, IgnoreDeduced);
  if (CX.getAsVoidPointer() !=
      Ctx.getCanonicalTemplateName(Name: Y).getAsVoidPointer())
    return TemplateName();
  return CX;
}
13916
// As getCommonTemplateName, but asserts the inputs do share a common name
// (the result is never null).
static TemplateName getCommonTemplateNameChecked(const ASTContext &Ctx,
                                                 TemplateName X, TemplateName Y,
                                                 bool IgnoreDeduced) {
  TemplateName R = getCommonTemplateName(Ctx, X, Y, IgnoreDeduced);
  assert(R.getAsVoidPointer() != nullptr);
  return R;
}
13924
13925static auto getCommonTypes(const ASTContext &Ctx, ArrayRef<QualType> Xs,
13926 ArrayRef<QualType> Ys, bool Unqualified = false) {
13927 assert(Xs.size() == Ys.size());
13928 SmallVector<QualType, 8> Rs(Xs.size());
13929 for (size_t I = 0; I < Rs.size(); ++I)
13930 Rs[I] = Ctx.getCommonSugaredType(X: Xs[I], Y: Ys[I], Unqualified);
13931 return Rs;
13932}
13933
13934template <class T>
13935static SourceLocation getCommonAttrLoc(const T *X, const T *Y) {
13936 return X->getAttributeLoc() == Y->getAttributeLoc() ? X->getAttributeLoc()
13937 : SourceLocation();
13938}
13939
13940static TemplateArgument getCommonTemplateArgument(const ASTContext &Ctx,
13941 const TemplateArgument &X,
13942 const TemplateArgument &Y) {
13943 if (X.getKind() != Y.getKind())
13944 return TemplateArgument();
13945
13946 switch (X.getKind()) {
13947 case TemplateArgument::ArgKind::Type:
13948 if (!Ctx.hasSameType(T1: X.getAsType(), T2: Y.getAsType()))
13949 return TemplateArgument();
13950 return TemplateArgument(
13951 Ctx.getCommonSugaredType(X: X.getAsType(), Y: Y.getAsType()));
13952 case TemplateArgument::ArgKind::NullPtr:
13953 if (!Ctx.hasSameType(T1: X.getNullPtrType(), T2: Y.getNullPtrType()))
13954 return TemplateArgument();
13955 return TemplateArgument(
13956 Ctx.getCommonSugaredType(X: X.getNullPtrType(), Y: Y.getNullPtrType()),
13957 /*Unqualified=*/true);
13958 case TemplateArgument::ArgKind::Expression:
13959 if (!Ctx.hasSameType(T1: X.getAsExpr()->getType(), T2: Y.getAsExpr()->getType()))
13960 return TemplateArgument();
13961 // FIXME: Try to keep the common sugar.
13962 return X;
13963 case TemplateArgument::ArgKind::Template: {
13964 TemplateName TX = X.getAsTemplate(), TY = Y.getAsTemplate();
13965 TemplateName CTN = ::getCommonTemplateName(Ctx, X: TX, Y: TY);
13966 if (!CTN.getAsVoidPointer())
13967 return TemplateArgument();
13968 return TemplateArgument(CTN);
13969 }
13970 case TemplateArgument::ArgKind::TemplateExpansion: {
13971 TemplateName TX = X.getAsTemplateOrTemplatePattern(),
13972 TY = Y.getAsTemplateOrTemplatePattern();
13973 TemplateName CTN = ::getCommonTemplateName(Ctx, X: TX, Y: TY);
13974 if (!CTN.getAsVoidPointer())
13975 return TemplateName();
13976 auto NExpX = X.getNumTemplateExpansions();
13977 assert(NExpX == Y.getNumTemplateExpansions());
13978 return TemplateArgument(CTN, NExpX);
13979 }
13980 default:
13981 // FIXME: Handle the other argument kinds.
13982 return X;
13983 }
13984}
13985
13986static bool getCommonTemplateArguments(const ASTContext &Ctx,
13987 SmallVectorImpl<TemplateArgument> &R,
13988 ArrayRef<TemplateArgument> Xs,
13989 ArrayRef<TemplateArgument> Ys) {
13990 if (Xs.size() != Ys.size())
13991 return true;
13992 R.resize(N: Xs.size());
13993 for (size_t I = 0; I < R.size(); ++I) {
13994 R[I] = getCommonTemplateArgument(Ctx, X: Xs[I], Y: Ys[I]);
13995 if (R[I].isNull())
13996 return true;
13997 }
13998 return false;
13999}
14000
14001static auto getCommonTemplateArguments(const ASTContext &Ctx,
14002 ArrayRef<TemplateArgument> Xs,
14003 ArrayRef<TemplateArgument> Ys) {
14004 SmallVector<TemplateArgument, 8> R;
14005 bool Different = getCommonTemplateArguments(Ctx, R, Xs, Ys);
14006 assert(!Different);
14007 (void)Different;
14008 return R;
14009}
14010
14011template <class T>
14012static ElaboratedTypeKeyword getCommonTypeKeyword(const T *X, const T *Y,
14013 bool IsSame) {
14014 ElaboratedTypeKeyword KX = X->getKeyword(), KY = Y->getKeyword();
14015 if (KX == KY)
14016 return KX;
14017 KX = getCanonicalElaboratedTypeKeyword(Keyword: KX);
14018 assert(!IsSame || KX == getCanonicalElaboratedTypeKeyword(KY));
14019 return KX;
14020}
14021
/// Returns a NestedNameSpecifier which has only the common sugar
/// present in both NNS1 and NNS2.
///
/// \param IsSame when true, the caller guarantees NNS1 and NNS2 are
/// canonically equivalent; used only to strengthen assertions.
static NestedNameSpecifier getCommonNNS(const ASTContext &Ctx,
                                        NestedNameSpecifier NNS1,
                                        NestedNameSpecifier NNS2, bool IsSame) {
  // If they are identical, all sugar is common.
  if (NNS1 == NNS2)
    return NNS1;

  // IsSame implies both Qualifiers are equivalent.
  NestedNameSpecifier Canon = NNS1.getCanonical();
  if (Canon != NNS2.getCanonical()) {
    assert(!IsSame && "Should be the same NestedNameSpecifier");
    // If they are not the same, there is nothing to unify.
    return std::nullopt;
  }

  NestedNameSpecifier R = std::nullopt;
  NestedNameSpecifier::Kind Kind = NNS1.getKind();
  assert(Kind == NNS2.getKind());
  switch (Kind) {
  case NestedNameSpecifier::Kind::Namespace: {
    auto [Namespace1, Prefix1] = NNS1.getAsNamespaceAndPrefix();
    auto [Namespace2, Prefix2] = NNS2.getAsNamespaceAndPrefix();
    auto Kind = Namespace1->getKind();
    // If the two sides name the namespace through differing declarations
    // (different decl kinds, or distinct aliases), unify on the underlying
    // namespace itself, with no prefix.
    if (Kind != Namespace2->getKind() ||
        (Kind == Decl::NamespaceAlias &&
         !declaresSameEntity(D1: Namespace1, D2: Namespace2))) {
      R = NestedNameSpecifier(
          Ctx,
          ::getCommonDeclChecked(X: Namespace1->getNamespace(),
                                 Y: Namespace2->getNamespace()),
          /*Prefix=*/std::nullopt);
      break;
    }
    // The prefixes for namespaces are not significant: a namespace's
    // declaration identifies it uniquely.
    NestedNameSpecifier Prefix = ::getCommonNNS(Ctx, NNS1: Prefix1, NNS2: Prefix2,
                                                /*IsSame=*/false);
    R = NestedNameSpecifier(Ctx, ::getCommonDeclChecked(X: Namespace1, Y: Namespace2),
                            Prefix);
    break;
  }
  case NestedNameSpecifier::Kind::Type: {
    // Unify the two qualifier types, ignoring top-level qualifiers.
    const Type *T1 = NNS1.getAsType(), *T2 = NNS2.getAsType();
    const Type *T = Ctx.getCommonSugaredType(X: QualType(T1, 0), Y: QualType(T2, 0),
                                             /*Unqualified=*/true)
                        .getTypePtr();
    R = NestedNameSpecifier(T);
    break;
  }
  case NestedNameSpecifier::Kind::MicrosoftSuper: {
    // FIXME: Can __super even be used with data members?
    // If it's only usable in functions, we will never see it here,
    // unless we save the qualifiers used in function types.
    // In that case, it might be possible NNS2 is a type,
    // in which case we should degrade the result to
    // a CXXRecordType.
    R = NestedNameSpecifier(getCommonDeclChecked(X: NNS1.getAsMicrosoftSuper(),
                                                 Y: NNS2.getAsMicrosoftSuper()));
    break;
  }
  case NestedNameSpecifier::Kind::Null:
  case NestedNameSpecifier::Kind::Global:
    // These are singletons.
    llvm_unreachable("singletons did not compare equal");
  }
  assert(R.getCanonical() == Canon);
  return R;
}
14092
14093template <class T>
14094static NestedNameSpecifier getCommonQualifier(const ASTContext &Ctx, const T *X,
14095 const T *Y, bool IsSame) {
14096 return ::getCommonNNS(Ctx, NNS1: X->getQualifier(), NNS2: Y->getQualifier(), IsSame);
14097}
14098
14099template <class T>
14100static QualType getCommonElementType(const ASTContext &Ctx, const T *X,
14101 const T *Y) {
14102 return Ctx.getCommonSugaredType(X: X->getElementType(), Y: Y->getElementType());
14103}
14104
14105static QualType getCommonTypeWithQualifierLifting(const ASTContext &Ctx,
14106 QualType X, QualType Y,
14107 Qualifiers &QX,
14108 Qualifiers &QY) {
14109 QualType R = Ctx.getCommonSugaredType(X, Y,
14110 /*Unqualified=*/true);
14111 // Qualifiers common to both element types.
14112 Qualifiers RQ = R.getQualifiers();
14113 // For each side, move to the top level any qualifiers which are not common to
14114 // both element types. The caller must assume top level qualifiers might
14115 // be different, even if they are the same type, and can be treated as sugar.
14116 QX += X.getQualifiers() - RQ;
14117 QY += Y.getQualifiers() - RQ;
14118 return R;
14119}
14120
14121template <class T>
14122static QualType getCommonArrayElementType(const ASTContext &Ctx, const T *X,
14123 Qualifiers &QX, const T *Y,
14124 Qualifiers &QY) {
14125 return getCommonTypeWithQualifierLifting(Ctx, X->getElementType(),
14126 Y->getElementType(), QX, QY);
14127}
14128
14129template <class T>
14130static QualType getCommonPointeeType(const ASTContext &Ctx, const T *X,
14131 const T *Y) {
14132 return Ctx.getCommonSugaredType(X: X->getPointeeType(), Y: Y->getPointeeType());
14133}
14134
14135template <class T>
14136static auto *getCommonSizeExpr(const ASTContext &Ctx, T *X, T *Y) {
14137 assert(Ctx.hasSameExpr(X->getSizeExpr(), Y->getSizeExpr()));
14138 return X->getSizeExpr();
14139}
14140
14141static auto getCommonSizeModifier(const ArrayType *X, const ArrayType *Y) {
14142 assert(X->getSizeModifier() == Y->getSizeModifier());
14143 return X->getSizeModifier();
14144}
14145
14146static auto getCommonIndexTypeCVRQualifiers(const ArrayType *X,
14147 const ArrayType *Y) {
14148 assert(X->getIndexTypeCVRQualifiers() == Y->getIndexTypeCVRQualifiers());
14149 return X->getIndexTypeCVRQualifiers();
14150}
14151
14152// Merges two type lists such that the resulting vector will contain
14153// each type (in a canonical sense) only once, in the order they appear
14154// from X to Y. If they occur in both X and Y, the result will contain
14155// the common sugared type between them.
14156static void mergeTypeLists(const ASTContext &Ctx,
14157 SmallVectorImpl<QualType> &Out, ArrayRef<QualType> X,
14158 ArrayRef<QualType> Y) {
14159 llvm::DenseMap<QualType, unsigned> Found;
14160 for (auto Ts : {X, Y}) {
14161 for (QualType T : Ts) {
14162 auto Res = Found.try_emplace(Key: Ctx.getCanonicalType(T), Args: Out.size());
14163 if (!Res.second) {
14164 QualType &U = Out[Res.first->second];
14165 U = Ctx.getCommonSugaredType(X: U, Y: T);
14166 } else {
14167 Out.emplace_back(Args&: T);
14168 }
14169 }
14170 }
14171}
14172
/// Merge two exception specifications into one that permits everything
/// either of them permits.
///
/// \param ExceptionTypeStorage backing storage for a merged dynamic
///        exception list; must outlive the returned ExceptionSpecInfo.
/// \param AcceptDependent whether value-dependent computed noexcept
///        specifications are tolerated; when hit, the result drops the
///        exception specification entirely.
FunctionProtoType::ExceptionSpecInfo
ASTContext::mergeExceptionSpecs(FunctionProtoType::ExceptionSpecInfo ESI1,
                                FunctionProtoType::ExceptionSpecInfo ESI2,
                                SmallVectorImpl<QualType> &ExceptionTypeStorage,
                                bool AcceptDependent) const {
  ExceptionSpecificationType EST1 = ESI1.Type, EST2 = ESI2.Type;

  // If either of them can throw anything, that is the result.
  // Note: this check must come before the non-throwing one below, so that
  // e.g. (throwing, non-throwing) resolves to throwing.
  for (auto I : {EST_None, EST_MSAny, EST_NoexceptFalse}) {
    if (EST1 == I)
      return ESI1;
    if (EST2 == I)
      return ESI2;
  }

  // If either of them is non-throwing, the result is the other.
  for (auto I :
       {EST_NoThrow, EST_DynamicNone, EST_BasicNoexcept, EST_NoexceptTrue}) {
    if (EST1 == I)
      return ESI2;
    if (EST2 == I)
      return ESI1;
  }

  // If we're left with value-dependent computed noexcept expressions, we're
  // stuck. Before C++17, we can just drop the exception specification entirely,
  // since it's not actually part of the canonical type. And this should never
  // happen in C++17, because it would mean we were computing the composite
  // pointer type of dependent types, which should never happen.
  if (EST1 == EST_DependentNoexcept || EST2 == EST_DependentNoexcept) {
    assert(AcceptDependent &&
           "computing composite pointer type of dependent types");
    return FunctionProtoType::ExceptionSpecInfo();
  }

  // Switch over the possibilities so that people adding new values know to
  // update this function.
  switch (EST1) {
  case EST_None:
  case EST_DynamicNone:
  case EST_MSAny:
  case EST_BasicNoexcept:
  case EST_DependentNoexcept:
  case EST_NoexceptFalse:
  case EST_NoexceptTrue:
  case EST_NoThrow:
    llvm_unreachable("These ESTs should be handled above");

  case EST_Dynamic: {
    // This is the fun case: both exception specifications are dynamic. Form
    // the union of the two lists.
    assert(EST2 == EST_Dynamic && "other cases should already be handled");
    mergeTypeLists(Ctx: *this, Out&: ExceptionTypeStorage, X: ESI1.Exceptions,
                   Y: ESI2.Exceptions);
    FunctionProtoType::ExceptionSpecInfo Result(EST_Dynamic);
    Result.Exceptions = ExceptionTypeStorage;
    return Result;
  }

  case EST_Unevaluated:
  case EST_Uninstantiated:
  case EST_Unparsed:
    llvm_unreachable("shouldn't see unresolved exception specifications here");
  }

  llvm_unreachable("invalid ExceptionSpecificationType");
}
14240
/// Build the merged form of two occurrences of the same non-sugar type node,
/// unifying the sugar of their component types.
///
/// \param QX,QY accumulate qualifiers lifted off array element types; the
/// caller is responsible for reapplying them.
/// \pre X and Y have the same type class and are canonically identical.
static QualType getCommonNonSugarTypeNode(const ASTContext &Ctx, const Type *X,
                                          Qualifiers &QX, const Type *Y,
                                          Qualifiers &QY) {
  Type::TypeClass TC = X->getTypeClass();
  assert(TC == Y->getTypeClass());
  switch (TC) {
// Type classes which must never reach this function: they are either
// stripped sugar or otherwise handled before we get here.
#define UNEXPECTED_TYPE(Class, Kind)                                           \
  case Type::Class:                                                            \
    llvm_unreachable("Unexpected " Kind ": " #Class);

#define NON_CANONICAL_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "non-canonical")
#define TYPE(Class, Base)
#include "clang/AST/TypeNodes.inc"

#define SUGAR_FREE_TYPE(Class) UNEXPECTED_TYPE(Class, "sugar-free")
    SUGAR_FREE_TYPE(Builtin)
    SUGAR_FREE_TYPE(DeducedTemplateSpecialization)
    SUGAR_FREE_TYPE(DependentBitInt)
    SUGAR_FREE_TYPE(BitInt)
    SUGAR_FREE_TYPE(ObjCInterface)
    SUGAR_FREE_TYPE(SubstTemplateTypeParmPack)
    SUGAR_FREE_TYPE(SubstBuiltinTemplatePack)
    SUGAR_FREE_TYPE(UnresolvedUsing)
    SUGAR_FREE_TYPE(HLSLAttributedResource)
    SUGAR_FREE_TYPE(HLSLInlineSpirv)
#undef SUGAR_FREE_TYPE
#define NON_UNIQUE_TYPE(Class) UNEXPECTED_TYPE(Class, "non-unique")
    NON_UNIQUE_TYPE(TypeOfExpr)
    NON_UNIQUE_TYPE(VariableArray)
#undef NON_UNIQUE_TYPE

    UNEXPECTED_TYPE(TypeOf, "sugar")

#undef UNEXPECTED_TYPE

  case Type::Auto: {
    const auto *AX = cast<AutoType>(Val: X), *AY = cast<AutoType>(Val: Y);
    assert(AX->getDeducedType().isNull());
    assert(AY->getDeducedType().isNull());
    assert(AX->getKeyword() == AY->getKeyword());
    assert(AX->isInstantiationDependentType() ==
           AY->isInstantiationDependentType());
    auto As = getCommonTemplateArguments(Ctx, Xs: AX->getTypeConstraintArguments(),
                                         Ys: AY->getTypeConstraintArguments());
    return Ctx.getAutoType(DeducedType: QualType(), Keyword: AX->getKeyword(),
                           IsDependent: AX->isInstantiationDependentType(),
                           IsPack: AX->containsUnexpandedParameterPack(),
                           TypeConstraintConcept: getCommonDeclChecked(X: AX->getTypeConstraintConcept(),
                                                Y: AY->getTypeConstraintConcept()),
                           TypeConstraintArgs: As);
  }
  case Type::IncompleteArray: {
    const auto *AX = cast<IncompleteArrayType>(Val: X),
               *AY = cast<IncompleteArrayType>(Val: Y);
    return Ctx.getIncompleteArrayType(
        elementType: getCommonArrayElementType(Ctx, X: AX, QX, Y: AY, QY),
        ASM: getCommonSizeModifier(X: AX, Y: AY), elementTypeQuals: getCommonIndexTypeCVRQualifiers(X: AX, Y: AY));
  }
  case Type::DependentSizedArray: {
    const auto *AX = cast<DependentSizedArrayType>(Val: X),
               *AY = cast<DependentSizedArrayType>(Val: Y);
    return Ctx.getDependentSizedArrayType(
        elementType: getCommonArrayElementType(Ctx, X: AX, QX, Y: AY, QY),
        numElements: getCommonSizeExpr(Ctx, X: AX, Y: AY), ASM: getCommonSizeModifier(X: AX, Y: AY),
        elementTypeQuals: getCommonIndexTypeCVRQualifiers(X: AX, Y: AY));
  }
  case Type::ConstantArray: {
    const auto *AX = cast<ConstantArrayType>(Val: X),
               *AY = cast<ConstantArrayType>(Val: Y);
    assert(AX->getSize() == AY->getSize());
    // Only keep the as-written size expression if both sides wrote the
    // equivalent expression.
    const Expr *SizeExpr = Ctx.hasSameExpr(X: AX->getSizeExpr(), Y: AY->getSizeExpr())
                               ? AX->getSizeExpr()
                               : nullptr;
    return Ctx.getConstantArrayType(
        EltTy: getCommonArrayElementType(Ctx, X: AX, QX, Y: AY, QY), ArySizeIn: AX->getSize(), SizeExpr,
        ASM: getCommonSizeModifier(X: AX, Y: AY), IndexTypeQuals: getCommonIndexTypeCVRQualifiers(X: AX, Y: AY));
  }
  case Type::ArrayParameter: {
    const auto *AX = cast<ArrayParameterType>(Val: X),
               *AY = cast<ArrayParameterType>(Val: Y);
    assert(AX->getSize() == AY->getSize());
    const Expr *SizeExpr = Ctx.hasSameExpr(X: AX->getSizeExpr(), Y: AY->getSizeExpr())
                               ? AX->getSizeExpr()
                               : nullptr;
    // Merge as a constant array first, then re-wrap as an array parameter.
    auto ArrayTy = Ctx.getConstantArrayType(
        EltTy: getCommonArrayElementType(Ctx, X: AX, QX, Y: AY, QY), ArySizeIn: AX->getSize(), SizeExpr,
        ASM: getCommonSizeModifier(X: AX, Y: AY), IndexTypeQuals: getCommonIndexTypeCVRQualifiers(X: AX, Y: AY));
    return Ctx.getArrayParameterType(Ty: ArrayTy);
  }
  case Type::Atomic: {
    const auto *AX = cast<AtomicType>(Val: X), *AY = cast<AtomicType>(Val: Y);
    return Ctx.getAtomicType(
        T: Ctx.getCommonSugaredType(X: AX->getValueType(), Y: AY->getValueType()));
  }
  case Type::Complex: {
    const auto *CX = cast<ComplexType>(Val: X), *CY = cast<ComplexType>(Val: Y);
    return Ctx.getComplexType(T: getCommonArrayElementType(Ctx, X: CX, QX, Y: CY, QY));
  }
  case Type::Pointer: {
    const auto *PX = cast<PointerType>(Val: X), *PY = cast<PointerType>(Val: Y);
    return Ctx.getPointerType(T: getCommonPointeeType(Ctx, X: PX, Y: PY));
  }
  case Type::BlockPointer: {
    const auto *PX = cast<BlockPointerType>(Val: X), *PY = cast<BlockPointerType>(Val: Y);
    return Ctx.getBlockPointerType(T: getCommonPointeeType(Ctx, X: PX, Y: PY));
  }
  case Type::ObjCObjectPointer: {
    const auto *PX = cast<ObjCObjectPointerType>(Val: X),
               *PY = cast<ObjCObjectPointerType>(Val: Y);
    return Ctx.getObjCObjectPointerType(ObjectT: getCommonPointeeType(Ctx, X: PX, Y: PY));
  }
  case Type::MemberPointer: {
    const auto *PX = cast<MemberPointerType>(Val: X),
               *PY = cast<MemberPointerType>(Val: Y);
    assert(declaresSameEntity(PX->getMostRecentCXXRecordDecl(),
                              PY->getMostRecentCXXRecordDecl()));
    return Ctx.getMemberPointerType(
        T: getCommonPointeeType(Ctx, X: PX, Y: PY),
        Qualifier: getCommonQualifier(Ctx, X: PX, Y: PY, /*IsSame=*/true),
        Cls: PX->getMostRecentCXXRecordDecl());
  }
  case Type::LValueReference: {
    const auto *PX = cast<LValueReferenceType>(Val: X),
               *PY = cast<LValueReferenceType>(Val: Y);
    // FIXME: Preserve PointeeTypeAsWritten.
    return Ctx.getLValueReferenceType(T: getCommonPointeeType(Ctx, X: PX, Y: PY),
                                      SpelledAsLValue: PX->isSpelledAsLValue() ||
                                          PY->isSpelledAsLValue());
  }
  case Type::RValueReference: {
    const auto *PX = cast<RValueReferenceType>(Val: X),
               *PY = cast<RValueReferenceType>(Val: Y);
    // FIXME: Preserve PointeeTypeAsWritten.
    return Ctx.getRValueReferenceType(T: getCommonPointeeType(Ctx, X: PX, Y: PY));
  }
  case Type::DependentAddressSpace: {
    const auto *PX = cast<DependentAddressSpaceType>(Val: X),
               *PY = cast<DependentAddressSpaceType>(Val: Y);
    assert(Ctx.hasSameExpr(PX->getAddrSpaceExpr(), PY->getAddrSpaceExpr()));
    return Ctx.getDependentAddressSpaceType(PointeeType: getCommonPointeeType(Ctx, X: PX, Y: PY),
                                            AddrSpaceExpr: PX->getAddrSpaceExpr(),
                                            AttrLoc: getCommonAttrLoc(X: PX, Y: PY));
  }
  case Type::FunctionNoProto: {
    const auto *FX = cast<FunctionNoProtoType>(Val: X),
               *FY = cast<FunctionNoProtoType>(Val: Y);
    assert(FX->getExtInfo() == FY->getExtInfo());
    return Ctx.getFunctionNoProtoType(
        ResultTy: Ctx.getCommonSugaredType(X: FX->getReturnType(), Y: FY->getReturnType()),
        Info: FX->getExtInfo());
  }
  case Type::FunctionProto: {
    const auto *FX = cast<FunctionProtoType>(Val: X),
               *FY = cast<FunctionProtoType>(Val: Y);
    FunctionProtoType::ExtProtoInfo EPIX = FX->getExtProtoInfo(),
                                    EPIY = FY->getExtProtoInfo();
    assert(EPIX.ExtInfo == EPIY.ExtInfo);
    assert(!EPIX.ExtParameterInfos == !EPIY.ExtParameterInfos);
    assert(!EPIX.ExtParameterInfos ||
           llvm::equal(
               llvm::ArrayRef(EPIX.ExtParameterInfos, FX->getNumParams()),
               llvm::ArrayRef(EPIY.ExtParameterInfos, FY->getNumParams())));
    assert(EPIX.RefQualifier == EPIY.RefQualifier);
    assert(EPIX.TypeQuals == EPIY.TypeQuals);
    assert(EPIX.Variadic == EPIY.Variadic);

    // FIXME: Can we handle an empty EllipsisLoc?
    // Use empty EllipsisLoc if X and Y differ.

    EPIX.HasTrailingReturn = EPIX.HasTrailingReturn && EPIY.HasTrailingReturn;

    QualType R =
        Ctx.getCommonSugaredType(X: FX->getReturnType(), Y: FY->getReturnType());
    auto P = getCommonTypes(Ctx, Xs: FX->param_types(), Ys: FY->param_types(),
                            /*Unqualified=*/true);

    SmallVector<QualType, 8> Exceptions;
    EPIX.ExceptionSpec = Ctx.mergeExceptionSpecs(
        ESI1: EPIX.ExceptionSpec, ESI2: EPIY.ExceptionSpec, ExceptionTypeStorage&: Exceptions, AcceptDependent: true);
    return Ctx.getFunctionType(ResultTy: R, Args: P, EPI: EPIX);
  }
  case Type::ObjCObject: {
    const auto *OX = cast<ObjCObjectType>(Val: X), *OY = cast<ObjCObjectType>(Val: Y);
    assert(
        std::equal(OX->getProtocols().begin(), OX->getProtocols().end(),
                   OY->getProtocols().begin(), OY->getProtocols().end(),
                   [](const ObjCProtocolDecl *P0, const ObjCProtocolDecl *P1) {
                     return P0->getCanonicalDecl() == P1->getCanonicalDecl();
                   }) &&
        "protocol lists must be the same");
    auto TAs = getCommonTypes(Ctx, Xs: OX->getTypeArgsAsWritten(),
                              Ys: OY->getTypeArgsAsWritten());
    return Ctx.getObjCObjectType(
        baseType: Ctx.getCommonSugaredType(X: OX->getBaseType(), Y: OY->getBaseType()), typeArgs: TAs,
        protocols: OX->getProtocols(),
        isKindOf: OX->isKindOfTypeAsWritten() && OY->isKindOfTypeAsWritten());
  }
  case Type::ConstantMatrix: {
    const auto *MX = cast<ConstantMatrixType>(Val: X),
               *MY = cast<ConstantMatrixType>(Val: Y);
    assert(MX->getNumRows() == MY->getNumRows());
    assert(MX->getNumColumns() == MY->getNumColumns());
    return Ctx.getConstantMatrixType(ElementTy: getCommonElementType(Ctx, X: MX, Y: MY),
                                     NumRows: MX->getNumRows(), NumColumns: MX->getNumColumns());
  }
  case Type::DependentSizedMatrix: {
    const auto *MX = cast<DependentSizedMatrixType>(Val: X),
               *MY = cast<DependentSizedMatrixType>(Val: Y);
    assert(Ctx.hasSameExpr(MX->getRowExpr(), MY->getRowExpr()));
    assert(Ctx.hasSameExpr(MX->getColumnExpr(), MY->getColumnExpr()));
    return Ctx.getDependentSizedMatrixType(
        ElementTy: getCommonElementType(Ctx, X: MX, Y: MY), RowExpr: MX->getRowExpr(),
        ColumnExpr: MX->getColumnExpr(), AttrLoc: getCommonAttrLoc(X: MX, Y: MY));
  }
  case Type::Vector: {
    const auto *VX = cast<VectorType>(Val: X), *VY = cast<VectorType>(Val: Y);
    assert(VX->getNumElements() == VY->getNumElements());
    assert(VX->getVectorKind() == VY->getVectorKind());
    return Ctx.getVectorType(vecType: getCommonElementType(Ctx, X: VX, Y: VY),
                             NumElts: VX->getNumElements(), VecKind: VX->getVectorKind());
  }
  case Type::ExtVector: {
    const auto *VX = cast<ExtVectorType>(Val: X), *VY = cast<ExtVectorType>(Val: Y);
    assert(VX->getNumElements() == VY->getNumElements());
    return Ctx.getExtVectorType(vecType: getCommonElementType(Ctx, X: VX, Y: VY),
                                NumElts: VX->getNumElements());
  }
  case Type::DependentSizedExtVector: {
    const auto *VX = cast<DependentSizedExtVectorType>(Val: X),
               *VY = cast<DependentSizedExtVectorType>(Val: Y);
    return Ctx.getDependentSizedExtVectorType(vecType: getCommonElementType(Ctx, X: VX, Y: VY),
                                              SizeExpr: getCommonSizeExpr(Ctx, X: VX, Y: VY),
                                              AttrLoc: getCommonAttrLoc(X: VX, Y: VY));
  }
  case Type::DependentVector: {
    const auto *VX = cast<DependentVectorType>(Val: X),
               *VY = cast<DependentVectorType>(Val: Y);
    assert(VX->getVectorKind() == VY->getVectorKind());
    return Ctx.getDependentVectorType(
        VecType: getCommonElementType(Ctx, X: VX, Y: VY), SizeExpr: getCommonSizeExpr(Ctx, X: VX, Y: VY),
        AttrLoc: getCommonAttrLoc(X: VX, Y: VY), VecKind: VX->getVectorKind());
  }
  case Type::Enum:
  case Type::Record:
  case Type::InjectedClassName: {
    const auto *TX = cast<TagType>(Val: X), *TY = cast<TagType>(Val: Y);
    return Ctx.getTagType(Keyword: ::getCommonTypeKeyword(X: TX, Y: TY, /*IsSame=*/false),
                          Qualifier: ::getCommonQualifier(Ctx, X: TX, Y: TY, /*IsSame=*/false),
                          TD: ::getCommonDeclChecked(X: TX->getDecl(), Y: TY->getDecl()),
                          /*OwnedTag=*/OwnsTag: false);
  }
  case Type::TemplateSpecialization: {
    const auto *TX = cast<TemplateSpecializationType>(Val: X),
               *TY = cast<TemplateSpecializationType>(Val: Y);
    auto As = getCommonTemplateArguments(Ctx, Xs: TX->template_arguments(),
                                         Ys: TY->template_arguments());
    return Ctx.getTemplateSpecializationType(
        Keyword: getCommonTypeKeyword(X: TX, Y: TY, /*IsSame=*/false),
        Template: ::getCommonTemplateNameChecked(Ctx, X: TX->getTemplateName(),
                                          Y: TY->getTemplateName(),
                                          /*IgnoreDeduced=*/true),
        SpecifiedArgs: As, /*CanonicalArgs=*/{}, Underlying: X->getCanonicalTypeInternal());
  }
  case Type::Decltype: {
    const auto *DX = cast<DecltypeType>(Val: X);
    [[maybe_unused]] const auto *DY = cast<DecltypeType>(Val: Y);
    assert(DX->isDependentType());
    assert(DY->isDependentType());
    assert(Ctx.hasSameExpr(DX->getUnderlyingExpr(), DY->getUnderlyingExpr()));
    // As Decltype is not uniqued, building a common type would be wasteful.
    return QualType(DX, 0);
  }
  case Type::PackIndexing: {
    const auto *DX = cast<PackIndexingType>(Val: X);
    [[maybe_unused]] const auto *DY = cast<PackIndexingType>(Val: Y);
    assert(DX->isDependentType());
    assert(DY->isDependentType());
    assert(Ctx.hasSameExpr(DX->getIndexExpr(), DY->getIndexExpr()));
    // Like Decltype, PackIndexing is not uniqued; reuse X's node.
    return QualType(DX, 0);
  }
  case Type::DependentName: {
    const auto *NX = cast<DependentNameType>(Val: X),
               *NY = cast<DependentNameType>(Val: Y);
    assert(NX->getIdentifier() == NY->getIdentifier());
    return Ctx.getDependentNameType(
        Keyword: getCommonTypeKeyword(X: NX, Y: NY, /*IsSame=*/true),
        NNS: getCommonQualifier(Ctx, X: NX, Y: NY, /*IsSame=*/true), Name: NX->getIdentifier());
  }
  case Type::OverflowBehavior: {
    const auto *NX = cast<OverflowBehaviorType>(Val: X),
               *NY = cast<OverflowBehaviorType>(Val: Y);
    assert(NX->getBehaviorKind() == NY->getBehaviorKind());
    return Ctx.getOverflowBehaviorType(
        Kind: NX->getBehaviorKind(),
        Underlying: getCommonTypeWithQualifierLifting(Ctx, X: NX->getUnderlyingType(),
                                          Y: NY->getUnderlyingType(), QX, QY));
  }
  case Type::UnaryTransform: {
    const auto *TX = cast<UnaryTransformType>(Val: X),
               *TY = cast<UnaryTransformType>(Val: Y);
    assert(TX->getUTTKind() == TY->getUTTKind());
    return Ctx.getUnaryTransformType(
        BaseType: Ctx.getCommonSugaredType(X: TX->getBaseType(), Y: TY->getBaseType()),
        UnderlyingType: Ctx.getCommonSugaredType(X: TX->getUnderlyingType(),
                                 Y: TY->getUnderlyingType()),
        Kind: TX->getUTTKind());
  }
  case Type::PackExpansion: {
    const auto *PX = cast<PackExpansionType>(Val: X),
               *PY = cast<PackExpansionType>(Val: Y);
    assert(PX->getNumExpansions() == PY->getNumExpansions());
    return Ctx.getPackExpansionType(
        Pattern: Ctx.getCommonSugaredType(X: PX->getPattern(), Y: PY->getPattern()),
        NumExpansions: PX->getNumExpansions(), ExpectPackInType: false);
  }
  case Type::Pipe: {
    const auto *PX = cast<PipeType>(Val: X), *PY = cast<PipeType>(Val: Y);
    assert(PX->isReadOnly() == PY->isReadOnly());
    // Dispatch to the read- or write-pipe factory through a member pointer.
    auto MP = PX->isReadOnly() ? &ASTContext::getReadPipeType
                               : &ASTContext::getWritePipeType;
    return (Ctx.*MP)(getCommonElementType(Ctx, X: PX, Y: PY));
  }
  case Type::TemplateTypeParm: {
    const auto *TX = cast<TemplateTypeParmType>(Val: X),
               *TY = cast<TemplateTypeParmType>(Val: Y);
    assert(TX->getDepth() == TY->getDepth());
    assert(TX->getIndex() == TY->getIndex());
    assert(TX->isParameterPack() == TY->isParameterPack());
    return Ctx.getTemplateTypeParmType(
        Depth: TX->getDepth(), Index: TX->getIndex(), ParameterPack: TX->isParameterPack(),
        TTPDecl: getCommonDecl(X: TX->getDecl(), Y: TY->getDecl()));
  }
  }
  llvm_unreachable("Unknown Type Class");
}
14576
14577static QualType getCommonSugarTypeNode(const ASTContext &Ctx, const Type *X,
14578 const Type *Y,
14579 SplitQualType Underlying) {
14580 Type::TypeClass TC = X->getTypeClass();
14581 if (TC != Y->getTypeClass())
14582 return QualType();
14583 switch (TC) {
14584#define UNEXPECTED_TYPE(Class, Kind) \
14585 case Type::Class: \
14586 llvm_unreachable("Unexpected " Kind ": " #Class);
14587#define TYPE(Class, Base)
14588#define DEPENDENT_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "dependent")
14589#include "clang/AST/TypeNodes.inc"
14590
14591#define CANONICAL_TYPE(Class) UNEXPECTED_TYPE(Class, "canonical")
14592 CANONICAL_TYPE(Atomic)
14593 CANONICAL_TYPE(BitInt)
14594 CANONICAL_TYPE(BlockPointer)
14595 CANONICAL_TYPE(Builtin)
14596 CANONICAL_TYPE(Complex)
14597 CANONICAL_TYPE(ConstantArray)
14598 CANONICAL_TYPE(ArrayParameter)
14599 CANONICAL_TYPE(ConstantMatrix)
14600 CANONICAL_TYPE(Enum)
14601 CANONICAL_TYPE(ExtVector)
14602 CANONICAL_TYPE(FunctionNoProto)
14603 CANONICAL_TYPE(FunctionProto)
14604 CANONICAL_TYPE(IncompleteArray)
14605 CANONICAL_TYPE(HLSLAttributedResource)
14606 CANONICAL_TYPE(HLSLInlineSpirv)
14607 CANONICAL_TYPE(LValueReference)
14608 CANONICAL_TYPE(ObjCInterface)
14609 CANONICAL_TYPE(ObjCObject)
14610 CANONICAL_TYPE(ObjCObjectPointer)
14611 CANONICAL_TYPE(OverflowBehavior)
14612 CANONICAL_TYPE(Pipe)
14613 CANONICAL_TYPE(Pointer)
14614 CANONICAL_TYPE(Record)
14615 CANONICAL_TYPE(RValueReference)
14616 CANONICAL_TYPE(VariableArray)
14617 CANONICAL_TYPE(Vector)
14618#undef CANONICAL_TYPE
14619
14620#undef UNEXPECTED_TYPE
14621
14622 case Type::Adjusted: {
14623 const auto *AX = cast<AdjustedType>(Val: X), *AY = cast<AdjustedType>(Val: Y);
14624 QualType OX = AX->getOriginalType(), OY = AY->getOriginalType();
14625 if (!Ctx.hasSameType(T1: OX, T2: OY))
14626 return QualType();
14627 // FIXME: It's inefficient to have to unify the original types.
14628 return Ctx.getAdjustedType(Orig: Ctx.getCommonSugaredType(X: OX, Y: OY),
14629 New: Ctx.getQualifiedType(split: Underlying));
14630 }
14631 case Type::Decayed: {
14632 const auto *DX = cast<DecayedType>(Val: X), *DY = cast<DecayedType>(Val: Y);
14633 QualType OX = DX->getOriginalType(), OY = DY->getOriginalType();
14634 if (!Ctx.hasSameType(T1: OX, T2: OY))
14635 return QualType();
14636 // FIXME: It's inefficient to have to unify the original types.
14637 return Ctx.getDecayedType(Orig: Ctx.getCommonSugaredType(X: OX, Y: OY),
14638 Decayed: Ctx.getQualifiedType(split: Underlying));
14639 }
14640 case Type::Attributed: {
14641 const auto *AX = cast<AttributedType>(Val: X), *AY = cast<AttributedType>(Val: Y);
14642 AttributedType::Kind Kind = AX->getAttrKind();
14643 if (Kind != AY->getAttrKind())
14644 return QualType();
14645 QualType MX = AX->getModifiedType(), MY = AY->getModifiedType();
14646 if (!Ctx.hasSameType(T1: MX, T2: MY))
14647 return QualType();
14648 // FIXME: It's inefficient to have to unify the modified types.
14649 return Ctx.getAttributedType(attrKind: Kind, modifiedType: Ctx.getCommonSugaredType(X: MX, Y: MY),
14650 equivalentType: Ctx.getQualifiedType(split: Underlying),
14651 attr: AX->getAttr());
14652 }
14653 case Type::BTFTagAttributed: {
14654 const auto *BX = cast<BTFTagAttributedType>(Val: X);
14655 const BTFTypeTagAttr *AX = BX->getAttr();
14656 // The attribute is not uniqued, so just compare the tag.
14657 if (AX->getBTFTypeTag() !=
14658 cast<BTFTagAttributedType>(Val: Y)->getAttr()->getBTFTypeTag())
14659 return QualType();
14660 return Ctx.getBTFTagAttributedType(BTFAttr: AX, Wrapped: Ctx.getQualifiedType(split: Underlying));
14661 }
14662 case Type::Auto: {
14663 const auto *AX = cast<AutoType>(Val: X), *AY = cast<AutoType>(Val: Y);
14664
14665 AutoTypeKeyword KW = AX->getKeyword();
14666 if (KW != AY->getKeyword())
14667 return QualType();
14668
14669 TemplateDecl *CD = ::getCommonDecl(X: AX->getTypeConstraintConcept(),
14670 Y: AY->getTypeConstraintConcept());
14671 SmallVector<TemplateArgument, 8> As;
14672 if (CD &&
14673 getCommonTemplateArguments(Ctx, R&: As, Xs: AX->getTypeConstraintArguments(),
14674 Ys: AY->getTypeConstraintArguments())) {
14675 CD = nullptr; // The arguments differ, so make it unconstrained.
14676 As.clear();
14677 }
14678
14679 // Both auto types can't be dependent, otherwise they wouldn't have been
14680 // sugar. This implies they can't contain unexpanded packs either.
14681 return Ctx.getAutoType(DeducedType: Ctx.getQualifiedType(split: Underlying), Keyword: AX->getKeyword(),
14682 /*IsDependent=*/false, /*IsPack=*/false, TypeConstraintConcept: CD, TypeConstraintArgs: As);
14683 }
14684 case Type::PackIndexing:
14685 case Type::Decltype:
14686 return QualType();
14687 case Type::DeducedTemplateSpecialization:
14688 // FIXME: Try to merge these.
14689 return QualType();
14690 case Type::MacroQualified: {
14691 const auto *MX = cast<MacroQualifiedType>(Val: X),
14692 *MY = cast<MacroQualifiedType>(Val: Y);
14693 const IdentifierInfo *IX = MX->getMacroIdentifier();
14694 if (IX != MY->getMacroIdentifier())
14695 return QualType();
14696 return Ctx.getMacroQualifiedType(UnderlyingTy: Ctx.getQualifiedType(split: Underlying), MacroII: IX);
14697 }
14698 case Type::SubstTemplateTypeParm: {
14699 const auto *SX = cast<SubstTemplateTypeParmType>(Val: X),
14700 *SY = cast<SubstTemplateTypeParmType>(Val: Y);
14701 Decl *CD =
14702 ::getCommonDecl(X: SX->getAssociatedDecl(), Y: SY->getAssociatedDecl());
14703 if (!CD)
14704 return QualType();
14705 unsigned Index = SX->getIndex();
14706 if (Index != SY->getIndex())
14707 return QualType();
14708 auto PackIndex = SX->getPackIndex();
14709 if (PackIndex != SY->getPackIndex())
14710 return QualType();
14711 return Ctx.getSubstTemplateTypeParmType(Replacement: Ctx.getQualifiedType(split: Underlying),
14712 AssociatedDecl: CD, Index, PackIndex,
14713 Final: SX->getFinal() && SY->getFinal());
14714 }
14715 case Type::ObjCTypeParam:
14716 // FIXME: Try to merge these.
14717 return QualType();
14718 case Type::Paren:
14719 return Ctx.getParenType(InnerType: Ctx.getQualifiedType(split: Underlying));
14720
14721 case Type::TemplateSpecialization: {
14722 const auto *TX = cast<TemplateSpecializationType>(Val: X),
14723 *TY = cast<TemplateSpecializationType>(Val: Y);
14724 TemplateName CTN =
14725 ::getCommonTemplateName(Ctx, X: TX->getTemplateName(),
14726 Y: TY->getTemplateName(), /*IgnoreDeduced=*/true);
14727 if (!CTN.getAsVoidPointer())
14728 return QualType();
14729 SmallVector<TemplateArgument, 8> As;
14730 if (getCommonTemplateArguments(Ctx, R&: As, Xs: TX->template_arguments(),
14731 Ys: TY->template_arguments()))
14732 return QualType();
14733 return Ctx.getTemplateSpecializationType(
14734 Keyword: getCommonTypeKeyword(X: TX, Y: TY, /*IsSame=*/false), Template: CTN, SpecifiedArgs: As,
14735 /*CanonicalArgs=*/{}, Underlying: Ctx.getQualifiedType(split: Underlying));
14736 }
14737 case Type::Typedef: {
14738 const auto *TX = cast<TypedefType>(Val: X), *TY = cast<TypedefType>(Val: Y);
14739 const TypedefNameDecl *CD = ::getCommonDecl(X: TX->getDecl(), Y: TY->getDecl());
14740 if (!CD)
14741 return QualType();
14742 return Ctx.getTypedefType(
14743 Keyword: ::getCommonTypeKeyword(X: TX, Y: TY, /*IsSame=*/false),
14744 Qualifier: ::getCommonQualifier(Ctx, X: TX, Y: TY, /*IsSame=*/false), Decl: CD,
14745 UnderlyingType: Ctx.getQualifiedType(split: Underlying));
14746 }
14747 case Type::TypeOf: {
14748 // The common sugar between two typeof expressions, where one is
14749 // potentially a typeof_unqual and the other is not, we unify to the
14750 // qualified type as that retains the most information along with the type.
14751 // We only return a typeof_unqual type when both types are unqual types.
14752 TypeOfKind Kind = TypeOfKind::Qualified;
14753 if (cast<TypeOfType>(Val: X)->getKind() == cast<TypeOfType>(Val: Y)->getKind() &&
14754 cast<TypeOfType>(Val: X)->getKind() == TypeOfKind::Unqualified)
14755 Kind = TypeOfKind::Unqualified;
14756 return Ctx.getTypeOfType(tofType: Ctx.getQualifiedType(split: Underlying), Kind);
14757 }
14758 case Type::TypeOfExpr:
14759 return QualType();
14760
14761 case Type::UnaryTransform: {
14762 const auto *UX = cast<UnaryTransformType>(Val: X),
14763 *UY = cast<UnaryTransformType>(Val: Y);
14764 UnaryTransformType::UTTKind KX = UX->getUTTKind();
14765 if (KX != UY->getUTTKind())
14766 return QualType();
14767 QualType BX = UX->getBaseType(), BY = UY->getBaseType();
14768 if (!Ctx.hasSameType(T1: BX, T2: BY))
14769 return QualType();
14770 // FIXME: It's inefficient to have to unify the base types.
14771 return Ctx.getUnaryTransformType(BaseType: Ctx.getCommonSugaredType(X: BX, Y: BY),
14772 UnderlyingType: Ctx.getQualifiedType(split: Underlying), Kind: KX);
14773 }
14774 case Type::Using: {
14775 const auto *UX = cast<UsingType>(Val: X), *UY = cast<UsingType>(Val: Y);
14776 const UsingShadowDecl *CD = ::getCommonDecl(X: UX->getDecl(), Y: UY->getDecl());
14777 if (!CD)
14778 return QualType();
14779 return Ctx.getUsingType(Keyword: ::getCommonTypeKeyword(X: UX, Y: UY, /*IsSame=*/false),
14780 Qualifier: ::getCommonQualifier(Ctx, X: UX, Y: UY, /*IsSame=*/false),
14781 D: CD, UnderlyingType: Ctx.getQualifiedType(split: Underlying));
14782 }
14783 case Type::MemberPointer: {
14784 const auto *PX = cast<MemberPointerType>(Val: X),
14785 *PY = cast<MemberPointerType>(Val: Y);
14786 CXXRecordDecl *Cls = PX->getMostRecentCXXRecordDecl();
14787 assert(Cls == PY->getMostRecentCXXRecordDecl());
14788 return Ctx.getMemberPointerType(
14789 T: ::getCommonPointeeType(Ctx, X: PX, Y: PY),
14790 Qualifier: ::getCommonQualifier(Ctx, X: PX, Y: PY, /*IsSame=*/false), Cls);
14791 }
14792 case Type::CountAttributed: {
14793 const auto *DX = cast<CountAttributedType>(Val: X),
14794 *DY = cast<CountAttributedType>(Val: Y);
14795 if (DX->isCountInBytes() != DY->isCountInBytes())
14796 return QualType();
14797 if (DX->isOrNull() != DY->isOrNull())
14798 return QualType();
14799 Expr *CEX = DX->getCountExpr();
14800 Expr *CEY = DY->getCountExpr();
14801 ArrayRef<clang::TypeCoupledDeclRefInfo> CDX = DX->getCoupledDecls();
14802 if (Ctx.hasSameExpr(X: CEX, Y: CEY))
14803 return Ctx.getCountAttributedType(WrappedTy: Ctx.getQualifiedType(split: Underlying), CountExpr: CEX,
14804 CountInBytes: DX->isCountInBytes(), OrNull: DX->isOrNull(),
14805 DependentDecls: CDX);
14806 if (!CEX->isIntegerConstantExpr(Ctx) || !CEY->isIntegerConstantExpr(Ctx))
14807 return QualType();
14808 // Two declarations with the same integer constant may still differ in their
14809 // expression pointers, so we need to evaluate them.
14810 llvm::APSInt VX = *CEX->getIntegerConstantExpr(Ctx);
14811 llvm::APSInt VY = *CEY->getIntegerConstantExpr(Ctx);
14812 if (VX != VY)
14813 return QualType();
14814 return Ctx.getCountAttributedType(WrappedTy: Ctx.getQualifiedType(split: Underlying), CountExpr: CEX,
14815 CountInBytes: DX->isCountInBytes(), OrNull: DX->isOrNull(),
14816 DependentDecls: CDX);
14817 }
14818 case Type::PredefinedSugar:
14819 assert(cast<PredefinedSugarType>(X)->getKind() !=
14820 cast<PredefinedSugarType>(Y)->getKind());
14821 return QualType();
14822 }
14823 llvm_unreachable("Unhandled Type Class");
14824}
14825
14826static auto unwrapSugar(SplitQualType &T, Qualifiers &QTotal) {
14827 SmallVector<SplitQualType, 8> R;
14828 while (true) {
14829 QTotal.addConsistentQualifiers(qs: T.Quals);
14830 QualType NT = T.Ty->getLocallyUnqualifiedSingleStepDesugaredType();
14831 if (NT == QualType(T.Ty, 0))
14832 break;
14833 R.push_back(Elt: T);
14834 T = NT.split();
14835 }
14836 return R;
14837}
14838
// Compute the best common sugared type for X and Y, which must share a
// canonical type (or a canonical unqualified type when 'Unqualified' is
// true). The result preserves as much sugar (typedefs, attributed types,
// elaborations, ...) as both inputs agree on.
QualType ASTContext::getCommonSugaredType(QualType X, QualType Y,
                                          bool Unqualified) const {
  assert(Unqualified ? hasSameUnqualifiedType(X, Y) : hasSameType(X, Y));
  if (X == Y)
    return X;
  // A canonical type carries no sugar at all, so it is trivially the common
  // sugared type of the pair.
  if (!Unqualified) {
    if (X.isCanonical())
      return X;
    if (Y.isCanonical())
      return Y;
  }

  SplitQualType SX = X.split(), SY = Y.split();
  Qualifiers QX, QY;
  // Desugar SX and SY, setting the sugar and qualifiers aside into Xs and Ys,
  // until we reach their underlying "canonical nodes". Note these are not
  // necessarily canonical types, as they may still have sugared properties.
  // QX and QY will store the sum of all qualifiers in Xs and Ys respectively.
  auto Xs = ::unwrapSugar(T&: SX, QTotal&: QX), Ys = ::unwrapSugar(T&: SY, QTotal&: QY);

  // If this is an ArrayType, the element qualifiers are interchangeable with
  // the top level qualifiers.
  // * In case the canonical nodes are the same, the elements types are already
  // the same.
  // * Otherwise, the element types will be made the same, and any different
  // element qualifiers will be moved up to the top level qualifiers, per
  // 'getCommonArrayElementType'.
  // In both cases, this means there may be top level qualifiers which differ
  // between X and Y. If so, these differing qualifiers are redundant with the
  // element qualifiers, and can be removed without changing the canonical type.
  // The desired behaviour is the same as for the 'Unqualified' case here:
  // treat the redundant qualifiers as sugar, remove the ones which are not
  // common to both sides.
  bool KeepCommonQualifiers =
      Unqualified || isa<ArrayType, OverflowBehaviorType>(Val: SX.Ty);

  if (SX.Ty != SY.Ty) {
    // The canonical nodes differ. Build a common canonical node out of the two,
    // unifying their sugar. This may recurse back here.
    SX.Ty =
        ::getCommonNonSugarTypeNode(Ctx: *this, X: SX.Ty, QX, Y: SY.Ty, QY).getTypePtr();
  } else {
    // The canonical nodes were identical: We may have desugared too much.
    // Add any common sugar back in. The per-node qualifiers are subtracted
    // from the accumulated totals as each node is restored.
    while (!Xs.empty() && !Ys.empty() && Xs.back().Ty == Ys.back().Ty) {
      QX -= SX.Quals;
      QY -= SY.Quals;
      SX = Xs.pop_back_val();
      SY = Ys.pop_back_val();
    }
  }
  if (KeepCommonQualifiers)
    QX = Qualifiers::removeCommonQualifiers(L&: QX, R&: QY);
  else
    assert(QX == QY);

  // Even though the remaining sugar nodes in Xs and Ys differ, some may be
  // related. Walk up these nodes, unifying them and adding the result.
  while (!Xs.empty() && !Ys.empty()) {
    auto Underlying = SplitQualType(
        SX.Ty, Qualifiers::removeCommonQualifiers(L&: SX.Quals, R&: SY.Quals));
    SX = Xs.pop_back_val();
    SY = Ys.pop_back_val();
    SX.Ty = ::getCommonSugarTypeNode(Ctx: *this, X: SX.Ty, Y: SY.Ty, Underlying)
                .getTypePtrOrNull();
    // Stop at the first pair which is unrelated.
    if (!SX.Ty) {
      SX.Ty = Underlying.Ty;
      break;
    }
    QX -= Underlying.Quals;
  };

  // Add back the missing accumulated qualifiers, which were stripped off
  // with the sugar nodes we could not unify.
  QualType R = getQualifiedType(T: SX.Ty, Qs: QX);
  assert(Unqualified ? hasSameUnqualifiedType(R, X) : hasSameType(R, X));
  return R;
}
14918
14919QualType ASTContext::getCorrespondingUnsaturatedType(QualType Ty) const {
14920 assert(Ty->isFixedPointType());
14921
14922 if (Ty->isUnsaturatedFixedPointType())
14923 return Ty;
14924
14925 switch (Ty->castAs<BuiltinType>()->getKind()) {
14926 default:
14927 llvm_unreachable("Not a saturated fixed point type!");
14928 case BuiltinType::SatShortAccum:
14929 return ShortAccumTy;
14930 case BuiltinType::SatAccum:
14931 return AccumTy;
14932 case BuiltinType::SatLongAccum:
14933 return LongAccumTy;
14934 case BuiltinType::SatUShortAccum:
14935 return UnsignedShortAccumTy;
14936 case BuiltinType::SatUAccum:
14937 return UnsignedAccumTy;
14938 case BuiltinType::SatULongAccum:
14939 return UnsignedLongAccumTy;
14940 case BuiltinType::SatShortFract:
14941 return ShortFractTy;
14942 case BuiltinType::SatFract:
14943 return FractTy;
14944 case BuiltinType::SatLongFract:
14945 return LongFractTy;
14946 case BuiltinType::SatUShortFract:
14947 return UnsignedShortFractTy;
14948 case BuiltinType::SatUFract:
14949 return UnsignedFractTy;
14950 case BuiltinType::SatULongFract:
14951 return UnsignedLongFractTy;
14952 }
14953}
14954
14955QualType ASTContext::getCorrespondingSaturatedType(QualType Ty) const {
14956 assert(Ty->isFixedPointType());
14957
14958 if (Ty->isSaturatedFixedPointType()) return Ty;
14959
14960 switch (Ty->castAs<BuiltinType>()->getKind()) {
14961 default:
14962 llvm_unreachable("Not a fixed point type!");
14963 case BuiltinType::ShortAccum:
14964 return SatShortAccumTy;
14965 case BuiltinType::Accum:
14966 return SatAccumTy;
14967 case BuiltinType::LongAccum:
14968 return SatLongAccumTy;
14969 case BuiltinType::UShortAccum:
14970 return SatUnsignedShortAccumTy;
14971 case BuiltinType::UAccum:
14972 return SatUnsignedAccumTy;
14973 case BuiltinType::ULongAccum:
14974 return SatUnsignedLongAccumTy;
14975 case BuiltinType::ShortFract:
14976 return SatShortFractTy;
14977 case BuiltinType::Fract:
14978 return SatFractTy;
14979 case BuiltinType::LongFract:
14980 return SatLongFractTy;
14981 case BuiltinType::UShortFract:
14982 return SatUnsignedShortFractTy;
14983 case BuiltinType::UFract:
14984 return SatUnsignedFractTy;
14985 case BuiltinType::ULongFract:
14986 return SatUnsignedLongFractTy;
14987 }
14988}
14989
14990LangAS ASTContext::getLangASForBuiltinAddressSpace(unsigned AS) const {
14991 if (LangOpts.OpenCL)
14992 return getTargetInfo().getOpenCLBuiltinAddressSpace(AS);
14993
14994 if (LangOpts.CUDA)
14995 return getTargetInfo().getCUDABuiltinAddressSpace(AS);
14996
14997 return getLangASFromTargetAS(TargetAS: AS);
14998}
14999
// Explicitly instantiate this in case a Redeclarable<T> is used from a TU that
// doesn't include ASTContext.h. NOTE(review): presumably makeValue's
// definition requires a complete ASTContext, so other TUs would otherwise
// fail to instantiate it — confirm against LazyGenerationalUpdatePtr.
template
clang::LazyGenerationalUpdatePtr<
    const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::ValueType
clang::LazyGenerationalUpdatePtr<
    const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::makeValue(
    const clang::ASTContext &Ctx, Decl *Value);
15008
15009unsigned char ASTContext::getFixedPointScale(QualType Ty) const {
15010 assert(Ty->isFixedPointType());
15011
15012 const TargetInfo &Target = getTargetInfo();
15013 switch (Ty->castAs<BuiltinType>()->getKind()) {
15014 default:
15015 llvm_unreachable("Not a fixed point type!");
15016 case BuiltinType::ShortAccum:
15017 case BuiltinType::SatShortAccum:
15018 return Target.getShortAccumScale();
15019 case BuiltinType::Accum:
15020 case BuiltinType::SatAccum:
15021 return Target.getAccumScale();
15022 case BuiltinType::LongAccum:
15023 case BuiltinType::SatLongAccum:
15024 return Target.getLongAccumScale();
15025 case BuiltinType::UShortAccum:
15026 case BuiltinType::SatUShortAccum:
15027 return Target.getUnsignedShortAccumScale();
15028 case BuiltinType::UAccum:
15029 case BuiltinType::SatUAccum:
15030 return Target.getUnsignedAccumScale();
15031 case BuiltinType::ULongAccum:
15032 case BuiltinType::SatULongAccum:
15033 return Target.getUnsignedLongAccumScale();
15034 case BuiltinType::ShortFract:
15035 case BuiltinType::SatShortFract:
15036 return Target.getShortFractScale();
15037 case BuiltinType::Fract:
15038 case BuiltinType::SatFract:
15039 return Target.getFractScale();
15040 case BuiltinType::LongFract:
15041 case BuiltinType::SatLongFract:
15042 return Target.getLongFractScale();
15043 case BuiltinType::UShortFract:
15044 case BuiltinType::SatUShortFract:
15045 return Target.getUnsignedShortFractScale();
15046 case BuiltinType::UFract:
15047 case BuiltinType::SatUFract:
15048 return Target.getUnsignedFractScale();
15049 case BuiltinType::ULongFract:
15050 case BuiltinType::SatULongFract:
15051 return Target.getUnsignedLongFractScale();
15052 }
15053}
15054
15055unsigned char ASTContext::getFixedPointIBits(QualType Ty) const {
15056 assert(Ty->isFixedPointType());
15057
15058 const TargetInfo &Target = getTargetInfo();
15059 switch (Ty->castAs<BuiltinType>()->getKind()) {
15060 default:
15061 llvm_unreachable("Not a fixed point type!");
15062 case BuiltinType::ShortAccum:
15063 case BuiltinType::SatShortAccum:
15064 return Target.getShortAccumIBits();
15065 case BuiltinType::Accum:
15066 case BuiltinType::SatAccum:
15067 return Target.getAccumIBits();
15068 case BuiltinType::LongAccum:
15069 case BuiltinType::SatLongAccum:
15070 return Target.getLongAccumIBits();
15071 case BuiltinType::UShortAccum:
15072 case BuiltinType::SatUShortAccum:
15073 return Target.getUnsignedShortAccumIBits();
15074 case BuiltinType::UAccum:
15075 case BuiltinType::SatUAccum:
15076 return Target.getUnsignedAccumIBits();
15077 case BuiltinType::ULongAccum:
15078 case BuiltinType::SatULongAccum:
15079 return Target.getUnsignedLongAccumIBits();
15080 case BuiltinType::ShortFract:
15081 case BuiltinType::SatShortFract:
15082 case BuiltinType::Fract:
15083 case BuiltinType::SatFract:
15084 case BuiltinType::LongFract:
15085 case BuiltinType::SatLongFract:
15086 case BuiltinType::UShortFract:
15087 case BuiltinType::SatUShortFract:
15088 case BuiltinType::UFract:
15089 case BuiltinType::SatUFract:
15090 case BuiltinType::ULongFract:
15091 case BuiltinType::SatULongFract:
15092 return 0;
15093 }
15094}
15095
15096llvm::FixedPointSemantics
15097ASTContext::getFixedPointSemantics(QualType Ty) const {
15098 assert((Ty->isFixedPointType() || Ty->isIntegerType()) &&
15099 "Can only get the fixed point semantics for a "
15100 "fixed point or integer type.");
15101 if (Ty->isIntegerType())
15102 return llvm::FixedPointSemantics::GetIntegerSemantics(
15103 Width: getIntWidth(T: Ty), IsSigned: Ty->isSignedIntegerType());
15104
15105 bool isSigned = Ty->isSignedFixedPointType();
15106 return llvm::FixedPointSemantics(
15107 static_cast<unsigned>(getTypeSize(T: Ty)), getFixedPointScale(Ty), isSigned,
15108 Ty->isSaturatedFixedPointType(),
15109 !isSigned && getTargetInfo().doUnsignedFixedPointTypesHavePadding());
15110}
15111
15112llvm::APFixedPoint ASTContext::getFixedPointMax(QualType Ty) const {
15113 assert(Ty->isFixedPointType());
15114 return llvm::APFixedPoint::getMax(Sema: getFixedPointSemantics(Ty));
15115}
15116
15117llvm::APFixedPoint ASTContext::getFixedPointMin(QualType Ty) const {
15118 assert(Ty->isFixedPointType());
15119 return llvm::APFixedPoint::getMin(Sema: getFixedPointSemantics(Ty));
15120}
15121
15122QualType ASTContext::getCorrespondingSignedFixedPointType(QualType Ty) const {
15123 assert(Ty->isUnsignedFixedPointType() &&
15124 "Expected unsigned fixed point type");
15125
15126 switch (Ty->castAs<BuiltinType>()->getKind()) {
15127 case BuiltinType::UShortAccum:
15128 return ShortAccumTy;
15129 case BuiltinType::UAccum:
15130 return AccumTy;
15131 case BuiltinType::ULongAccum:
15132 return LongAccumTy;
15133 case BuiltinType::SatUShortAccum:
15134 return SatShortAccumTy;
15135 case BuiltinType::SatUAccum:
15136 return SatAccumTy;
15137 case BuiltinType::SatULongAccum:
15138 return SatLongAccumTy;
15139 case BuiltinType::UShortFract:
15140 return ShortFractTy;
15141 case BuiltinType::UFract:
15142 return FractTy;
15143 case BuiltinType::ULongFract:
15144 return LongFractTy;
15145 case BuiltinType::SatUShortFract:
15146 return SatShortFractTy;
15147 case BuiltinType::SatUFract:
15148 return SatFractTy;
15149 case BuiltinType::SatULongFract:
15150 return SatLongFractTy;
15151 default:
15152 llvm_unreachable("Unexpected unsigned fixed point type");
15153 }
15154}
15155
15156// Given a list of FMV features, return a concatenated list of the
15157// corresponding backend features (which may contain duplicates).
15158static std::vector<std::string> getFMVBackendFeaturesFor(
15159 const llvm::SmallVectorImpl<StringRef> &FMVFeatStrings) {
15160 std::vector<std::string> BackendFeats;
15161 llvm::AArch64::ExtensionSet FeatureBits;
15162 for (StringRef F : FMVFeatStrings)
15163 if (auto FMVExt = llvm::AArch64::parseFMVExtension(Extension: F))
15164 if (FMVExt->ID)
15165 FeatureBits.enable(E: *FMVExt->ID);
15166 FeatureBits.toLLVMFeatureList(Features&: BackendFeats);
15167 return BackendFeats;
15168}
15169
15170ParsedTargetAttr
15171ASTContext::filterFunctionTargetAttrs(const TargetAttr *TD) const {
15172 assert(TD != nullptr);
15173 ParsedTargetAttr ParsedAttr = Target->parseTargetAttr(Str: TD->getFeaturesStr());
15174
15175 llvm::erase_if(C&: ParsedAttr.Features, P: [&](const std::string &Feat) {
15176 return !Target->isValidFeatureName(Feature: StringRef{Feat}.substr(Start: 1));
15177 });
15178 return ParsedAttr;
15179}
15180
15181void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
15182 const FunctionDecl *FD) const {
15183 if (FD)
15184 getFunctionFeatureMap(FeatureMap, GD: GlobalDecl().getWithDecl(D: FD));
15185 else
15186 Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(),
15187 CPU: Target->getTargetOpts().CPU,
15188 FeatureVec: Target->getTargetOpts().Features);
15189}
15190
// Fills in the supplied string map with the set of target features for the
// passed in function. The function's multiversioning attribute (target,
// cpu_specific/cpu_dispatch, target_clones, or target_version), if any,
// determines which CPU and feature list seed the map; otherwise the
// target's global feature map is used as-is.
void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
                                       GlobalDecl GD) const {
  StringRef TargetCPU = Target->getTargetOpts().CPU;
  const FunctionDecl *FD = GD.getDecl()->getAsFunction();
  if (const auto *TD = FD->getAttr<TargetAttr>()) {
    // __attribute__((target("..."))): parse and filter the attribute string.
    ParsedTargetAttr ParsedAttr = filterFunctionTargetAttrs(TD);

    // Make a copy of the features as passed on the command line into the
    // beginning of the additional features from the function to override.
    // AArch64 handles command line option features in parseTargetAttr().
    if (!Target->getTriple().isAArch64())
      ParsedAttr.Features.insert(
          position: ParsedAttr.Features.begin(),
          first: Target->getTargetOpts().FeaturesAsWritten.begin(),
          last: Target->getTargetOpts().FeaturesAsWritten.end());

    // A valid "arch=" in the attribute overrides the default CPU.
    if (ParsedAttr.CPU != "" && Target->isValidCPUName(Name: ParsedAttr.CPU))
      TargetCPU = ParsedAttr.CPU;

    // Now populate the feature map, first with the TargetCPU which is either
    // the default or a new one from the target attribute string. Then we'll use
    // the passed in features (FeaturesAsWritten) along with the new ones from
    // the attribute.
    Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU,
                           FeatureVec: ParsedAttr.Features);
  } else if (const auto *SD = FD->getAttr<CPUSpecificAttr>()) {
    // cpu_specific/cpu_dispatch: features come from the dispatch CPU selected
    // by the multiversion index, with command-line features prepended.
    llvm::SmallVector<StringRef, 32> FeaturesTmp;
    Target->getCPUSpecificCPUDispatchFeatures(
        Name: SD->getCPUName(Index: GD.getMultiVersionIndex())->getName(), Features&: FeaturesTmp);
    std::vector<std::string> Features(FeaturesTmp.begin(), FeaturesTmp.end());
    Features.insert(position: Features.begin(),
                    first: Target->getTargetOpts().FeaturesAsWritten.begin(),
                    last: Target->getTargetOpts().FeaturesAsWritten.end());
    Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU, FeatureVec: Features);
  } else if (const auto *TC = FD->getAttr<TargetClonesAttr>()) {
    // target_clones: per-target handling of the selected clone's feature list.
    if (Target->getTriple().isAArch64()) {
      // AArch64 spells clones as FMV feature names; translate to backend
      // feature strings.
      llvm::SmallVector<StringRef, 8> Feats;
      TC->getFeatures(Out&: Feats, Index: GD.getMultiVersionIndex());
      std::vector<std::string> Features = getFMVBackendFeaturesFor(FMVFeatStrings: Feats);
      Features.insert(position: Features.begin(),
                      first: Target->getTargetOpts().FeaturesAsWritten.begin(),
                      last: Target->getTargetOpts().FeaturesAsWritten.end());
      Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU, FeatureVec: Features);
    } else if (Target->getTriple().isRISCV()) {
      // RISC-V spells clones as target attribute strings; "default" adds
      // nothing beyond the command-line features.
      StringRef VersionStr = TC->getFeatureStr(Index: GD.getMultiVersionIndex());
      std::vector<std::string> Features;
      if (VersionStr != "default") {
        ParsedTargetAttr ParsedAttr = Target->parseTargetAttr(Str: VersionStr);
        Features.insert(position: Features.begin(), first: ParsedAttr.Features.begin(),
                        last: ParsedAttr.Features.end());
      }
      Features.insert(position: Features.begin(),
                      first: Target->getTargetOpts().FeaturesAsWritten.begin(),
                      last: Target->getTargetOpts().FeaturesAsWritten.end());
      Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU, FeatureVec: Features);
    } else {
      // Other targets (e.g. X86): a clone is either "arch=<cpu>", "default",
      // or a single feature name which is enabled with a '+' prefix.
      std::vector<std::string> Features;
      StringRef VersionStr = TC->getFeatureStr(Index: GD.getMultiVersionIndex());
      if (VersionStr.starts_with(Prefix: "arch="))
        TargetCPU = VersionStr.drop_front(N: sizeof("arch=") - 1);
      else if (VersionStr != "default")
        Features.push_back(x: (StringRef{"+"} + VersionStr).str());
      Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU, FeatureVec: Features);
    }
  } else if (const auto *TV = FD->getAttr<TargetVersionAttr>()) {
    // target_version: RISC-V parses the name as a target attribute string;
    // AArch64 translates FMV feature names to backend features.
    std::vector<std::string> Features;
    if (Target->getTriple().isRISCV()) {
      ParsedTargetAttr ParsedAttr = Target->parseTargetAttr(Str: TV->getName());
      Features.insert(position: Features.begin(), first: ParsedAttr.Features.begin(),
                      last: ParsedAttr.Features.end());
    } else {
      assert(Target->getTriple().isAArch64());
      llvm::SmallVector<StringRef, 8> Feats;
      TV->getFeatures(Out&: Feats);
      Features = getFMVBackendFeaturesFor(FMVFeatStrings: Feats);
    }
    Features.insert(position: Features.begin(),
                    first: Target->getTargetOpts().FeaturesAsWritten.begin(),
                    last: Target->getTargetOpts().FeaturesAsWritten.end());
    Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU, FeatureVec: Features);
  } else {
    // No multiversioning attribute: use the precomputed global feature map.
    FeatureMap = Target->getTargetOpts().FeatureMap;
  }
}
15277
15278static SYCLKernelInfo BuildSYCLKernelInfo(ASTContext &Context,
15279 CanQualType KernelNameType,
15280 const FunctionDecl *FD) {
15281 // Host and device compilation may use different ABIs and different ABIs
15282 // may allocate name mangling discriminators differently. A discriminator
15283 // override is used to ensure consistent discriminator allocation across
15284 // host and device compilation.
15285 auto DeviceDiscriminatorOverrider =
15286 [](ASTContext &Ctx, const NamedDecl *ND) -> UnsignedOrNone {
15287 if (const auto *RD = dyn_cast<CXXRecordDecl>(Val: ND))
15288 if (RD->isLambda())
15289 return RD->getDeviceLambdaManglingNumber();
15290 return std::nullopt;
15291 };
15292 std::unique_ptr<MangleContext> MC{ItaniumMangleContext::create(
15293 Context, Diags&: Context.getDiagnostics(), Discriminator: DeviceDiscriminatorOverrider)};
15294
15295 // Construct a mangled name for the SYCL kernel caller offload entry point.
15296 // FIXME: The Itanium typeinfo mangling (_ZTS<type>) is currently used to
15297 // name the SYCL kernel caller offload entry point function. This mangling
15298 // does not suffice to clearly identify symbols that correspond to SYCL
15299 // kernel caller functions, nor is this mangling natural for targets that
15300 // use a non-Itanium ABI.
15301 std::string Buffer;
15302 Buffer.reserve(res_arg: 128);
15303 llvm::raw_string_ostream Out(Buffer);
15304 MC->mangleCanonicalTypeName(T: KernelNameType, Out);
15305 std::string KernelName = Out.str();
15306
15307 return {KernelNameType, FD, KernelName};
15308}
15309
15310void ASTContext::registerSYCLEntryPointFunction(FunctionDecl *FD) {
15311 // If the function declaration to register is invalid or dependent, the
15312 // registration attempt is ignored.
15313 if (FD->isInvalidDecl() || FD->isTemplated())
15314 return;
15315
15316 const auto *SKEPAttr = FD->getAttr<SYCLKernelEntryPointAttr>();
15317 assert(SKEPAttr && "Missing sycl_kernel_entry_point attribute");
15318
15319 // Be tolerant of multiple registration attempts so long as each attempt
15320 // is for the same entity. Callers are obligated to detect and diagnose
15321 // conflicting kernel names prior to calling this function.
15322 CanQualType KernelNameType = getCanonicalType(T: SKEPAttr->getKernelName());
15323 auto IT = SYCLKernels.find(Val: KernelNameType);
15324 assert((IT == SYCLKernels.end() ||
15325 declaresSameEntity(FD, IT->second.getKernelEntryPointDecl())) &&
15326 "SYCL kernel name conflict");
15327 (void)IT;
15328 SYCLKernels.insert(KV: std::make_pair(
15329 x&: KernelNameType, y: BuildSYCLKernelInfo(Context&: *this, KernelNameType, FD)));
15330}
15331
15332const SYCLKernelInfo &ASTContext::getSYCLKernelInfo(QualType T) const {
15333 CanQualType KernelNameType = getCanonicalType(T);
15334 return SYCLKernels.at(Val: KernelNameType);
15335}
15336
15337const SYCLKernelInfo *ASTContext::findSYCLKernelInfo(QualType T) const {
15338 CanQualType KernelNameType = getCanonicalType(T);
15339 auto IT = SYCLKernels.find(Val: KernelNameType);
15340 if (IT != SYCLKernels.end())
15341 return &IT->second;
15342 return nullptr;
15343}
15344
15345OMPTraitInfo &ASTContext::getNewOMPTraitInfo() {
15346 OMPTraitInfoVector.emplace_back(Args: new OMPTraitInfo());
15347 return *OMPTraitInfoVector.back();
15348}
15349
15350const StreamingDiagnostic &clang::
15351operator<<(const StreamingDiagnostic &DB,
15352 const ASTContext::SectionInfo &Section) {
15353 if (Section.Decl)
15354 return DB << Section.Decl;
15355 return DB << "a prior #pragma section";
15356}
15357
15358bool ASTContext::mayExternalize(const Decl *D) const {
15359 bool IsInternalVar =
15360 isa<VarDecl>(Val: D) &&
15361 basicGVALinkageForVariable(Context: *this, VD: cast<VarDecl>(Val: D)) == GVA_Internal;
15362 bool IsExplicitDeviceVar = (D->hasAttr<CUDADeviceAttr>() &&
15363 !D->getAttr<CUDADeviceAttr>()->isImplicit()) ||
15364 (D->hasAttr<CUDAConstantAttr>() &&
15365 !D->getAttr<CUDAConstantAttr>()->isImplicit());
15366 // CUDA/HIP: managed variables need to be externalized since it is
15367 // a declaration in IR, therefore cannot have internal linkage. Kernels in
15368 // anonymous name space needs to be externalized to avoid duplicate symbols.
15369 return (IsInternalVar &&
15370 (D->hasAttr<HIPManagedAttr>() || IsExplicitDeviceVar)) ||
15371 (D->hasAttr<CUDAGlobalAttr>() &&
15372 basicGVALinkageForFunction(Context: *this, FD: cast<FunctionDecl>(Val: D)) ==
15373 GVA_Internal);
15374}
15375
15376bool ASTContext::shouldExternalize(const Decl *D) const {
15377 return mayExternalize(D) &&
15378 (D->hasAttr<HIPManagedAttr>() || D->hasAttr<CUDAGlobalAttr>() ||
15379 CUDADeviceVarODRUsedByHost.count(V: cast<VarDecl>(Val: D)));
15380}
15381
15382StringRef ASTContext::getCUIDHash() const {
15383 if (!CUIDHash.empty())
15384 return CUIDHash;
15385 if (LangOpts.CUID.empty())
15386 return StringRef();
15387 CUIDHash = llvm::utohexstr(X: llvm::MD5Hash(Str: LangOpts.CUID), /*LowerCase=*/true);
15388 return CUIDHash;
15389}
15390
15391const CXXRecordDecl *
15392ASTContext::baseForVTableAuthentication(const CXXRecordDecl *ThisClass) const {
15393 assert(ThisClass);
15394 assert(ThisClass->isPolymorphic());
15395 const CXXRecordDecl *PrimaryBase = ThisClass;
15396 while (1) {
15397 assert(PrimaryBase);
15398 assert(PrimaryBase->isPolymorphic());
15399 auto &Layout = getASTRecordLayout(D: PrimaryBase);
15400 auto Base = Layout.getPrimaryBase();
15401 if (!Base || Base == PrimaryBase || !Base->isPolymorphic())
15402 break;
15403 PrimaryBase = Base;
15404 }
15405 return PrimaryBase;
15406}
15407
// Decide whether the thunk with the given (full) mangled name should be
// emitted under its abbreviated (override-info-elided) name. For each elided
// name shared by several thunks, only the lexicographically-smallest full
// mangled name is allowed to abbreviate; results are cached per method in
// ThunksToBeAbbreviated.
bool ASTContext::useAbbreviatedThunkName(GlobalDecl VirtualMethodDecl,
                                         StringRef MangledName) {
  auto *Method = cast<CXXMethodDecl>(Val: VirtualMethodDecl.getDecl());
  assert(Method->isVirtual());
  bool DefaultIncludesPointerAuth =
      LangOpts.PointerAuthCalls || LangOpts.PointerAuthIntrinsics;

  // Without pointer auth there is no override info in the mangling, so every
  // thunk name is already "abbreviated".
  if (!DefaultIncludesPointerAuth)
    return true;

  // Fast path: reuse the cached set computed on a prior call.
  auto Existing = ThunksToBeAbbreviated.find(Val: VirtualMethodDecl);
  if (Existing != ThunksToBeAbbreviated.end())
    return Existing->second.contains(key: MangledName.str());

  // Slow path: mangle every thunk of the method both with and without
  // override info, grouping full names by their elided name.
  std::unique_ptr<MangleContext> Mangler(createMangleContext());
  llvm::StringMap<llvm::SmallVector<std::string, 2>> Thunks;
  auto VtableContext = getVTableContext();
  if (const auto *ThunkInfos = VtableContext->getThunkInfo(GD: VirtualMethodDecl)) {
    // Destructors use a dedicated mangling entry point.
    auto *Destructor = dyn_cast<CXXDestructorDecl>(Val: Method);
    for (const auto &Thunk : *ThunkInfos) {
      SmallString<256> ElidedName;
      llvm::raw_svector_ostream ElidedNameStream(ElidedName);
      if (Destructor)
        Mangler->mangleCXXDtorThunk(DD: Destructor, Type: VirtualMethodDecl.getDtorType(),
                                    Thunk, /* elideOverrideInfo */ ElideOverrideInfo: true,
                                    ElidedNameStream);
      else
        Mangler->mangleThunk(MD: Method, Thunk, /* elideOverrideInfo */ ElideOverrideInfo: true,
                             ElidedNameStream);
      SmallString<256> MangledName;
      llvm::raw_svector_ostream mangledNameStream(MangledName);
      if (Destructor)
        Mangler->mangleCXXDtorThunk(DD: Destructor, Type: VirtualMethodDecl.getDtorType(),
                                    Thunk, /* elideOverrideInfo */ ElideOverrideInfo: false,
                                    mangledNameStream);
      else
        Mangler->mangleThunk(MD: Method, Thunk, /* elideOverrideInfo */ ElideOverrideInfo: false,
                             mangledNameStream);

      Thunks[ElidedName].push_back(Elt: std::string(MangledName));
    }
  }
  // For each elided name, only the smallest full name may be abbreviated.
  llvm::StringSet<> SimplifiedThunkNames;
  for (auto &ThunkList : Thunks) {
    llvm::sort(C&: ThunkList.second);
    SimplifiedThunkNames.insert(key: ThunkList.second[0]);
  }
  bool Result = SimplifiedThunkNames.contains(key: MangledName);
  ThunksToBeAbbreviated[VirtualMethodDecl] = std::move(SimplifiedThunkNames);
  return Result;
}
15459
15460bool ASTContext::arePFPFieldsTriviallyCopyable(const RecordDecl *RD) const {
15461 // Check for trivially-destructible here because non-trivially-destructible
15462 // types will always cause the type and any types derived from it to be
15463 // considered non-trivially-copyable. The same cannot be said for
15464 // trivially-copyable because deleting special members of a type derived from
15465 // a non-trivially-copyable type can cause the derived type to be considered
15466 // trivially copyable.
15467 if (getLangOpts().PointerFieldProtectionTagged)
15468 return !isa<CXXRecordDecl>(Val: RD) ||
15469 cast<CXXRecordDecl>(Val: RD)->hasTrivialDestructor();
15470 return true;
15471}
15472
15473static void findPFPFields(const ASTContext &Ctx, QualType Ty, CharUnits Offset,
15474 std::vector<PFPField> &Fields, bool IncludeVBases) {
15475 if (auto *AT = Ctx.getAsConstantArrayType(T: Ty)) {
15476 if (auto *ElemDecl = AT->getElementType()->getAsCXXRecordDecl()) {
15477 const ASTRecordLayout &ElemRL = Ctx.getASTRecordLayout(D: ElemDecl);
15478 for (unsigned i = 0; i != AT->getSize(); ++i)
15479 findPFPFields(Ctx, Ty: AT->getElementType(), Offset: Offset + i * ElemRL.getSize(),
15480 Fields, IncludeVBases: true);
15481 }
15482 }
15483 auto *Decl = Ty->getAsCXXRecordDecl();
15484 // isPFPType() is inherited from bases and members (including via arrays), so
15485 // we can early exit if it is false. Unions are excluded per the API
15486 // documentation.
15487 if (!Decl || !Decl->isPFPType() || Decl->isUnion())
15488 return;
15489 const ASTRecordLayout &RL = Ctx.getASTRecordLayout(D: Decl);
15490 for (FieldDecl *Field : Decl->fields()) {
15491 CharUnits FieldOffset =
15492 Offset +
15493 Ctx.toCharUnitsFromBits(BitSize: RL.getFieldOffset(FieldNo: Field->getFieldIndex()));
15494 if (Ctx.isPFPField(Field))
15495 Fields.push_back(x: {.Offset: FieldOffset, .Field: Field});
15496 findPFPFields(Ctx, Ty: Field->getType(), Offset: FieldOffset, Fields,
15497 /*IncludeVBases=*/true);
15498 }
15499 // Pass false for IncludeVBases below because vbases are only included in
15500 // layout for top-level types, i.e. not bases or vbases.
15501 for (CXXBaseSpecifier &Base : Decl->bases()) {
15502 if (Base.isVirtual())
15503 continue;
15504 CharUnits BaseOffset =
15505 Offset + RL.getBaseClassOffset(Base: Base.getType()->getAsCXXRecordDecl());
15506 findPFPFields(Ctx, Ty: Base.getType(), Offset: BaseOffset, Fields,
15507 /*IncludeVBases=*/false);
15508 }
15509 if (IncludeVBases) {
15510 for (CXXBaseSpecifier &Base : Decl->vbases()) {
15511 CharUnits BaseOffset =
15512 Offset + RL.getVBaseClassOffset(VBase: Base.getType()->getAsCXXRecordDecl());
15513 findPFPFields(Ctx, Ty: Base.getType(), Offset: BaseOffset, Fields,
15514 /*IncludeVBases=*/false);
15515 }
15516 }
15517}
15518
15519std::vector<PFPField> ASTContext::findPFPFields(QualType Ty) const {
15520 std::vector<PFPField> PFPFields;
15521 ::findPFPFields(Ctx: *this, Ty, Offset: CharUnits::Zero(), Fields&: PFPFields, IncludeVBases: true);
15522 return PFPFields;
15523}
15524
15525bool ASTContext::hasPFPFields(QualType Ty) const {
15526 return !findPFPFields(Ty).empty();
15527}
15528
15529bool ASTContext::isPFPField(const FieldDecl *FD) const {
15530 if (auto *RD = dyn_cast<CXXRecordDecl>(Val: FD->getParent()))
15531 return RD->isPFPType() && FD->getType()->isPointerType() &&
15532 !FD->hasAttr<NoFieldProtectionAttr>();
15533 return false;
15534}
15535
15536void ASTContext::recordMemberDataPointerEvaluation(const ValueDecl *VD) {
15537 auto *FD = dyn_cast<FieldDecl>(Val: VD);
15538 if (!FD)
15539 FD = cast<FieldDecl>(Val: cast<IndirectFieldDecl>(Val: VD)->chain().back());
15540 if (isPFPField(FD))
15541 PFPFieldsWithEvaluatedOffset.insert(X: FD);
15542}
15543
15544void ASTContext::recordOffsetOfEvaluation(const OffsetOfExpr *E) {
15545 if (E->getNumComponents() == 0)
15546 return;
15547 OffsetOfNode Comp = E->getComponent(Idx: E->getNumComponents() - 1);
15548 if (Comp.getKind() != OffsetOfNode::Field)
15549 return;
15550 if (FieldDecl *FD = Comp.getField(); isPFPField(FD))
15551 PFPFieldsWithEvaluatedOffset.insert(X: FD);
15552}
15553