1//===- ASTContext.cpp - Context to hold long-lived AST nodes --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the ASTContext interface.
10//
11//===----------------------------------------------------------------------===//
12
13#include "clang/AST/ASTContext.h"
14#include "ByteCode/Context.h"
15#include "CXXABI.h"
16#include "clang/AST/APValue.h"
17#include "clang/AST/ASTConcept.h"
18#include "clang/AST/ASTMutationListener.h"
19#include "clang/AST/ASTStructuralEquivalence.h"
20#include "clang/AST/ASTTypeTraits.h"
21#include "clang/AST/Attr.h"
22#include "clang/AST/AttrIterator.h"
23#include "clang/AST/CharUnits.h"
24#include "clang/AST/Comment.h"
25#include "clang/AST/Decl.h"
26#include "clang/AST/DeclBase.h"
27#include "clang/AST/DeclCXX.h"
28#include "clang/AST/DeclContextInternals.h"
29#include "clang/AST/DeclObjC.h"
30#include "clang/AST/DeclOpenMP.h"
31#include "clang/AST/DeclTemplate.h"
32#include "clang/AST/DeclarationName.h"
33#include "clang/AST/DependenceFlags.h"
34#include "clang/AST/Expr.h"
35#include "clang/AST/ExprCXX.h"
36#include "clang/AST/ExternalASTSource.h"
37#include "clang/AST/Mangle.h"
38#include "clang/AST/MangleNumberingContext.h"
39#include "clang/AST/NestedNameSpecifier.h"
40#include "clang/AST/ParentMapContext.h"
41#include "clang/AST/RawCommentList.h"
42#include "clang/AST/RecordLayout.h"
43#include "clang/AST/Stmt.h"
44#include "clang/AST/TemplateBase.h"
45#include "clang/AST/TemplateName.h"
46#include "clang/AST/Type.h"
47#include "clang/AST/TypeLoc.h"
48#include "clang/AST/UnresolvedSet.h"
49#include "clang/AST/VTableBuilder.h"
50#include "clang/Basic/AddressSpaces.h"
51#include "clang/Basic/Builtins.h"
52#include "clang/Basic/CommentOptions.h"
53#include "clang/Basic/DiagnosticAST.h"
54#include "clang/Basic/ExceptionSpecificationType.h"
55#include "clang/Basic/IdentifierTable.h"
56#include "clang/Basic/LLVM.h"
57#include "clang/Basic/LangOptions.h"
58#include "clang/Basic/Linkage.h"
59#include "clang/Basic/Module.h"
60#include "clang/Basic/NoSanitizeList.h"
61#include "clang/Basic/ObjCRuntime.h"
62#include "clang/Basic/ProfileList.h"
63#include "clang/Basic/SourceLocation.h"
64#include "clang/Basic/SourceManager.h"
65#include "clang/Basic/Specifiers.h"
66#include "clang/Basic/TargetCXXABI.h"
67#include "clang/Basic/TargetInfo.h"
68#include "clang/Basic/XRayLists.h"
69#include "llvm/ADT/APFixedPoint.h"
70#include "llvm/ADT/APInt.h"
71#include "llvm/ADT/APSInt.h"
72#include "llvm/ADT/ArrayRef.h"
73#include "llvm/ADT/DenseMap.h"
74#include "llvm/ADT/DenseSet.h"
75#include "llvm/ADT/FoldingSet.h"
76#include "llvm/ADT/PointerUnion.h"
77#include "llvm/ADT/STLExtras.h"
78#include "llvm/ADT/SmallPtrSet.h"
79#include "llvm/ADT/SmallVector.h"
80#include "llvm/ADT/StringExtras.h"
81#include "llvm/ADT/StringRef.h"
82#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
83#include "llvm/Support/Capacity.h"
84#include "llvm/Support/Casting.h"
85#include "llvm/Support/Compiler.h"
86#include "llvm/Support/ErrorHandling.h"
87#include "llvm/Support/MD5.h"
88#include "llvm/Support/MathExtras.h"
89#include "llvm/Support/SipHash.h"
90#include "llvm/Support/raw_ostream.h"
91#include "llvm/TargetParser/AArch64TargetParser.h"
92#include "llvm/TargetParser/Triple.h"
93#include <algorithm>
94#include <cassert>
95#include <cstddef>
96#include <cstdint>
97#include <cstdlib>
98#include <map>
99#include <memory>
100#include <optional>
101#include <string>
102#include <tuple>
103#include <utility>
104
105using namespace clang;
106
/// Internal ranking of the floating-point types, ordered from lowest to
/// highest rank. The enumerator order is significant: comparisons between
/// these values are what establish the relative rank of two float types.
enum FloatingRank {
  BFloat16Rank,
  Float16Rank,
  HalfRank,
  FloatRank,
  DoubleRank,
  LongDoubleRank,
  Float128Rank,
  Ibm128Rank
};
117
/// \returns The locations that are relevant when searching for Doc comments
/// related to \p D.
///
/// An empty vector means no comment search should be performed at all
/// (implicit declarations and instantiations, parameters, and template
/// parameters can never own documentation). Otherwise the vector holds one
/// location for an ordinary declaration, or up to two when \p D comes from a
/// macro: the expansion site first, then the spelling location inside the
/// macro body.
static SmallVector<SourceLocation, 2>
getDeclLocsForCommentSearch(const Decl *D, SourceManager &SourceMgr) {
  assert(D);

  // User can not attach documentation to implicit declarations.
  if (D->isImplicit())
    return {};

  // User can not attach documentation to implicit instantiations.
  if (const auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
    if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  // Same for static data members instantiated from a member template.
  if (const auto *VD = dyn_cast<VarDecl>(Val: D)) {
    if (VD->isStaticDataMember() &&
        VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  // ... and for implicitly instantiated member classes.
  if (const auto *CRD = dyn_cast<CXXRecordDecl>(Val: D)) {
    if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  // Implicit (or not-yet-declared) class template specializations carry no
  // user-written documentation either.
  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(Val: D)) {
    TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
    if (TSK == TSK_ImplicitInstantiation ||
        TSK == TSK_Undeclared)
      return {};
  }

  if (const auto *ED = dyn_cast<EnumDecl>(Val: D)) {
    if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }
  if (const auto *TD = dyn_cast<TagDecl>(Val: D)) {
    // When tag declaration (but not definition!) is part of the
    // decl-specifier-seq of some other declaration, it doesn't get comment
    if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
      return {};
  }
  // TODO: handle comments for function parameters properly.
  if (isa<ParmVarDecl>(Val: D))
    return {};

  // TODO: we could look up template parameter documentation in the template
  // documentation.
  if (isa<TemplateTypeParmDecl>(Val: D) ||
      isa<NonTypeTemplateParmDecl>(Val: D) ||
      isa<TemplateTemplateParmDecl>(Val: D))
    return {};

  SmallVector<SourceLocation, 2> Locations;
  // Find declaration location.
  // For Objective-C declarations we generally don't expect to have multiple
  // declarators, thus use declaration starting location as the "declaration
  // location".
  // For all other declarations multiple declarators are used quite frequently,
  // so we use the location of the identifier as the "declaration location".
  SourceLocation BaseLocation;
  if (isa<ObjCMethodDecl>(Val: D) || isa<ObjCContainerDecl>(Val: D) ||
      isa<ObjCPropertyDecl>(Val: D) || isa<RedeclarableTemplateDecl>(Val: D) ||
      isa<ClassTemplateSpecializationDecl>(Val: D) ||
      // Allow association with Y across {} in `typedef struct X {} Y`.
      isa<TypedefDecl>(Val: D))
    BaseLocation = D->getBeginLoc();
  else
    BaseLocation = D->getLocation();

  if (!D->getLocation().isMacroID()) {
    Locations.emplace_back(Args&: BaseLocation);
  } else {
    const auto *DeclCtx = D->getDeclContext();

    // When encountering definitions generated from a macro (that are not
    // contained by another declaration in the macro) we need to try and find
    // the comment at the location of the expansion but if there is no comment
    // there we should retry to see if there is a comment inside the macro as
    // well. To this end we return first BaseLocation to first look at the
    // expansion site, the second value is the spelling location of the
    // beginning of the declaration defined inside the macro.
    if (!(DeclCtx &&
          Decl::castFromDeclContext(DeclCtx)->getLocation().isMacroID())) {
      Locations.emplace_back(Args: SourceMgr.getExpansionLoc(Loc: BaseLocation));
    }

    // We use Decl::getBeginLoc() and not just BaseLocation here to ensure that
    // we don't refer to the macro argument location at the expansion site (this
    // can happen if the name's spelling is provided via macro argument), and
    // always to the declaration itself.
    Locations.emplace_back(Args: SourceMgr.getSpellingLoc(Loc: D->getBeginLoc()));
  }

  return Locations;
}
216
/// Find the documentation comment for \p D within a single file's comments.
///
/// \param RepresentativeLocForDecl the location anchoring the search; must be
///        a valid file location or no comment can be matched.
/// \param CommentsInTheFile the comments of the file containing that
///        location, keyed by their begin offset within the file.
/// \returns either a trailing comment that starts on the same line as the
///          declaration, or the nearest preceding doc comment with nothing
///          but whitespace between it and the declaration; null otherwise.
RawComment *ASTContext::getRawCommentForDeclNoCacheImpl(
    const Decl *D, const SourceLocation RepresentativeLocForDecl,
    const std::map<unsigned, RawComment *> &CommentsInTheFile) const {
  // If the declaration doesn't map directly to a location in a file, we
  // can't find the comment.
  if (RepresentativeLocForDecl.isInvalid() ||
      !RepresentativeLocForDecl.isFileID())
    return nullptr;

  // If there are no comments anywhere, we won't find anything.
  if (CommentsInTheFile.empty())
    return nullptr;

  // Decompose the location for the declaration and find the beginning of the
  // file buffer.
  const FileIDAndOffset DeclLocDecomp =
      SourceMgr.getDecomposedLoc(Loc: RepresentativeLocForDecl);

  // Slow path.
  auto OffsetCommentBehindDecl =
      CommentsInTheFile.lower_bound(x: DeclLocDecomp.second);

  // First check whether we have a trailing comment.
  if (OffsetCommentBehindDecl != CommentsInTheFile.end()) {
    RawComment *CommentBehindDecl = OffsetCommentBehindDecl->second;
    // Only these declaration kinds may own a trailing comment on their line.
    if ((CommentBehindDecl->isDocumentation() ||
         LangOpts.CommentOpts.ParseAllComments) &&
        CommentBehindDecl->isTrailingComment() &&
        (isa<FieldDecl>(Val: D) || isa<EnumConstantDecl>(Val: D) || isa<VarDecl>(Val: D) ||
         isa<ObjCMethodDecl>(Val: D) || isa<ObjCPropertyDecl>(Val: D))) {

      // Check that Doxygen trailing comment comes after the declaration, starts
      // on the same line and in the same file as the declaration.
      if (SourceMgr.getLineNumber(FID: DeclLocDecomp.first, FilePos: DeclLocDecomp.second) ==
          Comments.getCommentBeginLine(C: CommentBehindDecl, File: DeclLocDecomp.first,
                                       Offset: OffsetCommentBehindDecl->first)) {
        return CommentBehindDecl;
      }
    }
  }

  // The comment just after the declaration was not a trailing comment.
  // Let's look at the previous comment.
  if (OffsetCommentBehindDecl == CommentsInTheFile.begin())
    return nullptr;

  auto OffsetCommentBeforeDecl = --OffsetCommentBehindDecl;
  RawComment *CommentBeforeDecl = OffsetCommentBeforeDecl->second;

  // Check that we actually have a non-member Doxygen comment.
  if (!(CommentBeforeDecl->isDocumentation() ||
        LangOpts.CommentOpts.ParseAllComments) ||
      CommentBeforeDecl->isTrailingComment())
    return nullptr;

  // Decompose the end of the comment.
  const unsigned CommentEndOffset =
      Comments.getCommentEndOffset(C: CommentBeforeDecl);

  // Get the corresponding buffer.
  bool Invalid = false;
  const char *Buffer = SourceMgr.getBufferData(FID: DeclLocDecomp.first,
                                               Invalid: &Invalid).data();
  if (Invalid)
    return nullptr;

  // Extract text between the comment and declaration.
  StringRef Text(Buffer + CommentEndOffset,
                 DeclLocDecomp.second - CommentEndOffset);

  // There should be no other declarations or preprocessor directives between
  // comment and declaration.
  if (Text.find_last_of(Chars: ";{}#@") != StringRef::npos)
    return nullptr;

  return CommentBeforeDecl;
}
294
/// Search for a documentation comment for \p D without consulting the
/// per-decl cache. Each candidate location from getDeclLocsForCommentSearch
/// is tried in order; the first comment found wins.
RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
  const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr);

  for (const auto DeclLoc : DeclLocs) {
    // If the declaration doesn't map directly to a location in a file, we
    // can't find the comment.
    if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
      continue;

    // Lazily pull comments in from the external AST source (PCH/module) the
    // first time any comment is requested.
    if (ExternalSource && !CommentsLoaded) {
      ExternalSource->ReadComments();
      CommentsLoaded = true;
    }

    if (Comments.empty())
      continue;

    const FileID File = SourceMgr.getDecomposedLoc(Loc: DeclLoc).first;
    if (!File.isValid())
      continue;

    const auto CommentsInThisFile = Comments.getCommentsInFile(File);
    if (!CommentsInThisFile || CommentsInThisFile->empty())
      continue;

    if (RawComment *Comment =
            getRawCommentForDeclNoCacheImpl(D, RepresentativeLocForDecl: DeclLoc, CommentsInTheFile: *CommentsInThisFile))
      return Comment;
  }

  return nullptr;
}
327
/// Record a lexed comment so it can later be attached to a declaration.
/// Comments from system headers are only expected here when the user asked
/// to retain them.
void ASTContext::addComment(const RawComment &RC) {
  assert(LangOpts.RetainCommentsFromSystemHeaders ||
         !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin()));
  Comments.addComment(RC, CommentOpts: LangOpts.CommentOpts, Allocator&: BumpAlloc);
}
333
334const RawComment *ASTContext::getRawCommentForAnyRedecl(
335 const Decl *D,
336 const Decl **OriginalDecl) const {
337 if (!D) {
338 if (OriginalDecl)
339 OriginalDecl = nullptr;
340 return nullptr;
341 }
342
343 D = &adjustDeclToTemplate(D: *D);
344
345 // Any comment directly attached to D?
346 {
347 auto DeclComment = DeclRawComments.find(Val: D);
348 if (DeclComment != DeclRawComments.end()) {
349 if (OriginalDecl)
350 *OriginalDecl = D;
351 return DeclComment->second;
352 }
353 }
354
355 // Any comment attached to any redeclaration of D?
356 const Decl *CanonicalD = D->getCanonicalDecl();
357 if (!CanonicalD)
358 return nullptr;
359
360 {
361 auto RedeclComment = RedeclChainComments.find(Val: CanonicalD);
362 if (RedeclComment != RedeclChainComments.end()) {
363 if (OriginalDecl)
364 *OriginalDecl = RedeclComment->second;
365 auto CommentAtRedecl = DeclRawComments.find(Val: RedeclComment->second);
366 assert(CommentAtRedecl != DeclRawComments.end() &&
367 "This decl is supposed to have comment attached.");
368 return CommentAtRedecl->second;
369 }
370 }
371
372 // Any redeclarations of D that we haven't checked for comments yet?
373 const Decl *LastCheckedRedecl = [&]() {
374 const Decl *LastChecked = CommentlessRedeclChains.lookup(Val: CanonicalD);
375 bool CanUseCommentlessCache = false;
376 if (LastChecked) {
377 for (auto *Redecl : CanonicalD->redecls()) {
378 if (Redecl == D) {
379 CanUseCommentlessCache = true;
380 break;
381 }
382 if (Redecl == LastChecked)
383 break;
384 }
385 }
386 // FIXME: This could be improved so that even if CanUseCommentlessCache
387 // is false, once we've traversed past CanonicalD we still skip ahead
388 // LastChecked.
389 return CanUseCommentlessCache ? LastChecked : nullptr;
390 }();
391
392 for (const Decl *Redecl : D->redecls()) {
393 assert(Redecl);
394 // Skip all redeclarations that have been checked previously.
395 if (LastCheckedRedecl) {
396 if (LastCheckedRedecl == Redecl) {
397 LastCheckedRedecl = nullptr;
398 }
399 continue;
400 }
401 const RawComment *RedeclComment = getRawCommentForDeclNoCache(D: Redecl);
402 if (RedeclComment) {
403 cacheRawCommentForDecl(OriginalD: *Redecl, Comment: *RedeclComment);
404 if (OriginalDecl)
405 *OriginalDecl = Redecl;
406 return RedeclComment;
407 }
408 CommentlessRedeclChains[CanonicalD] = Redecl;
409 }
410
411 if (OriginalDecl)
412 *OriginalDecl = nullptr;
413 return nullptr;
414}
415
/// Cache \p Comment as the documentation of \p OriginalD: record the direct
/// decl-to-comment mapping, remember which redeclaration in the chain owns
/// the comment, and drop the chain from the "checked, no comment yet" cache.
void ASTContext::cacheRawCommentForDecl(const Decl &OriginalD,
                                        const RawComment &Comment) const {
  assert(Comment.isDocumentation() || LangOpts.CommentOpts.ParseAllComments);
  DeclRawComments.try_emplace(Key: &OriginalD, Args: &Comment);
  const Decl *const CanonicalDecl = OriginalD.getCanonicalDecl();
  RedeclChainComments.try_emplace(Key: CanonicalDecl, Args: &OriginalD);
  CommentlessRedeclChains.erase(Val: CanonicalDecl);
}
424
425static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
426 SmallVectorImpl<const NamedDecl *> &Redeclared) {
427 const DeclContext *DC = ObjCMethod->getDeclContext();
428 if (const auto *IMD = dyn_cast<ObjCImplDecl>(Val: DC)) {
429 const ObjCInterfaceDecl *ID = IMD->getClassInterface();
430 if (!ID)
431 return;
432 // Add redeclared method here.
433 for (const auto *Ext : ID->known_extensions()) {
434 if (ObjCMethodDecl *RedeclaredMethod =
435 Ext->getMethod(Sel: ObjCMethod->getSelector(),
436 isInstance: ObjCMethod->isInstanceMethod()))
437 Redeclared.push_back(Elt: RedeclaredMethod);
438 }
439 }
440}
441
/// Try to attach any not-yet-attached comments to the declarations that were
/// just parsed; matching comments are cached and eagerly parsed.
void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls,
                                                 const Preprocessor *PP) {
  if (Comments.empty() || Decls.empty())
    return;

  // Determine the file these decls live in from the first decl that has a
  // valid location.
  FileID File;
  for (const Decl *D : Decls) {
    if (D->isInvalidDecl())
      continue;

    D = &adjustDeclToTemplate(D: *D);
    SourceLocation Loc = D->getLocation();
    if (Loc.isValid()) {
      // See if there are any new comments that are not attached to a decl.
      // The location doesn't have to be precise - we care only about the file.
      File = SourceMgr.getDecomposedLoc(Loc).first;
      break;
    }
  }

  if (File.isInvalid())
    return;

  // Bail out early if the newest comment in this file is already attached.
  auto CommentsInThisFile = Comments.getCommentsInFile(File);
  if (!CommentsInThisFile || CommentsInThisFile->empty() ||
      CommentsInThisFile->rbegin()->second->isAttached())
    return;

  // There is at least one comment not attached to a decl.
  // Maybe it should be attached to one of Decls?
  //
  // Note that this way we pick up not only comments that precede the
  // declaration, but also comments that *follow* the declaration -- thanks to
  // the lookahead in the lexer: we've consumed the semicolon and looked
  // ahead through comments.
  for (const Decl *D : Decls) {
    assert(D);
    if (D->isInvalidDecl())
      continue;

    D = &adjustDeclToTemplate(D: *D);

    // Skip decls that already have a comment attached.
    if (DeclRawComments.count(Val: D) > 0)
      continue;

    const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr);

    for (const auto DeclLoc : DeclLocs) {
      if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
        continue;

      if (RawComment *const DocComment = getRawCommentForDeclNoCacheImpl(
              D, RepresentativeLocForDecl: DeclLoc, CommentsInTheFile: *CommentsInThisFile)) {
        // Cache the raw comment and its parsed form keyed by the canonical
        // decl, then stop searching locations for this decl.
        cacheRawCommentForDecl(OriginalD: *D, Comment: *DocComment);
        comments::FullComment *FC = DocComment->parse(Context: *this, PP, D);
        ParsedComments[D->getCanonicalDecl()] = FC;
        break;
      }
    }
  }
}
503
504comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
505 const Decl *D) const {
506 auto *ThisDeclInfo = new (*this) comments::DeclInfo;
507 ThisDeclInfo->CommentDecl = D;
508 ThisDeclInfo->IsFilled = false;
509 ThisDeclInfo->fill();
510 ThisDeclInfo->CommentDecl = FC->getDecl();
511 if (!ThisDeclInfo->TemplateParameters)
512 ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
513 comments::FullComment *CFC =
514 new (*this) comments::FullComment(FC->getBlocks(),
515 ThisDeclInfo);
516 return CFC;
517}
518
519comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const {
520 const RawComment *RC = getRawCommentForDeclNoCache(D);
521 return RC ? RC->parse(Context: *this, PP: nullptr, D) : nullptr;
522}
523
/// Return the parsed documentation comment for \p D, consulting the parsed
/// comment cache first and then falling back, when \p D has no comment of
/// its own, to related declarations: the property for an ObjC accessor,
/// overridden/redeclared methods, the underlying tag type of a typedef, ObjC
/// superclasses/category interfaces, and public C++ base classes.
comments::FullComment *ASTContext::getCommentForDecl(
    const Decl *D,
    const Preprocessor *PP) const {
  if (!D || D->isInvalidDecl())
    return nullptr;
  D = &adjustDeclToTemplate(D: *D);

  const Decl *Canonical = D->getCanonicalDecl();
  llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
      ParsedComments.find(Val: Canonical);

  if (Pos != ParsedComments.end()) {
    // A cached comment for another redeclaration must be re-bound to D so
    // that e.g. parameter names resolve against D's own declaration.
    if (Canonical != D) {
      comments::FullComment *FC = Pos->second;
      comments::FullComment *CFC = cloneFullComment(FC, D);
      return CFC;
    }
    return Pos->second;
  }

  const Decl *OriginalDecl = nullptr;

  const RawComment *RC = getRawCommentForAnyRedecl(D, OriginalDecl: &OriginalDecl);
  if (!RC) {
    // No comment anywhere in the redecl chain: fall back to related decls.
    if (isa<ObjCMethodDecl>(Val: D) || isa<FunctionDecl>(Val: D)) {
      SmallVector<const NamedDecl*, 8> Overridden;
      const auto *OMD = dyn_cast<ObjCMethodDecl>(Val: D);
      // A property accessor inherits the property's documentation.
      if (OMD && OMD->isPropertyAccessor())
        if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
          if (comments::FullComment *FC = getCommentForDecl(D: PDecl, PP))
            return cloneFullComment(FC, D);
      if (OMD)
        addRedeclaredMethods(ObjCMethod: OMD, Redeclared&: Overridden);
      getOverriddenMethods(Method: dyn_cast<NamedDecl>(Val: D), Overridden);
      for (unsigned i = 0, e = Overridden.size(); i < e; i++)
        if (comments::FullComment *FC = getCommentForDecl(D: Overridden[i], PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *TD = dyn_cast<TypedefNameDecl>(Val: D)) {
      // Attach any tag type's documentation to its typedef if latter
      // does not have one of its own.
      QualType QT = TD->getUnderlyingType();
      if (const auto *TT = QT->getAs<TagType>())
        if (comments::FullComment *FC = getCommentForDecl(D: TT->getDecl(), PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(Val: D)) {
      // Walk up the superclass chain looking for documentation.
      while (IC->getSuperClass()) {
        IC = IC->getSuperClass();
        if (comments::FullComment *FC = getCommentForDecl(D: IC, PP))
          return cloneFullComment(FC, D);
      }
    }
    else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(Val: D)) {
      // A category inherits documentation from the interface it extends.
      if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
        if (comments::FullComment *FC = getCommentForDecl(D: IC, PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *RD = dyn_cast<CXXRecordDecl>(Val: D)) {
      if (!(RD = RD->getDefinition()))
        return nullptr;
      // Check non-virtual bases.
      for (const auto &I : RD->bases()) {
        // Only public, non-virtual bases are considered in this pass.
        if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(NonVirtualBase= NonVirtualBase->getDefinition()))
            continue;

          if (comments::FullComment *FC = getCommentForDecl(D: (NonVirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
      // Check virtual bases.
      for (const auto &I : RD->vbases()) {
        if (I.getAccessSpecifier() != AS_public)
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(VirtualBase= VirtualBase->getDefinition()))
            continue;
          if (comments::FullComment *FC = getCommentForDecl(D: (VirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
    }
    return nullptr;
  }

  // If the RawComment was attached to other redeclaration of this Decl, we
  // should parse the comment in context of that other Decl. This is important
  // because comments can contain references to parameter names which can be
  // different across redeclarations.
  if (D != OriginalDecl && OriginalDecl)
    return getCommentForDecl(D: OriginalDecl, PP);

  comments::FullComment *FC = RC->parse(Context: *this, PP, D);
  ParsedComments[Canonical] = FC;
  return FC;
}
629
/// Profile a template template parameter for canonicalization: hash its
/// depth, position, pack-ness and kind, then the canonical form of each of
/// its template parameters. Each inner parameter is tagged with a small
/// integer discriminator (0 = type, 1 = non-type, 2 = template) so that
/// parameters of different kinds never collide.
void ASTContext::CanonicalTemplateTemplateParm::Profile(
    llvm::FoldingSetNodeID &ID, const ASTContext &C,
    TemplateTemplateParmDecl *Parm) {
  ID.AddInteger(I: Parm->getDepth());
  ID.AddInteger(I: Parm->getPosition());
  ID.AddBoolean(B: Parm->isParameterPack());
  ID.AddInteger(I: Parm->templateParameterKind());

  TemplateParameterList *Params = Parm->getTemplateParameters();
  ID.AddInteger(I: Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                             PEnd = Params->end();
       P != PEnd; ++P) {
    // Discriminator 0: template type parameter.
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Val: *P)) {
      ID.AddInteger(I: 0);
      ID.AddBoolean(B: TTP->isParameterPack());
      ID.AddInteger(
          I: TTP->getNumExpansionParameters().toInternalRepresentation());
      continue;
    }

    // Discriminator 1: non-type template parameter. Hash its unconstrained
    // canonical type, and the expansion types when it is an expanded pack.
    if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Val: *P)) {
      ID.AddInteger(I: 1);
      ID.AddBoolean(B: NTTP->isParameterPack());
      ID.AddPointer(Ptr: C.getUnconstrainedType(T: C.getCanonicalType(T: NTTP->getType()))
                        .getAsOpaquePtr());
      if (NTTP->isExpandedParameterPack()) {
        ID.AddBoolean(B: true);
        ID.AddInteger(I: NTTP->getNumExpansionTypes());
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          QualType T = NTTP->getExpansionType(I);
          ID.AddPointer(Ptr: T.getCanonicalType().getAsOpaquePtr());
        }
      } else
        ID.AddBoolean(B: false);
      continue;
    }

    // Discriminator 2: nested template template parameter; recurse.
    auto *TTP = cast<TemplateTemplateParmDecl>(Val: *P);
    ID.AddInteger(I: 2);
    Profile(ID, C, Parm: TTP);
  }
}
673
/// Return the canonical template template parameter equivalent to \p TTP,
/// building (and caching in CanonTemplateTemplateParms) a new one with
/// canonicalized template parameters when no equivalent exists yet.
TemplateTemplateParmDecl *
ASTContext::getCanonicalTemplateTemplateParmDecl(
    TemplateTemplateParmDecl *TTP) const {
  // Check if we already have a canonical template template parameter.
  llvm::FoldingSetNodeID ID;
  CanonicalTemplateTemplateParm::Profile(ID, C: *this, Parm: TTP);
  void *InsertPos = nullptr;
  CanonicalTemplateTemplateParm *Canonical
    = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  if (Canonical)
    return Canonical->getParam();

  // Build a canonical template parameter list.
  TemplateParameterList *Params = TTP->getTemplateParameters();
  SmallVector<NamedDecl *, 4> CanonParams;
  CanonParams.reserve(N: Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                             PEnd = Params->end();
       P != PEnd; ++P) {
    // Note that, per C++20 [temp.over.link]/6, when determining whether
    // template-parameters are equivalent, constraints are ignored.
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Val: *P)) {
      // Canonical type parameter: no name, no location, no type constraint.
      TemplateTypeParmDecl *NewTTP = TemplateTypeParmDecl::Create(
          C: *this, DC: getTranslationUnitDecl(), KeyLoc: SourceLocation(), NameLoc: SourceLocation(),
          D: TTP->getDepth(), P: TTP->getIndex(), Id: nullptr, Typename: false,
          ParameterPack: TTP->isParameterPack(), /*HasTypeConstraint=*/false,
          NumExpanded: TTP->getNumExpansionParameters());
      CanonParams.push_back(Elt: NewTTP);
    } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Val: *P)) {
      // Canonical non-type parameter: canonicalize the type (dropping any
      // constraints), preserving expanded-pack structure when present.
      QualType T = getUnconstrainedType(T: getCanonicalType(T: NTTP->getType()));
      TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
      NonTypeTemplateParmDecl *Param;
      if (NTTP->isExpandedParameterPack()) {
        SmallVector<QualType, 2> ExpandedTypes;
        SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          ExpandedTypes.push_back(Elt: getCanonicalType(T: NTTP->getExpansionType(I)));
          ExpandedTInfos.push_back(
              Elt: getTrivialTypeSourceInfo(T: ExpandedTypes.back()));
        }

        Param = NonTypeTemplateParmDecl::Create(C: *this, DC: getTranslationUnitDecl(),
                                                StartLoc: SourceLocation(),
                                                IdLoc: SourceLocation(),
                                                D: NTTP->getDepth(),
                                                P: NTTP->getPosition(), Id: nullptr,
                                                T,
                                                TInfo,
                                                ExpandedTypes,
                                                ExpandedTInfos);
      } else {
        Param = NonTypeTemplateParmDecl::Create(C: *this, DC: getTranslationUnitDecl(),
                                                StartLoc: SourceLocation(),
                                                IdLoc: SourceLocation(),
                                                D: NTTP->getDepth(),
                                                P: NTTP->getPosition(), Id: nullptr,
                                                T,
                                                ParameterPack: NTTP->isParameterPack(),
                                                TInfo);
      }
      CanonParams.push_back(Elt: Param);
    } else
      // Nested template template parameter: canonicalize recursively.
      CanonParams.push_back(Elt: getCanonicalTemplateTemplateParmDecl(
          TTP: cast<TemplateTemplateParmDecl>(Val: *P)));
  }

  TemplateTemplateParmDecl *CanonTTP = TemplateTemplateParmDecl::Create(
      C: *this, DC: getTranslationUnitDecl(), L: SourceLocation(), D: TTP->getDepth(),
      P: TTP->getPosition(), ParameterPack: TTP->isParameterPack(), Id: nullptr,
      ParameterKind: TTP->templateParameterKind(),
      /*Typename=*/false,
      Params: TemplateParameterList::Create(C: *this, TemplateLoc: SourceLocation(), LAngleLoc: SourceLocation(),
                                     Params: CanonParams, RAngleLoc: SourceLocation(),
                                     /*RequiresClause=*/nullptr));

  // Get the new insert position for the node we care about.
  // (Creating the canonical parameters above may have inserted other nodes
  // into the folding set, invalidating the previous InsertPos.)
  Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  assert(!Canonical && "Shouldn't be in the map!");
  (void)Canonical;

  // Create the canonical template template parameter entry.
  Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
  CanonTemplateTemplateParms.InsertNode(N: Canonical, InsertPos);
  return CanonTTP;
}
759
760TemplateTemplateParmDecl *
761ASTContext::findCanonicalTemplateTemplateParmDeclInternal(
762 TemplateTemplateParmDecl *TTP) const {
763 llvm::FoldingSetNodeID ID;
764 CanonicalTemplateTemplateParm::Profile(ID, C: *this, Parm: TTP);
765 void *InsertPos = nullptr;
766 CanonicalTemplateTemplateParm *Canonical =
767 CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
768 return Canonical ? Canonical->getParam() : nullptr;
769}
770
771TemplateTemplateParmDecl *
772ASTContext::insertCanonicalTemplateTemplateParmDeclInternal(
773 TemplateTemplateParmDecl *CanonTTP) const {
774 llvm::FoldingSetNodeID ID;
775 CanonicalTemplateTemplateParm::Profile(ID, C: *this, Parm: CanonTTP);
776 void *InsertPos = nullptr;
777 if (auto *Existing =
778 CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos))
779 return Existing->getParam();
780 CanonTemplateTemplateParms.InsertNode(
781 N: new (*this) CanonicalTemplateTemplateParm(CanonTTP), InsertPos);
782 return CanonTTP;
783}
784
785/// For the purposes of overflow pattern exclusion, does this match the
786/// while(i--) pattern?
787static bool matchesPostDecrInWhile(const UnaryOperator *UO, ASTContext &Ctx) {
788 if (UO->getOpcode() != UO_PostDec)
789 return false;
790
791 if (!UO->getType()->isUnsignedIntegerType())
792 return false;
793
794 // -fsanitize-undefined-ignore-overflow-pattern=unsigned-post-decr-while
795 if (!Ctx.getLangOpts().isOverflowPatternExcluded(
796 Kind: LangOptions::OverflowPatternExclusionKind::PostDecrInWhile))
797 return false;
798
799 // all Parents (usually just one) must be a WhileStmt
800 return llvm::all_of(
801 Range: Ctx.getParentMapContext().getParents(Node: *UO),
802 P: [](const DynTypedNode &P) { return P.get<WhileStmt>() != nullptr; });
803}
804
805bool ASTContext::isUnaryOverflowPatternExcluded(const UnaryOperator *UO) {
806 // -fsanitize-undefined-ignore-overflow-pattern=negated-unsigned-const
807 // ... like -1UL;
808 if (UO->getOpcode() == UO_Minus &&
809 getLangOpts().isOverflowPatternExcluded(
810 Kind: LangOptions::OverflowPatternExclusionKind::NegUnsignedConst) &&
811 UO->isIntegerConstantExpr(Ctx: *this)) {
812 return true;
813 }
814
815 if (matchesPostDecrInWhile(UO, Ctx&: *this))
816 return true;
817
818 return false;
819}
820
821/// Check if a type can have its sanitizer instrumentation elided based on its
822/// presence within an ignorelist.
823bool ASTContext::isTypeIgnoredBySanitizer(const SanitizerMask &Mask,
824 const QualType &Ty) const {
825 std::string TyName = Ty.getUnqualifiedType().getAsString(Policy: getPrintingPolicy());
826 return NoSanitizeL->containsType(Mask, MangledTypeName: TyName);
827}
828
829TargetCXXABI::Kind ASTContext::getCXXABIKind() const {
830 auto Kind = getTargetInfo().getCXXABI().getKind();
831 return getLangOpts().CXXABI.value_or(u&: Kind);
832}
833
/// Construct the C++ ABI object for this context, or null when not compiling
/// C++. All Itanium-family ABIs share one implementation at this level; only
/// the Microsoft ABI is distinct.
CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
  if (!LangOpts.CPlusPlus) return nullptr;

  switch (getCXXABIKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericARM: // Same as Itanium at this level
  case TargetCXXABI::iOS:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::XL:
    return CreateItaniumCXXABI(Ctx&: *this);
  case TargetCXXABI::Microsoft:
    return CreateMicrosoftCXXABI(Ctx&: *this);
  }
  llvm_unreachable("Invalid CXXABI type!");
}
854
855interp::Context &ASTContext::getInterpContext() const {
856 if (!InterpContext) {
857 InterpContext.reset(p: new interp::Context(const_cast<ASTContext &>(*this)));
858 }
859 return *InterpContext;
860}
861
862ParentMapContext &ASTContext::getParentMapContext() {
863 if (!ParentMapCtx)
864 ParentMapCtx.reset(p: new ParentMapContext(*this));
865 return *ParentMapCtx;
866}
867
868static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
869 const LangOptions &LangOpts) {
870 switch (LangOpts.getAddressSpaceMapMangling()) {
871 case LangOptions::ASMM_Target:
872 return TI.useAddressSpaceMapMangling();
873 case LangOptions::ASMM_On:
874 return true;
875 case LangOptions::ASMM_Off:
876 return false;
877 }
878 llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
879}
880
/// Construct an ASTContext.
///
/// The type folding sets are seeded with `this_()` (a handle to the
/// partially-constructed context) so they can call back into the context when
/// profiling nodes. The no-sanitize, XRay, and profile filter lists are built
/// from the file lists carried in \p LOpts.
ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
                       IdentifierTable &idents, SelectorTable &sels,
                       Builtin::Context &builtins, TranslationUnitKind TUKind)
    : ConstantArrayTypes(this_(), ConstantArrayTypesLog2InitSize),
      DependentSizedArrayTypes(this_()), DependentSizedExtVectorTypes(this_()),
      DependentAddressSpaceTypes(this_()), DependentVectorTypes(this_()),
      DependentSizedMatrixTypes(this_()),
      FunctionProtoTypes(this_(), FunctionProtoTypesLog2InitSize),
      DependentTypeOfExprTypes(this_()), DependentDecltypeTypes(this_()),
      DependentPackIndexingTypes(this_()), TemplateSpecializationTypes(this_()),
      DependentBitIntTypes(this_()), SubstTemplateTemplateParmPacks(this_()),
      DeducedTemplates(this_()), ArrayParameterTypes(this_()),
      CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts),
      NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)),
      XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
                                        LangOpts.XRayNeverInstrumentFiles,
                                        LangOpts.XRayAttrListFiles, SM)),
      ProfList(new ProfileList(LangOpts.ProfileListFiles, SM)),
      PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
      BuiltinInfo(builtins), TUKind(TUKind), DeclarationNames(*this),
      Comments(SM), CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
      CompCategories(this_()), LastSDM(nullptr, 0) {
  // Every context starts out with an implicit TranslationUnitDecl as the root
  // declaration context.
  addTranslationUnitDecl();
}
905
/// Tear down the context's side tables: run registered deallocation
/// callbacks, destroy layout/attribute/initializer objects that own heap
/// state, and release the filter lists. Called from the destructor.
void ASTContext::cleanup() {
  // Release the DenseMaps associated with DeclContext objects.
  // FIXME: Is this the ideal solution?
  ReleaseDeclContextMaps();

  // Call all of the deallocation functions on all of their targets.
  for (auto &Pair : Deallocations)
    (Pair.first)(Pair.second);
  Deallocations.clear();

  // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
  // because they can contain DenseMaps.
  for (llvm::DenseMap<const ObjCInterfaceDecl *,
                      const ASTRecordLayout *>::iterator
           I = ObjCLayouts.begin(),
           E = ObjCLayouts.end();
       I != E;)
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(Ctx&: *this);
  ObjCLayouts.clear();

  for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
       I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(Ctx&: *this);
  }
  ASTRecordLayouts.clear();

  // AttrVecs are placement-allocated in the bump allocator (see
  // getDeclAttrs), so only their destructors are run here.
  for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
                                                    AEnd = DeclAttrs.end();
       A != AEnd; ++A)
    A->second->~AttrVec();
  DeclAttrs.clear();

  for (const auto &Value : ModuleInitializers)
    Value.second->~PerModuleInitializers();
  ModuleInitializers.clear();

  XRayFilter.reset();
  NoSanitizeL.reset();
}
949
950ASTContext::~ASTContext() { cleanup(); }
951
/// Replace the set of top-level declarations used as traversal roots, and
/// drop parent-map data computed for the previous scope (cached parents may
/// not be reachable from the new roots).
void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
  TraversalScope = TopLevelDecls;
  getParentMapContext().clear();
}
956
957void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const {
958 Deallocations.push_back(Elt: {Callback, Data});
959}
960
/// Attach the external AST source that lazily supplies additional
/// declarations and types to this context.
void
ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) {
  ExternalSource = std::move(Source);
}
965
/// Print AST statistics to llvm::errs(): per-class type node counts and byte
/// totals, counts of implicitly declared special member functions, external
/// source stats (if any), and bump-allocator stats.
void ASTContext::PrintStats() const {
  llvm::errs() << "\n*** AST Context Stats:\n";
  llvm::errs() << "  " << Types.size() << " types total.\n";

  // One slot per concrete type class, generated from TypeNodes.inc.
  unsigned counts[] = {
#define TYPE(Name, Parent) 0,
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"
    0 // Extra
  };

  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
    Type *T = Types[i];
    counts[(unsigned)T->getTypeClass()]++;
  }

  unsigned Idx = 0;
  unsigned TotalBytes = 0;
#define TYPE(Name, Parent)                                              \
  if (counts[Idx])                                                      \
    llvm::errs() << "    " << counts[Idx] << " " << #Name               \
                 << " types, " << sizeof(Name##Type) << " each "        \
                 << "(" << counts[Idx] * sizeof(Name##Type)             \
                 << " bytes)\n";                                        \
  TotalBytes += counts[Idx] * sizeof(Name##Type);                       \
  ++Idx;
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"

  llvm::errs() << "Total bytes = " << TotalBytes << "\n";

  // Implicit special member functions.
  llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
               << NumImplicitDefaultConstructors
               << " implicit default constructors created\n";
  llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
               << NumImplicitCopyConstructors
               << " implicit copy constructors created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
                 << NumImplicitMoveConstructors
                 << " implicit move constructors created\n";
  llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
               << NumImplicitCopyAssignmentOperators
               << " implicit copy assignment operators created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
                 << NumImplicitMoveAssignmentOperators
                 << " implicit move assignment operators created\n";
  llvm::errs() << NumImplicitDestructorsDeclared << "/"
               << NumImplicitDestructors
               << " implicit destructors created\n";

  if (ExternalSource) {
    llvm::errs() << "\n";
    ExternalSource->PrintStats();
  }

  BumpAlloc.PrintStats();
}
1026
1027void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M,
1028 bool NotifyListeners) {
1029 if (NotifyListeners)
1030 if (auto *Listener = getASTMutationListener();
1031 Listener && !ND->isUnconditionallyVisible())
1032 Listener->RedefinedHiddenDefinition(D: ND, M);
1033
1034 MergedDefModules[cast<NamedDecl>(Val: ND->getCanonicalDecl())].push_back(NewVal: M);
1035}
1036
1037void ASTContext::deduplicateMergedDefinitionsFor(NamedDecl *ND) {
1038 auto It = MergedDefModules.find(Val: cast<NamedDecl>(Val: ND->getCanonicalDecl()));
1039 if (It == MergedDefModules.end())
1040 return;
1041
1042 auto &Merged = It->second;
1043 llvm::DenseSet<Module*> Found;
1044 for (Module *&M : Merged)
1045 if (!Found.insert(V: M).second)
1046 M = nullptr;
1047 llvm::erase(C&: Merged, V: nullptr);
1048}
1049
1050ArrayRef<Module *>
1051ASTContext::getModulesWithMergedDefinition(const NamedDecl *Def) {
1052 auto MergedIt =
1053 MergedDefModules.find(Val: cast<NamedDecl>(Val: Def->getCanonicalDecl()));
1054 if (MergedIt == MergedDefModules.end())
1055 return {};
1056 return MergedIt->second;
1057}
1058
/// Replace any lazily-recorded initializer IDs with the corresponding
/// deserialized declarations from the external AST source, appending them to
/// the eager Initializers list.
void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
  if (LazyInitializers.empty())
    return;

  auto *Source = Ctx.getExternalSource();
  assert(Source && "lazy initializers but no external source");

  // Move the IDs aside first: GetExternalDecl may re-enter and must not see
  // (or grow) the list we are draining.
  auto LazyInits = std::move(LazyInitializers);
  LazyInitializers.clear();

  for (auto ID : LazyInits)
    Initializers.push_back(Elt: Source->GetExternalDecl(ID));

  assert(LazyInitializers.empty() &&
         "GetExternalDecl for lazy module initializer added more inits");
}
1075
/// Add \p D as an initializer to run when module \p M is imported.
void ASTContext::addModuleInitializer(Module *M, Decl *D) {
  // One special case: if we add a module initializer that imports another
  // module, and that module's only initializer is an ImportDecl, simplify.
  if (const auto *ID = dyn_cast<ImportDecl>(Val: D)) {
    auto It = ModuleInitializers.find(Val: ID->getImportedModule());

    // Maybe the ImportDecl does nothing at all. (Common case.)
    if (It == ModuleInitializers.end())
      return;

    // Maybe the ImportDecl only imports another ImportDecl.
    auto &Imported = *It->second;
    if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) {
      // Force lazy IDs to be deserialized so we can inspect the single entry.
      Imported.resolve(Ctx&: *this);
      auto *OnlyDecl = Imported.Initializers.front();
      if (isa<ImportDecl>(Val: OnlyDecl))
        D = OnlyDecl;
    }
  }

  // Create the per-module record on first use; it lives in the context's
  // allocator and is destroyed in cleanup().
  auto *&Inits = ModuleInitializers[M];
  if (!Inits)
    Inits = new (*this) PerModuleInitializers;
  Inits->Initializers.push_back(Elt: D);
}
1101
1102void ASTContext::addLazyModuleInitializers(Module *M,
1103 ArrayRef<GlobalDeclID> IDs) {
1104 auto *&Inits = ModuleInitializers[M];
1105 if (!Inits)
1106 Inits = new (*this) PerModuleInitializers;
1107 Inits->LazyInitializers.insert(I: Inits->LazyInitializers.end(),
1108 From: IDs.begin(), To: IDs.end());
1109}
1110
1111ArrayRef<Decl *> ASTContext::getModuleInitializers(Module *M) {
1112 auto It = ModuleInitializers.find(Val: M);
1113 if (It == ModuleInitializers.end())
1114 return {};
1115
1116 auto *Inits = It->second;
1117 Inits->resolve(Ctx&: *this);
1118 return Inits->Initializers;
1119}
1120
/// Record the C++ named module this compilation is building. May be set at
/// most once per context.
void ASTContext::setCurrentNamedModule(Module *M) {
  assert(M->isNamedModule());
  assert(!CurrentCXXNamedModule &&
         "We should set named module for ASTContext for only once");
  CurrentCXXNamedModule = M;
}
1127
/// Return true if \p M1 and \p M2 are module units of the same primary
/// module. At least one of the two must be non-null.
bool ASTContext::isInSameModule(const Module *M1, const Module *M2) const {
  // If exactly one of the two is null, they cannot be the same module.
  if (!M1 != !M2)
    return false;

  /// Get the representative module for M. The representative module is the
  /// first module unit for a specific primary module name. So that the module
  /// units have the same representative module belongs to the same module.
  ///
  /// The process is helpful to reduce the expensive string operations.
  auto GetRepresentativeModule = [this](const Module *M) {
    // Fast path: the representative was already memoized for this unit.
    auto Iter = SameModuleLookupSet.find(Val: M);
    if (Iter != SameModuleLookupSet.end())
      return Iter->second;

    // First unit seen for this primary module name becomes the
    // representative for all later units with the same name.
    const Module *RepresentativeModule =
        PrimaryModuleNameMap.try_emplace(Key: M->getPrimaryModuleInterfaceName(), Args&: M)
            .first->second;
    SameModuleLookupSet[M] = RepresentativeModule;
    return RepresentativeModule;
  };

  assert(M1 && "Shouldn't call `isInSameModule` if both M1 and M2 are none.");
  return GetRepresentativeModule(M1) == GetRepresentativeModule(M2);
}
1152
1153ExternCContextDecl *ASTContext::getExternCContextDecl() const {
1154 if (!ExternCContext)
1155 ExternCContext = ExternCContextDecl::Create(C: *this, TU: getTranslationUnitDecl());
1156
1157 return ExternCContext;
1158}
1159
1160BuiltinTemplateDecl *
1161ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK,
1162 const IdentifierInfo *II) const {
1163 auto *BuiltinTemplate =
1164 BuiltinTemplateDecl::Create(C: *this, DC: getTranslationUnitDecl(), Name: II, BTK);
1165 BuiltinTemplate->setImplicit();
1166 getTranslationUnitDecl()->addDecl(D: BuiltinTemplate);
1167
1168 return BuiltinTemplate;
1169}
1170
// X-macro over BuiltinTemplates.inc: for each builtin template, stamp out the
// lazy accessor ASTContext::get<Name>Decl(), which builds the implicit
// BuiltinTemplateDecl on first use and caches it in the Decl<Name> member.
#define BuiltinTemplate(BTName) \
  BuiltinTemplateDecl *ASTContext::get##BTName##Decl() const { \
    if (!Decl##BTName) \
      Decl##BTName = \
          buildBuiltinTemplateDecl(BTK##BTName, get##BTName##Name()); \
    return Decl##BTName; \
  }
#include "clang/Basic/BuiltinTemplates.inc"
1179
1180RecordDecl *ASTContext::buildImplicitRecord(StringRef Name,
1181 RecordDecl::TagKind TK) const {
1182 SourceLocation Loc;
1183 RecordDecl *NewDecl;
1184 if (getLangOpts().CPlusPlus)
1185 NewDecl = CXXRecordDecl::Create(C: *this, TK, DC: getTranslationUnitDecl(), StartLoc: Loc,
1186 IdLoc: Loc, Id: &Idents.get(Name));
1187 else
1188 NewDecl = RecordDecl::Create(C: *this, TK, DC: getTranslationUnitDecl(), StartLoc: Loc, IdLoc: Loc,
1189 Id: &Idents.get(Name));
1190 NewDecl->setImplicit();
1191 NewDecl->addAttr(A: TypeVisibilityAttr::CreateImplicit(
1192 Ctx&: const_cast<ASTContext &>(*this), Visibility: TypeVisibilityAttr::Default));
1193 return NewDecl;
1194}
1195
1196TypedefDecl *ASTContext::buildImplicitTypedef(QualType T,
1197 StringRef Name) const {
1198 TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
1199 TypedefDecl *NewDecl = TypedefDecl::Create(
1200 C&: const_cast<ASTContext &>(*this), DC: getTranslationUnitDecl(),
1201 StartLoc: SourceLocation(), IdLoc: SourceLocation(), Id: &Idents.get(Name), TInfo);
1202 NewDecl->setImplicit();
1203 return NewDecl;
1204}
1205
/// Lazily create the implicit `__int128_t` typedef for the 128-bit signed
/// integer type.
TypedefDecl *ASTContext::getInt128Decl() const {
  if (!Int128Decl)
    Int128Decl = buildImplicitTypedef(T: Int128Ty, Name: "__int128_t");
  return Int128Decl;
}
1211
/// Lazily create the implicit `__uint128_t` typedef for the 128-bit unsigned
/// integer type.
TypedefDecl *ASTContext::getUInt128Decl() const {
  if (!UInt128Decl)
    UInt128Decl = buildImplicitTypedef(T: UnsignedInt128Ty, Name: "__uint128_t");
  return UInt128Decl;
}
1217
1218void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
1219 auto *Ty = new (*this, alignof(BuiltinType)) BuiltinType(K);
1220 R = CanQualType::CreateUnsafe(Other: QualType(Ty, 0));
1221 Types.push_back(Elt: Ty);
1222}
1223
/// Create all of the predefined (builtin) types for the primary target \p
/// Target (and optional auxiliary target \p AuxTarget), install the C++ ABI
/// object, and set up derived types such as wchar_t and void*. Asserts that
/// the context has not already been initialized.
void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
                                  const TargetInfo *AuxTarget) {
  assert((!this->Target || this->Target == &Target) &&
         "Incorrect target reinitialization");
  assert(VoidTy.isNull() && "Context reinitialized?");

  this->Target = &Target;
  this->AuxTarget = AuxTarget;

  ABI.reset(p: createCXXABI(T: Target));
  AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(TI: Target, LangOpts);

  // C99 6.2.5p19.
  InitBuiltinType(R&: VoidTy, K: BuiltinType::Void);

  // C99 6.2.5p2.
  InitBuiltinType(R&: BoolTy, K: BuiltinType::Bool);
  // C99 6.2.5p3.
  if (LangOpts.CharIsSigned)
    InitBuiltinType(R&: CharTy, K: BuiltinType::Char_S);
  else
    InitBuiltinType(R&: CharTy, K: BuiltinType::Char_U);
  // C99 6.2.5p4.
  InitBuiltinType(R&: SignedCharTy, K: BuiltinType::SChar);
  InitBuiltinType(R&: ShortTy, K: BuiltinType::Short);
  InitBuiltinType(R&: IntTy, K: BuiltinType::Int);
  InitBuiltinType(R&: LongTy, K: BuiltinType::Long);
  InitBuiltinType(R&: LongLongTy, K: BuiltinType::LongLong);

  // C99 6.2.5p6.
  InitBuiltinType(R&: UnsignedCharTy, K: BuiltinType::UChar);
  InitBuiltinType(R&: UnsignedShortTy, K: BuiltinType::UShort);
  InitBuiltinType(R&: UnsignedIntTy, K: BuiltinType::UInt);
  InitBuiltinType(R&: UnsignedLongTy, K: BuiltinType::ULong);
  InitBuiltinType(R&: UnsignedLongLongTy, K: BuiltinType::ULongLong);

  // C99 6.2.5p10.
  InitBuiltinType(R&: FloatTy, K: BuiltinType::Float);
  InitBuiltinType(R&: DoubleTy, K: BuiltinType::Double);
  InitBuiltinType(R&: LongDoubleTy, K: BuiltinType::LongDouble);

  // GNU extension, __float128 for IEEE quadruple precision
  InitBuiltinType(R&: Float128Ty, K: BuiltinType::Float128);

  // __ibm128 for IBM extended precision
  InitBuiltinType(R&: Ibm128Ty, K: BuiltinType::Ibm128);

  // C11 extension ISO/IEC TS 18661-3
  InitBuiltinType(R&: Float16Ty, K: BuiltinType::Float16);

  // ISO/IEC JTC1 SC22 WG14 N1169 Extension
  InitBuiltinType(R&: ShortAccumTy, K: BuiltinType::ShortAccum);
  InitBuiltinType(R&: AccumTy, K: BuiltinType::Accum);
  InitBuiltinType(R&: LongAccumTy, K: BuiltinType::LongAccum);
  InitBuiltinType(R&: UnsignedShortAccumTy, K: BuiltinType::UShortAccum);
  InitBuiltinType(R&: UnsignedAccumTy, K: BuiltinType::UAccum);
  InitBuiltinType(R&: UnsignedLongAccumTy, K: BuiltinType::ULongAccum);
  InitBuiltinType(R&: ShortFractTy, K: BuiltinType::ShortFract);
  InitBuiltinType(R&: FractTy, K: BuiltinType::Fract);
  InitBuiltinType(R&: LongFractTy, K: BuiltinType::LongFract);
  InitBuiltinType(R&: UnsignedShortFractTy, K: BuiltinType::UShortFract);
  InitBuiltinType(R&: UnsignedFractTy, K: BuiltinType::UFract);
  InitBuiltinType(R&: UnsignedLongFractTy, K: BuiltinType::ULongFract);
  InitBuiltinType(R&: SatShortAccumTy, K: BuiltinType::SatShortAccum);
  InitBuiltinType(R&: SatAccumTy, K: BuiltinType::SatAccum);
  InitBuiltinType(R&: SatLongAccumTy, K: BuiltinType::SatLongAccum);
  InitBuiltinType(R&: SatUnsignedShortAccumTy, K: BuiltinType::SatUShortAccum);
  InitBuiltinType(R&: SatUnsignedAccumTy, K: BuiltinType::SatUAccum);
  InitBuiltinType(R&: SatUnsignedLongAccumTy, K: BuiltinType::SatULongAccum);
  InitBuiltinType(R&: SatShortFractTy, K: BuiltinType::SatShortFract);
  InitBuiltinType(R&: SatFractTy, K: BuiltinType::SatFract);
  InitBuiltinType(R&: SatLongFractTy, K: BuiltinType::SatLongFract);
  InitBuiltinType(R&: SatUnsignedShortFractTy, K: BuiltinType::SatUShortFract);
  InitBuiltinType(R&: SatUnsignedFractTy, K: BuiltinType::SatUFract);
  InitBuiltinType(R&: SatUnsignedLongFractTy, K: BuiltinType::SatULongFract);

  // GNU extension, 128-bit integers.
  InitBuiltinType(R&: Int128Ty, K: BuiltinType::Int128);
  InitBuiltinType(R&: UnsignedInt128Ty, K: BuiltinType::UInt128);

  // C++ 3.9.1p5
  if (TargetInfo::isTypeSigned(T: Target.getWCharType()))
    InitBuiltinType(R&: WCharTy, K: BuiltinType::WChar_S);
  else // -fshort-wchar makes wchar_t be unsigned.
    InitBuiltinType(R&: WCharTy, K: BuiltinType::WChar_U);
  if (LangOpts.CPlusPlus && LangOpts.WChar)
    WideCharTy = WCharTy;
  else {
    // C99 (or C++ using -fno-wchar).
    WideCharTy = getFromTargetType(Type: Target.getWCharType());
  }

  WIntTy = getFromTargetType(Type: Target.getWIntType());

  // C++20 (proposed)
  InitBuiltinType(R&: Char8Ty, K: BuiltinType::Char8);

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(R&: Char16Ty, K: BuiltinType::Char16);
  else // C99
    Char16Ty = getFromTargetType(Type: Target.getChar16Type());

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(R&: Char32Ty, K: BuiltinType::Char32);
  else // C99
    Char32Ty = getFromTargetType(Type: Target.getChar32Type());

  // Placeholder type for type-dependent expressions whose type is
  // completely unknown. No code should ever check a type against
  // DependentTy and users should never see it; however, it is here to
  // help diagnose failures to properly check for type-dependent
  // expressions.
  InitBuiltinType(R&: DependentTy, K: BuiltinType::Dependent);

  // Placeholder type for functions.
  InitBuiltinType(R&: OverloadTy, K: BuiltinType::Overload);

  // Placeholder type for bound members.
  InitBuiltinType(R&: BoundMemberTy, K: BuiltinType::BoundMember);

  // Placeholder type for unresolved templates.
  InitBuiltinType(R&: UnresolvedTemplateTy, K: BuiltinType::UnresolvedTemplate);

  // Placeholder type for pseudo-objects.
  InitBuiltinType(R&: PseudoObjectTy, K: BuiltinType::PseudoObject);

  // "any" type; useful for debugger-like clients.
  InitBuiltinType(R&: UnknownAnyTy, K: BuiltinType::UnknownAny);

  // Placeholder type for unbridged ARC casts.
  InitBuiltinType(R&: ARCUnbridgedCastTy, K: BuiltinType::ARCUnbridgedCast);

  // Placeholder type for builtin functions.
  InitBuiltinType(R&: BuiltinFnTy, K: BuiltinType::BuiltinFn);

  // Placeholder type for OMP array sections.
  if (LangOpts.OpenMP) {
    InitBuiltinType(R&: ArraySectionTy, K: BuiltinType::ArraySection);
    InitBuiltinType(R&: OMPArrayShapingTy, K: BuiltinType::OMPArrayShaping);
    InitBuiltinType(R&: OMPIteratorTy, K: BuiltinType::OMPIterator);
  }
  // Placeholder type for OpenACC array sections, if we are ALSO in OMP mode,
  // don't bother, as we're just using the same type as OMP.
  if (LangOpts.OpenACC && !LangOpts.OpenMP) {
    InitBuiltinType(R&: ArraySectionTy, K: BuiltinType::ArraySection);
  }
  if (LangOpts.MatrixTypes)
    InitBuiltinType(R&: IncompleteMatrixIdxTy, K: BuiltinType::IncompleteMatrixIdx);

  // Builtin types for 'id', 'Class', and 'SEL'.
  InitBuiltinType(R&: ObjCBuiltinIdTy, K: BuiltinType::ObjCId);
  InitBuiltinType(R&: ObjCBuiltinClassTy, K: BuiltinType::ObjCClass);
  InitBuiltinType(R&: ObjCBuiltinSelTy, K: BuiltinType::ObjCSel);

  if (LangOpts.OpenCL) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
  InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/OpenCLImageTypes.def"

    InitBuiltinType(R&: OCLSamplerTy, K: BuiltinType::OCLSampler);
    InitBuiltinType(R&: OCLEventTy, K: BuiltinType::OCLEvent);
    InitBuiltinType(R&: OCLClkEventTy, K: BuiltinType::OCLClkEvent);
    InitBuiltinType(R&: OCLQueueTy, K: BuiltinType::OCLQueue);
    InitBuiltinType(R&: OCLReserveIDTy, K: BuiltinType::OCLReserveID);

#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
  InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/OpenCLExtensionTypes.def"
  }

  if (LangOpts.HLSL) {
#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) \
  InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/HLSLIntangibleTypes.def"
  }

  // Target-specific builtin types are created when either the primary or the
  // auxiliary target supports them.
  if (Target.hasAArch64ACLETypes() ||
      (AuxTarget && AuxTarget->hasAArch64ACLETypes())) {
#define SVE_TYPE(Name, Id, SingletonId) \
  InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/AArch64ACLETypes.def"
  }

  if (Target.getTriple().isPPC64()) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
  InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
  InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
  }

  if (Target.hasRISCVVTypes()) {
#define RVV_TYPE(Name, Id, SingletonId) \
  InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/RISCVVTypes.def"
  }

  if (Target.getTriple().isWasm() && Target.hasFeature(Feature: "reference-types")) {
#define WASM_TYPE(Name, Id, SingletonId) \
  InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/WebAssemblyReferenceTypes.def"
  }

  if (Target.getTriple().isAMDGPU() ||
      (AuxTarget && AuxTarget->getTriple().isAMDGPU())) {
#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) \
  InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/AMDGPUTypes.def"
  }

  // Builtin type for __objc_yes and __objc_no
  ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
                       SignedCharTy : BoolTy);

  ObjCConstantStringType = QualType();

  ObjCSuperType = QualType();

  // void * type
  if (LangOpts.OpenCLGenericAddressSpace) {
    auto Q = VoidTy.getQualifiers();
    Q.setAddressSpace(LangAS::opencl_generic);
    VoidPtrTy = getPointerType(T: getCanonicalType(
        T: getQualifiedType(T: VoidTy.getUnqualifiedType(), Qs: Q)));
  } else {
    VoidPtrTy = getPointerType(T: VoidTy);
  }

  // nullptr type (C++0x 2.14.7)
  InitBuiltinType(R&: NullPtrTy, K: BuiltinType::NullPtr);

  // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
  InitBuiltinType(R&: HalfTy, K: BuiltinType::Half);

  InitBuiltinType(R&: BFloat16Ty, K: BuiltinType::BFloat16);

  // Builtin type used to help define __builtin_va_list.
  VaListTagDecl = nullptr;

  // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls.
  if (LangOpts.MicrosoftExt || LangOpts.Borland) {
    MSGuidTagDecl = buildImplicitRecord(Name: "_GUID");
    getTranslationUnitDecl()->addDecl(D: MSGuidTagDecl);
  }
}
1470
/// Return the diagnostics engine associated with the source manager.
DiagnosticsEngine &ASTContext::getDiagnostics() const {
  return SourceMgr.getDiagnostics();
}
1474
1475AttrVec& ASTContext::getDeclAttrs(const Decl *D) {
1476 AttrVec *&Result = DeclAttrs[D];
1477 if (!Result) {
1478 void *Mem = Allocate(Size: sizeof(AttrVec));
1479 Result = new (Mem) AttrVec;
1480 }
1481
1482 return *Result;
1483}
1484
1485/// Erase the attributes corresponding to the given declaration.
1486void ASTContext::eraseDeclAttrs(const Decl *D) {
1487 llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(Val: D);
1488 if (Pos != DeclAttrs.end()) {
1489 Pos->second->~AttrVec();
1490 DeclAttrs.erase(I: Pos);
1491 }
1492}
1493
1494// FIXME: Remove ?
1495MemberSpecializationInfo *
1496ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
1497 assert(Var->isStaticDataMember() && "Not a static data member");
1498 return getTemplateOrSpecializationInfo(Var)
1499 .dyn_cast<MemberSpecializationInfo *>();
1500}
1501
1502ASTContext::TemplateOrSpecializationInfo
1503ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) {
1504 llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
1505 TemplateOrInstantiation.find(Val: Var);
1506 if (Pos == TemplateOrInstantiation.end())
1507 return {};
1508
1509 return Pos->second;
1510}
1511
1512void
1513ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
1514 TemplateSpecializationKind TSK,
1515 SourceLocation PointOfInstantiation) {
1516 assert(Inst->isStaticDataMember() && "Not a static data member");
1517 assert(Tmpl->isStaticDataMember() && "Not a static data member");
1518 setTemplateOrSpecializationInfo(Inst, TSI: new (*this) MemberSpecializationInfo(
1519 Tmpl, TSK, PointOfInstantiation));
1520}
1521
/// Record the template/specialization info for \p Inst; asserts that no
/// entry was recorded before.
void
ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst,
                                            TemplateOrSpecializationInfo TSI) {
  // Note: operator[] default-creates the entry; the assert checks that the
  // newly-created (or existing) value is still empty.
  assert(!TemplateOrInstantiation[Inst] &&
         "Already noted what the variable was instantiated from");
  TemplateOrInstantiation[Inst] = TSI;
}
1529
/// Return the using declaration that \p UUD was instantiated from, or null
/// if none was recorded.
NamedDecl *
ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) {
  return InstantiatedFromUsingDecl.lookup(Val: UUD);
}
1534
/// Record that \p Inst was instantiated from using-declaration pattern
/// \p Pattern. Both must be one of the using-declaration kinds.
void
ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) {
  assert((isa<UsingDecl>(Pattern) ||
          isa<UnresolvedUsingValueDecl>(Pattern) ||
          isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
         "pattern decl is not a using decl");
  assert((isa<UsingDecl>(Inst) ||
          isa<UnresolvedUsingValueDecl>(Inst) ||
          isa<UnresolvedUsingTypenameDecl>(Inst)) &&
         "instantiation did not produce a using decl");
  assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingDecl[Inst] = Pattern;
}
1548
/// Return the using-enum declaration that \p UUD was instantiated from, or
/// null if none was recorded.
UsingEnumDecl *
ASTContext::getInstantiatedFromUsingEnumDecl(UsingEnumDecl *UUD) {
  return InstantiatedFromUsingEnumDecl.lookup(Val: UUD);
}
1553
/// Record that using-enum declaration \p Inst was instantiated from
/// \p Pattern.
void ASTContext::setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst,
                                                  UsingEnumDecl *Pattern) {
  assert(!InstantiatedFromUsingEnumDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingEnumDecl[Inst] = Pattern;
}
1559
/// Return the using-shadow declaration that \p Inst was instantiated from,
/// or null if none was recorded.
UsingShadowDecl *
ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) {
  return InstantiatedFromUsingShadowDecl.lookup(Val: Inst);
}
1564
/// Record that using-shadow declaration \p Inst was instantiated from
/// \p Pattern.
void
ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
                                               UsingShadowDecl *Pattern) {
  assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingShadowDecl[Inst] = Pattern;
}
1571
/// Return the template field that unnamed field \p Field was instantiated
/// from, or null if none was recorded.
FieldDecl *
ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) const {
  return InstantiatedFromUnnamedFieldDecl.lookup(Val: Field);
}
1576
1577void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst,
1578 FieldDecl *Tmpl) {
1579 assert((!Inst->getDeclName() || Inst->isPlaceholderVar(getLangOpts())) &&
1580 "Instantiated field decl is not unnamed");
1581 assert((!Inst->getDeclName() || Inst->isPlaceholderVar(getLangOpts())) &&
1582 "Template field decl is not unnamed");
1583 assert(!InstantiatedFromUnnamedFieldDecl[Inst] &&
1584 "Already noted what unnamed field was instantiated from");
1585
1586 InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl;
1587}
1588
/// Return an iterator to the first method that \p Method overrides (see
/// overridden_methods()).
ASTContext::overridden_cxx_method_iterator
ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const {
  return overridden_methods(Method).begin();
}
1593
/// Return the past-the-end iterator over the methods that \p Method
/// overrides (see overridden_methods()).
ASTContext::overridden_cxx_method_iterator
ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const {
  return overridden_methods(Method).end();
}
1598
1599unsigned
1600ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const {
1601 auto Range = overridden_methods(Method);
1602 return Range.end() - Range.begin();
1603}
1604
1605ASTContext::overridden_method_range
1606ASTContext::overridden_methods(const CXXMethodDecl *Method) const {
1607 llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos =
1608 OverriddenMethods.find(Val: Method->getCanonicalDecl());
1609 if (Pos == OverriddenMethods.end())
1610 return overridden_method_range(nullptr, nullptr);
1611 return overridden_method_range(Pos->second.begin(), Pos->second.end());
1612}
1613
/// Record that \p Method overrides \p Overridden; both must be canonical.
void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method,
                                     const CXXMethodDecl *Overridden) {
  assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl());
  OverriddenMethods[Method].push_back(NewVal: Overridden);
}
1619
1620void ASTContext::getOverriddenMethods(
1621 const NamedDecl *D,
1622 SmallVectorImpl<const NamedDecl *> &Overridden) const {
1623 assert(D);
1624
1625 if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(Val: D)) {
1626 Overridden.append(in_start: overridden_methods_begin(Method: CXXMethod),
1627 in_end: overridden_methods_end(Method: CXXMethod));
1628 return;
1629 }
1630
1631 const auto *Method = dyn_cast<ObjCMethodDecl>(Val: D);
1632 if (!Method)
1633 return;
1634
1635 SmallVector<const ObjCMethodDecl *, 8> OverDecls;
1636 Method->getOverriddenMethods(Overridden&: OverDecls);
1637 Overridden.append(in_start: OverDecls.begin(), in_end: OverDecls.end());
1638}
1639
1640std::optional<ASTContext::CXXRecordDeclRelocationInfo>
1641ASTContext::getRelocationInfoForCXXRecord(const CXXRecordDecl *RD) const {
1642 assert(RD);
1643 CXXRecordDecl *D = RD->getDefinition();
1644 auto it = RelocatableClasses.find(Val: D);
1645 if (it != RelocatableClasses.end())
1646 return it->getSecond();
1647 return std::nullopt;
1648}
1649
1650void ASTContext::setRelocationInfoForCXXRecord(
1651 const CXXRecordDecl *RD, CXXRecordDeclRelocationInfo Info) {
1652 assert(RD);
1653 CXXRecordDecl *D = RD->getDefinition();
1654 assert(RelocatableClasses.find(D) == RelocatableClasses.end());
1655 RelocatableClasses.insert(KV: {D, Info});
1656}
1657
1658static bool primaryBaseHaseAddressDiscriminatedVTableAuthentication(
1659 const ASTContext &Context, const CXXRecordDecl *Class) {
1660 if (!Class->isPolymorphic())
1661 return false;
1662 const CXXRecordDecl *BaseType = Context.baseForVTableAuthentication(ThisClass: Class);
1663 using AuthAttr = VTablePointerAuthenticationAttr;
1664 const AuthAttr *ExplicitAuth = BaseType->getAttr<AuthAttr>();
1665 if (!ExplicitAuth)
1666 return Context.getLangOpts().PointerAuthVTPtrAddressDiscrimination;
1667 AuthAttr::AddressDiscriminationMode AddressDiscrimination =
1668 ExplicitAuth->getAddressDiscrimination();
1669 if (AddressDiscrimination == AuthAttr::DefaultAddressDiscrimination)
1670 return Context.getLangOpts().PointerAuthVTPtrAddressDiscrimination;
1671 return AddressDiscrimination == AuthAttr::AddressDiscrimination;
1672}
1673
/// Determine whether type \p T (transitively, through bases and fields)
/// contains address-discriminated pointer-authenticated content, and of what
/// kind. Results for record types are memoized in
/// RecordContainsAddressDiscriminatedPointerAuth.
ASTContext::PointerAuthContent
ASTContext::findPointerAuthContent(QualType T) const {
  assert(isPointerAuthenticationAvailable());

  T = T.getCanonicalType();
  if (T->isDependentType())
    return PointerAuthContent::None;

  if (T.hasAddressDiscriminatedPointerAuth())
    return PointerAuthContent::AddressDiscriminatedData;
  const RecordDecl *RD = T->getAsRecordDecl();
  if (!RD)
    return PointerAuthContent::None;

  if (RD->isInvalidDecl())
    return PointerAuthContent::None;

  // Memoized fast path for records we've already analyzed.
  if (auto Existing = RecordContainsAddressDiscriminatedPointerAuth.find(Val: RD);
      Existing != RecordContainsAddressDiscriminatedPointerAuth.end())
    return Existing->second;

  PointerAuthContent Result = PointerAuthContent::None;

  // Caches the accumulated Result for RD before returning it. The assert
  // confirms no recursive call inserted an entry behind our back.
  auto SaveResultAndReturn = [&]() -> PointerAuthContent {
    auto [ResultIter, DidAdd] =
        RecordContainsAddressDiscriminatedPointerAuth.try_emplace(Key: RD, Args&: Result);
    (void)ResultIter;
    (void)DidAdd;
    assert(DidAdd);
    return Result;
  };
  // Raises Result to NewResult if it is stronger, and reports whether a
  // stronger value than AddressDiscriminatedData is still reachable (it is
  // the lattice top, so we can stop searching once it is hit).
  auto ShouldContinueAfterUpdate = [&](PointerAuthContent NewResult) {
    static_assert(PointerAuthContent::None <
                  PointerAuthContent::AddressDiscriminatedVTable);
    static_assert(PointerAuthContent::AddressDiscriminatedVTable <
                  PointerAuthContent::AddressDiscriminatedData);
    if (NewResult > Result)
      Result = NewResult;
    return Result != PointerAuthContent::AddressDiscriminatedData;
  };
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD)) {
    if (primaryBaseHaseAddressDiscriminatedVTableAuthentication(Context: *this, Class: CXXRD) &&
        !ShouldContinueAfterUpdate(
            PointerAuthContent::AddressDiscriminatedVTable))
      return SaveResultAndReturn();
    for (auto Base : CXXRD->bases()) {
      if (!ShouldContinueAfterUpdate(findPointerAuthContent(T: Base.getType())))
        return SaveResultAndReturn();
    }
  }
  for (auto *FieldDecl : RD->fields()) {
    if (!ShouldContinueAfterUpdate(
            findPointerAuthContent(T: FieldDecl->getType())))
      return SaveResultAndReturn();
  }
  return SaveResultAndReturn();
}
1731
1732void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
1733 assert(!Import->getNextLocalImport() &&
1734 "Import declaration already in the chain");
1735 assert(!Import->isFromASTFile() && "Non-local import declaration");
1736 if (!FirstLocalImport) {
1737 FirstLocalImport = Import;
1738 LastLocalImport = Import;
1739 return;
1740 }
1741
1742 LastLocalImport->setNextLocalImport(Import);
1743 LastLocalImport = Import;
1744}
1745
1746//===----------------------------------------------------------------------===//
1747// Type Sizing and Analysis
1748//===----------------------------------------------------------------------===//
1749
1750/// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
1751/// scalar floating point type.
1752const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
1753 switch (T->castAs<BuiltinType>()->getKind()) {
1754 default:
1755 llvm_unreachable("Not a floating point type!");
1756 case BuiltinType::BFloat16:
1757 return Target->getBFloat16Format();
1758 case BuiltinType::Float16:
1759 return Target->getHalfFormat();
1760 case BuiltinType::Half:
1761 return Target->getHalfFormat();
1762 case BuiltinType::Float: return Target->getFloatFormat();
1763 case BuiltinType::Double: return Target->getDoubleFormat();
1764 case BuiltinType::Ibm128:
1765 return Target->getIbm128Format();
1766 case BuiltinType::LongDouble:
1767 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
1768 return AuxTarget->getLongDoubleFormat();
1769 return Target->getLongDoubleFormat();
1770 case BuiltinType::Float128:
1771 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
1772 return AuxTarget->getFloat128Format();
1773 return Target->getFloat128Format();
1774 }
1775}
1776
/// Return the alignment of the specified declaration, in characters.
///
/// When \p ForAlignof is true, the result reflects what alignof reports:
/// references are treated as their referent type rather than a pointer, and
/// the large-array and global-variable minimum-alignment adjustments below
/// are not applied.
CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
  // Start from the minimum possible alignment: one character.
  unsigned Align = Target->getCharWidth();

  const unsigned AlignFromAttr = D->getMaxAlignment();
  if (AlignFromAttr)
    Align = AlignFromAttr;

  // __attribute__((aligned)) can increase or decrease alignment
  // *except* on a struct or struct member, where it only increases
  // alignment unless 'packed' is also specified.
  //
  // It is an error for alignas to decrease alignment, so we can
  // ignore that possibility; Sema should diagnose it.
  bool UseAlignAttrOnly;
  if (const FieldDecl *FD = dyn_cast<FieldDecl>(Val: D))
    UseAlignAttrOnly =
        FD->hasAttr<PackedAttr>() || FD->getParent()->hasAttr<PackedAttr>();
  else
    UseAlignAttrOnly = AlignFromAttr != 0;
  // If we're using the align attribute only, just ignore everything
  // else about the declaration and its type.
  if (UseAlignAttrOnly) {
    // do nothing
  } else if (const auto *VD = dyn_cast<ValueDecl>(Val: D)) {
    QualType T = VD->getType();
    if (const auto *RT = T->getAs<ReferenceType>()) {
      // alignof(ref) is the alignment of the referent; otherwise model the
      // reference's storage as a pointer to the referent.
      if (ForAlignof)
        T = RT->getPointeeType();
      else
        T = getPointerType(T: RT->getPointeeType());
    }
    QualType BaseT = getBaseElementType(QT: T);
    if (T->isFunctionType())
      Align = getTypeInfoImpl(T: T.getTypePtr()).Align;
    else if (!BaseT->isIncompleteType()) {
      // Adjust alignments of declarations with array type by the
      // large-array alignment on the target.
      if (const ArrayType *arrayType = getAsArrayType(T)) {
        unsigned MinWidth = Target->getLargeArrayMinWidth();
        if (!ForAlignof && MinWidth) {
          if (isa<VariableArrayType>(Val: arrayType))
            Align = std::max(a: Align, b: Target->getLargeArrayAlign());
          else if (isa<ConstantArrayType>(Val: arrayType) &&
                   MinWidth <= getTypeSize(T: cast<ConstantArrayType>(Val: arrayType)))
            Align = std::max(a: Align, b: Target->getLargeArrayAlign());
        }
      }
      Align = std::max(a: Align, b: getPreferredTypeAlign(T: T.getTypePtr()));
      // The __unaligned qualifier forces minimal (character) alignment.
      if (BaseT.getQualifiers().hasUnaligned())
        Align = Target->getCharWidth();
    }

    // Ensure minimum alignment for global variables.
    if (const auto *VD = dyn_cast<VarDecl>(Val: D))
      if (VD->hasGlobalStorage() && !ForAlignof) {
        uint64_t TypeSize =
            !BaseT->isIncompleteType() ? getTypeSize(T: T.getTypePtr()) : 0;
        Align = std::max(a: Align, b: getMinGlobalAlignOfVar(Size: TypeSize, VD));
      }

    // Fields can be subject to extra alignment constraints, like if
    // the field is packed, the struct is packed, or the struct has a
    // a max-field-alignment constraint (#pragma pack). So calculate
    // the actual alignment of the field within the struct, and then
    // (as we're expected to) constrain that by the alignment of the type.
    if (const auto *Field = dyn_cast<FieldDecl>(Val: VD)) {
      const RecordDecl *Parent = Field->getParent();
      // We can only produce a sensible answer if the record is valid.
      if (!Parent->isInvalidDecl()) {
        const ASTRecordLayout &Layout = getASTRecordLayout(D: Parent);

        // Start with the record's overall alignment.
        unsigned FieldAlign = toBits(CharSize: Layout.getAlignment());

        // Use the GCD of that and the offset within the record.
        uint64_t Offset = Layout.getFieldOffset(FieldNo: Field->getFieldIndex());
        if (Offset > 0) {
          // Alignment is always a power of 2, so the GCD will be a power of 2,
          // which means we get to do this crazy thing instead of Euclid's.
          uint64_t LowBitOfOffset = Offset & (~Offset + 1);
          if (LowBitOfOffset < FieldAlign)
            FieldAlign = static_cast<unsigned>(LowBitOfOffset);
        }

        Align = std::min(a: Align, b: FieldAlign);
      }
    }
  }

  // Some targets have hard limitation on the maximum requestable alignment in
  // aligned attribute for static variables.
  const unsigned MaxAlignedAttr = getTargetInfo().getMaxAlignedAttribute();
  const auto *VD = dyn_cast<VarDecl>(Val: D);
  if (MaxAlignedAttr && VD && VD->getStorageClass() == SC_Static)
    Align = std::min(a: Align, b: MaxAlignedAttr);

  return toCharUnitsFromBits(BitSize: Align);
}
1875
1876CharUnits ASTContext::getExnObjectAlignment() const {
1877 return toCharUnitsFromBits(BitSize: Target->getExnObjectAlignment());
1878}
1879
1880// getTypeInfoDataSizeInChars - Return the size of a type, in
1881// chars. If the type is a record, its data size is returned. This is
1882// the size of the memcpy that's performed when assigning this type
1883// using a trivial copy/move assignment operator.
1884TypeInfoChars ASTContext::getTypeInfoDataSizeInChars(QualType T) const {
1885 TypeInfoChars Info = getTypeInfoInChars(T);
1886
1887 // In C++, objects can sometimes be allocated into the tail padding
1888 // of a base-class subobject. We decide whether that's possible
1889 // during class layout, so here we can just trust the layout results.
1890 if (getLangOpts().CPlusPlus) {
1891 if (const auto *RD = T->getAsCXXRecordDecl(); RD && !RD->isInvalidDecl()) {
1892 const ASTRecordLayout &layout = getASTRecordLayout(D: RD);
1893 Info.Width = layout.getDataSize();
1894 }
1895 }
1896
1897 return Info;
1898}
1899
1900/// getConstantArrayInfoInChars - Performing the computation in CharUnits
1901/// instead of in bits prevents overflowing the uint64_t for some large arrays.
1902TypeInfoChars
1903static getConstantArrayInfoInChars(const ASTContext &Context,
1904 const ConstantArrayType *CAT) {
1905 TypeInfoChars EltInfo = Context.getTypeInfoInChars(T: CAT->getElementType());
1906 uint64_t Size = CAT->getZExtSize();
1907 assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <=
1908 (uint64_t)(-1)/Size) &&
1909 "Overflow in array type char size evaluation");
1910 uint64_t Width = EltInfo.Width.getQuantity() * Size;
1911 unsigned Align = EltInfo.Align.getQuantity();
1912 if (!Context.getTargetInfo().getCXXABI().isMicrosoft() ||
1913 Context.getTargetInfo().getPointerWidth(AddrSpace: LangAS::Default) == 64)
1914 Width = llvm::alignTo(Value: Width, Align);
1915 return TypeInfoChars(CharUnits::fromQuantity(Quantity: Width),
1916 CharUnits::fromQuantity(Quantity: Align),
1917 EltInfo.AlignRequirement);
1918}
1919
1920TypeInfoChars ASTContext::getTypeInfoInChars(const Type *T) const {
1921 if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: T))
1922 return getConstantArrayInfoInChars(Context: *this, CAT);
1923 TypeInfo Info = getTypeInfo(T);
1924 return TypeInfoChars(toCharUnitsFromBits(BitSize: Info.Width),
1925 toCharUnitsFromBits(BitSize: Info.Align), Info.AlignRequirement);
1926}
1927
1928TypeInfoChars ASTContext::getTypeInfoInChars(QualType T) const {
1929 return getTypeInfoInChars(T: T.getTypePtr());
1930}
1931
1932bool ASTContext::isPromotableIntegerType(QualType T) const {
1933 // HLSL doesn't promote all small integer types to int, it
1934 // just uses the rank-based promotion rules for all types.
1935 if (getLangOpts().HLSL)
1936 return false;
1937
1938 if (const auto *BT = T->getAs<BuiltinType>())
1939 switch (BT->getKind()) {
1940 case BuiltinType::Bool:
1941 case BuiltinType::Char_S:
1942 case BuiltinType::Char_U:
1943 case BuiltinType::SChar:
1944 case BuiltinType::UChar:
1945 case BuiltinType::Short:
1946 case BuiltinType::UShort:
1947 case BuiltinType::WChar_S:
1948 case BuiltinType::WChar_U:
1949 case BuiltinType::Char8:
1950 case BuiltinType::Char16:
1951 case BuiltinType::Char32:
1952 return true;
1953 default:
1954 return false;
1955 }
1956
1957 // Enumerated types are promotable to their compatible integer types
1958 // (C99 6.3.1.1) a.k.a. its underlying type (C++ [conv.prom]p2).
1959 if (const auto *ED = T->getAsEnumDecl()) {
1960 if (T->isDependentType() || ED->getPromotionType().isNull() ||
1961 ED->isScoped())
1962 return false;
1963
1964 return true;
1965 }
1966
1967 // OverflowBehaviorTypes are promotable if their underlying type is promotable
1968 if (const auto *OBT = T->getAs<OverflowBehaviorType>()) {
1969 return isPromotableIntegerType(T: OBT->getUnderlyingType());
1970 }
1971
1972 return false;
1973}
1974
1975bool ASTContext::isAlignmentRequired(const Type *T) const {
1976 return getTypeInfo(T).AlignRequirement != AlignRequirementKind::None;
1977}
1978
1979bool ASTContext::isAlignmentRequired(QualType T) const {
1980 return isAlignmentRequired(T: T.getTypePtr());
1981}
1982
1983unsigned ASTContext::getTypeAlignIfKnown(QualType T,
1984 bool NeedsPreferredAlignment) const {
1985 // An alignment on a typedef overrides anything else.
1986 if (const auto *TT = T->getAs<TypedefType>())
1987 if (unsigned Align = TT->getDecl()->getMaxAlignment())
1988 return Align;
1989
1990 // If we have an (array of) complete type, we're done.
1991 T = getBaseElementType(QT: T);
1992 if (!T->isIncompleteType())
1993 return NeedsPreferredAlignment ? getPreferredTypeAlign(T) : getTypeAlign(T);
1994
1995 // If we had an array type, its element type might be a typedef
1996 // type with an alignment attribute.
1997 if (const auto *TT = T->getAs<TypedefType>())
1998 if (unsigned Align = TT->getDecl()->getMaxAlignment())
1999 return Align;
2000
2001 // Otherwise, see if the declaration of the type had an attribute.
2002 if (const auto *TD = T->getAsTagDecl())
2003 return TD->getMaxAlignment();
2004
2005 return 0;
2006}
2007
/// Return the memoized size/alignment information for \p T, computing it via
/// getTypeInfoImpl on first use.
TypeInfo ASTContext::getTypeInfo(const Type *T) const {
  TypeInfoMap::iterator I = MemoizedTypeInfo.find(Val: T);
  if (I != MemoizedTypeInfo.end())
    return I->second;

  // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup.
  TypeInfo TI = getTypeInfoImpl(T);
  // Deliberately re-look-up via operator[] rather than reusing the iterator
  // above, which may have been invalidated by the recursive computation.
  MemoizedTypeInfo[T] = TI;
  return TI;
}
2018
/// getTypeInfoImpl - Return the size of the specified type, in bits. This
/// method does not work on incomplete types.
///
/// FIXME: Pointers into different addr spaces could have different sizes and
/// alignment requirements: getPointerInfo should take an AddrSpace, this
/// should take a QualType, &c.
TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
  uint64_t Width = 0;
  unsigned Align = 8;
  AlignRequirementKind AlignRequirement = AlignRequirementKind::None;
  LangAS AS = LangAS::Default;
  switch (T->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \
  case Type::Class: \
  assert(!T->isDependentType() && "should not see dependent types here"); \
  return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr());
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Should not see dependent types");

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // GCC extension: alignof(function) = 32 bits
    Width = 0;
    Align = 32;
    break;

  case Type::IncompleteArray:
  case Type::VariableArray:
  case Type::ConstantArray:
  case Type::ArrayParameter: {
    // Model non-constant sized arrays as size zero, but track the alignment.
    uint64_t Size = 0;
    if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: T))
      Size = CAT->getZExtSize();

    TypeInfo EltInfo = getTypeInfo(T: cast<ArrayType>(Val: T)->getElementType());
    assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) &&
           "Overflow in array type bit size evaluation");
    Width = EltInfo.Width * Size;
    Align = EltInfo.Align;
    AlignRequirement = EltInfo.AlignRequirement;
    // The 32-bit Microsoft ABI does not round array sizes up to the element
    // alignment (see also getConstantArrayInfoInChars).
    if (!getTargetInfo().getCXXABI().isMicrosoft() ||
        getTargetInfo().getPointerWidth(AddrSpace: LangAS::Default) == 64)
      Width = llvm::alignTo(Value: Width, Align);
    break;
  }

  case Type::ExtVector:
  case Type::Vector: {
    const auto *VT = cast<VectorType>(Val: T);
    TypeInfo EltInfo = getTypeInfo(T: VT->getElementType());
    // Packed boolean vectors store one bit per element.
    Width = VT->isPackedVectorBoolType(ctx: *this)
                ? VT->getNumElements()
                : EltInfo.Width * VT->getNumElements();
    // Enforce at least byte size and alignment.
    Width = std::max<unsigned>(a: 8, b: Width);
    Align = std::max<unsigned>(
        a: 8, b: Target->vectorsAreElementAligned() ? EltInfo.Width : Width);

    // If the alignment is not a power of 2, round up to the next power of 2.
    // This happens for non-power-of-2 length vectors.
    if (Align & (Align-1)) {
      Align = llvm::bit_ceil(Value: Align);
      Width = llvm::alignTo(Value: Width, Align);
    }
    // Adjust the alignment based on the target max.
    uint64_t TargetVectorAlign = Target->getMaxVectorAlign();
    if (TargetVectorAlign && TargetVectorAlign < Align)
      Align = TargetVectorAlign;
    if (VT->getVectorKind() == VectorKind::SveFixedLengthData)
      // Adjust the alignment for fixed-length SVE vectors. This is important
      // for non-power-of-2 vector lengths.
      Align = 128;
    else if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
      // Adjust the alignment for fixed-length SVE predicates.
      Align = 16;
    else if (VT->getVectorKind() == VectorKind::RVVFixedLengthData ||
             VT->getVectorKind() == VectorKind::RVVFixedLengthMask ||
             VT->getVectorKind() == VectorKind::RVVFixedLengthMask_1 ||
             VT->getVectorKind() == VectorKind::RVVFixedLengthMask_2 ||
             VT->getVectorKind() == VectorKind::RVVFixedLengthMask_4)
      // Adjust the alignment for fixed-length RVV vectors.
      Align = std::min<unsigned>(a: 64, b: Width);
    break;
  }

  case Type::ConstantMatrix: {
    const auto *MT = cast<ConstantMatrixType>(Val: T);
    TypeInfo ElementInfo = getTypeInfo(T: MT->getElementType());
    // The internal layout of a matrix value is implementation defined.
    // Initially be ABI compatible with arrays with respect to alignment and
    // size.
    Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns();
    Align = ElementInfo.Align;
    break;
  }

  case Type::Builtin:
    switch (cast<BuiltinType>(Val: T)->getKind()) {
    default: llvm_unreachable("Unknown builtin type!");
    case BuiltinType::Void:
      // GCC extension: alignof(void) = 8 bits.
      Width = 0;
      Align = 8;
      break;
    case BuiltinType::Bool:
      Width = Target->getBoolWidth();
      Align = Target->getBoolAlign();
      break;
    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::UChar:
    case BuiltinType::SChar:
    case BuiltinType::Char8:
      Width = Target->getCharWidth();
      Align = Target->getCharAlign();
      break;
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
      Width = Target->getWCharWidth();
      Align = Target->getWCharAlign();
      break;
    case BuiltinType::Char16:
      Width = Target->getChar16Width();
      Align = Target->getChar16Align();
      break;
    case BuiltinType::Char32:
      Width = Target->getChar32Width();
      Align = Target->getChar32Align();
      break;
    case BuiltinType::UShort:
    case BuiltinType::Short:
      Width = Target->getShortWidth();
      Align = Target->getShortAlign();
      break;
    case BuiltinType::UInt:
    case BuiltinType::Int:
      Width = Target->getIntWidth();
      Align = Target->getIntAlign();
      break;
    case BuiltinType::ULong:
    case BuiltinType::Long:
      Width = Target->getLongWidth();
      Align = Target->getLongAlign();
      break;
    case BuiltinType::ULongLong:
    case BuiltinType::LongLong:
      Width = Target->getLongLongWidth();
      Align = Target->getLongLongAlign();
      break;
    case BuiltinType::Int128:
    case BuiltinType::UInt128:
      Width = 128;
      Align = Target->getInt128Align();
      break;
    case BuiltinType::ShortAccum:
    case BuiltinType::UShortAccum:
    case BuiltinType::SatShortAccum:
    case BuiltinType::SatUShortAccum:
      Width = Target->getShortAccumWidth();
      Align = Target->getShortAccumAlign();
      break;
    case BuiltinType::Accum:
    case BuiltinType::UAccum:
    case BuiltinType::SatAccum:
    case BuiltinType::SatUAccum:
      Width = Target->getAccumWidth();
      Align = Target->getAccumAlign();
      break;
    case BuiltinType::LongAccum:
    case BuiltinType::ULongAccum:
    case BuiltinType::SatLongAccum:
    case BuiltinType::SatULongAccum:
      Width = Target->getLongAccumWidth();
      Align = Target->getLongAccumAlign();
      break;
    case BuiltinType::ShortFract:
    case BuiltinType::UShortFract:
    case BuiltinType::SatShortFract:
    case BuiltinType::SatUShortFract:
      Width = Target->getShortFractWidth();
      Align = Target->getShortFractAlign();
      break;
    case BuiltinType::Fract:
    case BuiltinType::UFract:
    case BuiltinType::SatFract:
    case BuiltinType::SatUFract:
      Width = Target->getFractWidth();
      Align = Target->getFractAlign();
      break;
    case BuiltinType::LongFract:
    case BuiltinType::ULongFract:
    case BuiltinType::SatLongFract:
    case BuiltinType::SatULongFract:
      Width = Target->getLongFractWidth();
      Align = Target->getLongFractAlign();
      break;
    case BuiltinType::BFloat16:
      // When the target lacks __bf16, SYCL and OpenMP device compilations
      // borrow the layout from the aux (host) target.
      if (Target->hasBFloat16Type()) {
        Width = Target->getBFloat16Width();
        Align = Target->getBFloat16Align();
      } else if ((getLangOpts().SYCLIsDevice ||
                  (getLangOpts().OpenMP &&
                   getLangOpts().OpenMPIsTargetDevice)) &&
                 AuxTarget->hasBFloat16Type()) {
        Width = AuxTarget->getBFloat16Width();
        Align = AuxTarget->getBFloat16Align();
      }
      break;
    case BuiltinType::Float16:
    case BuiltinType::Half:
      if (Target->hasFloat16Type() || !getLangOpts().OpenMP ||
          !getLangOpts().OpenMPIsTargetDevice) {
        Width = Target->getHalfWidth();
        Align = Target->getHalfAlign();
      } else {
        assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
               "Expected OpenMP device compilation.");
        Width = AuxTarget->getHalfWidth();
        Align = AuxTarget->getHalfAlign();
      }
      break;
    case BuiltinType::Float:
      Width = Target->getFloatWidth();
      Align = Target->getFloatAlign();
      break;
    case BuiltinType::Double:
      Width = Target->getDoubleWidth();
      Align = Target->getDoubleAlign();
      break;
    case BuiltinType::Ibm128:
      Width = Target->getIbm128Width();
      Align = Target->getIbm128Align();
      break;
    case BuiltinType::LongDouble:
      // OpenMP target devices match the host's long double layout when it
      // differs from the device's.
      if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
          (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() ||
           Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) {
        Width = AuxTarget->getLongDoubleWidth();
        Align = AuxTarget->getLongDoubleAlign();
      } else {
        Width = Target->getLongDoubleWidth();
        Align = Target->getLongDoubleAlign();
      }
      break;
    case BuiltinType::Float128:
      if (Target->hasFloat128Type() || !getLangOpts().OpenMP ||
          !getLangOpts().OpenMPIsTargetDevice) {
        Width = Target->getFloat128Width();
        Align = Target->getFloat128Align();
      } else {
        assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
               "Expected OpenMP device compilation.");
        Width = AuxTarget->getFloat128Width();
        Align = AuxTarget->getFloat128Align();
      }
      break;
    case BuiltinType::NullPtr:
      // C++ 3.9.1p11: sizeof(nullptr_t) == sizeof(void*)
      Width = Target->getPointerWidth(AddrSpace: LangAS::Default);
      Align = Target->getPointerAlign(AddrSpace: LangAS::Default);
      break;
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      // Objective-C runtime handles are pointer-sized.
      Width = Target->getPointerWidth(AddrSpace: LangAS::Default);
      Align = Target->getPointerAlign(AddrSpace: LangAS::Default);
      break;
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
  case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
  case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
      // OpenCL opaque types are pointers in a target-chosen address space.
      AS = Target->getOpenCLTypeAddrSpace(TK: getOpenCLTypeKind(T));
      Width = Target->getPointerWidth(AddrSpace: AS);
      Align = Target->getPointerAlign(AddrSpace: AS);
      break;
    // The SVE types are effectively target-specific. The length of an
    // SVE_VECTOR_TYPE is only known at runtime, but it is always a multiple
    // of 128 bits. There is one predicate bit for each vector byte, so the
    // length of an SVE_PREDICATE_TYPE is always a multiple of 16 bits.
    //
    // Because the length is only known at runtime, we use a dummy value
    // of 0 for the static length. The alignment values are those defined
    // by the Procedure Call Standard for the Arm Architecture.
#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId) \
  case BuiltinType::Id: \
    Width = 0; \
    Align = 128; \
    break;
#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId) \
  case BuiltinType::Id: \
    Width = 0; \
    Align = 16; \
    break;
#define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingletonId) \
  case BuiltinType::Id: \
    Width = 0; \
    Align = 16; \
    break;
#define SVE_SCALAR_TYPE(Name, MangledName, Id, SingletonId, Bits) \
  case BuiltinType::Id: \
    Width = Bits; \
    Align = Bits; \
    break;
#include "clang/Basic/AArch64ACLETypes.def"
#define PPC_VECTOR_TYPE(Name, Id, Size) \
  case BuiltinType::Id: \
    Width = Size; \
    Align = Size; \
    break;
#include "clang/Basic/PPCTypes.def"
#define RVV_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, NF, IsSigned, \
                        IsFP, IsBF) \
  case BuiltinType::Id: \
    Width = 0; \
    Align = ElBits; \
    break;
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, ElKind) \
  case BuiltinType::Id: \
    Width = 0; \
    Align = 8; \
    break;
#include "clang/Basic/RISCVVTypes.def"
#define WASM_TYPE(Name, Id, SingletonId) \
  case BuiltinType::Id: \
    Width = 0; \
    Align = 8; \
    break;
#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define AMDGPU_TYPE(NAME, ID, SINGLETONID, WIDTH, ALIGN) \
  case BuiltinType::ID: \
    Width = WIDTH; \
    Align = ALIGN; \
    break;
#include "clang/Basic/AMDGPUTypes.def"
#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/HLSLIntangibleTypes.def"
      // HLSL intangible types are modeled as pointer-sized.
      Width = Target->getPointerWidth(AddrSpace: LangAS::Default);
      Align = Target->getPointerAlign(AddrSpace: LangAS::Default);
      break;
    }
    break;
  case Type::ObjCObjectPointer:
    // Objective-C object pointers are ordinary pointers in the default
    // address space.
    Width = Target->getPointerWidth(AddrSpace: LangAS::Default);
    Align = Target->getPointerAlign(AddrSpace: LangAS::Default);
    break;
  case Type::BlockPointer:
    AS = cast<BlockPointerType>(Val: T)->getPointeeType().getAddressSpace();
    Width = Target->getPointerWidth(AddrSpace: AS);
    Align = Target->getPointerAlign(AddrSpace: AS);
    break;
  case Type::LValueReference:
  case Type::RValueReference:
    // alignof and sizeof should never enter this code path here, so we go
    // the pointer route.
    AS = cast<ReferenceType>(Val: T)->getPointeeType().getAddressSpace();
    Width = Target->getPointerWidth(AddrSpace: AS);
    Align = Target->getPointerAlign(AddrSpace: AS);
    break;
  case Type::Pointer:
    // Pointer width/alignment depends on the pointee's address space.
    AS = cast<PointerType>(Val: T)->getPointeeType().getAddressSpace();
    Width = Target->getPointerWidth(AddrSpace: AS);
    Align = Target->getPointerAlign(AddrSpace: AS);
    break;
  case Type::MemberPointer: {
    // Member pointer layout is determined by the C++ ABI.
    const auto *MPT = cast<MemberPointerType>(Val: T);
    CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT);
    Width = MPI.Width;
    Align = MPI.Align;
    break;
  }
  case Type::Complex: {
    // Complex types have the same alignment as their elements, but twice the
    // size.
    TypeInfo EltInfo = getTypeInfo(T: cast<ComplexType>(Val: T)->getElementType());
    Width = EltInfo.Width * 2;
    Align = EltInfo.Align;
    break;
  }
  case Type::ObjCObject:
    return getTypeInfo(T: cast<ObjCObjectType>(Val: T)->getBaseType().getTypePtr());
  case Type::Adjusted:
  case Type::Decayed:
    return getTypeInfo(T: cast<AdjustedType>(Val: T)->getAdjustedType().getTypePtr());
  case Type::ObjCInterface: {
    const auto *ObjCI = cast<ObjCInterfaceType>(Val: T);
    // Invalid declarations get a harmless 1-byte placeholder layout.
    if (ObjCI->getDecl()->isInvalidDecl()) {
      Width = 8;
      Align = 8;
      break;
    }
    const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(D: ObjCI->getDecl());
    Width = toBits(CharSize: Layout.getSize());
    Align = toBits(CharSize: Layout.getAlignment());
    break;
  }
  case Type::BitInt: {
    // The target decides how _BitInt(N) is laid out.
    const auto *EIT = cast<BitIntType>(Val: T);
    Align = Target->getBitIntAlign(NumBits: EIT->getNumBits());
    Width = Target->getBitIntWidth(NumBits: EIT->getNumBits());
    break;
  }
  case Type::Record:
  case Type::Enum: {
    const auto *TT = cast<TagType>(Val: T);
    const TagDecl *TD = TT->getDecl()->getDefinitionOrSelf();

    // Invalid declarations get a harmless 1-byte placeholder layout.
    if (TD->isInvalidDecl()) {
      Width = 8;
      Align = 8;
      break;
    }

    if (isa<EnumType>(Val: TT)) {
      // An enum's layout is that of its integer type, except that an aligned
      // attribute on the enum itself overrides the alignment.
      const EnumDecl *ED = cast<EnumDecl>(Val: TD);
      TypeInfo Info =
          getTypeInfo(T: ED->getIntegerType()->getUnqualifiedDesugaredType());
      if (unsigned AttrAlign = ED->getMaxAlignment()) {
        Info.Align = AttrAlign;
        Info.AlignRequirement = AlignRequirementKind::RequiredByEnum;
      }
      return Info;
    }

    const auto *RD = cast<RecordDecl>(Val: TD);
    const ASTRecordLayout &Layout = getASTRecordLayout(D: RD);
    Width = toBits(CharSize: Layout.getSize());
    Align = toBits(CharSize: Layout.getAlignment());
    AlignRequirement = RD->hasAttr<AlignedAttr>()
                           ? AlignRequirementKind::RequiredByRecord
                           : AlignRequirementKind::None;
    break;
  }

  case Type::SubstTemplateTypeParm:
    return getTypeInfo(T: cast<SubstTemplateTypeParmType>(Val: T)->
                       getReplacementType().getTypePtr());

  case Type::Auto:
  case Type::DeducedTemplateSpecialization: {
    const auto *A = cast<DeducedType>(Val: T);
    assert(!A->getDeducedType().isNull() &&
           "cannot request the size of an undeduced or dependent auto type");
    return getTypeInfo(T: A->getDeducedType().getTypePtr());
  }

  case Type::Paren:
    return getTypeInfo(T: cast<ParenType>(Val: T)->getInnerType().getTypePtr());

  case Type::MacroQualified:
    return getTypeInfo(
        T: cast<MacroQualifiedType>(Val: T)->getUnderlyingType().getTypePtr());

  case Type::ObjCTypeParam:
    return getTypeInfo(T: cast<ObjCTypeParamType>(Val: T)->desugar().getTypePtr());

  case Type::Using:
    return getTypeInfo(T: cast<UsingType>(Val: T)->desugar().getTypePtr());

  case Type::Typedef: {
    const auto *TT = cast<TypedefType>(Val: T);
    TypeInfo Info = getTypeInfo(T: TT->desugar().getTypePtr());
    // If the typedef has an aligned attribute on it, it overrides any computed
    // alignment we have. This violates the GCC documentation (which says that
    // attribute(aligned) can only round up) but matches its implementation.
    if (unsigned AttrAlign = TT->getDecl()->getMaxAlignment()) {
      Align = AttrAlign;
      AlignRequirement = AlignRequirementKind::RequiredByTypedef;
    } else {
      Align = Info.Align;
      AlignRequirement = Info.AlignRequirement;
    }
    Width = Info.Width;
    break;
  }

  case Type::Attributed:
    return getTypeInfo(
        T: cast<AttributedType>(Val: T)->getEquivalentType().getTypePtr());

  case Type::CountAttributed:
    return getTypeInfo(T: cast<CountAttributedType>(Val: T)->desugar().getTypePtr());

  case Type::BTFTagAttributed:
    return getTypeInfo(
        T: cast<BTFTagAttributedType>(Val: T)->getWrappedType().getTypePtr());

  case Type::OverflowBehavior:
    return getTypeInfo(
        T: cast<OverflowBehaviorType>(Val: T)->getUnderlyingType().getTypePtr());

  case Type::HLSLAttributedResource:
    return getTypeInfo(
        T: cast<HLSLAttributedResourceType>(Val: T)->getWrappedType().getTypePtr());

  case Type::HLSLInlineSpirv: {
    const auto *ST = cast<HLSLInlineSpirvType>(Val: T);
    // Size is specified in bytes, convert to bits
    Width = ST->getSize() * 8;
    Align = ST->getAlignment();
    if (Width == 0 && Align == 0) {
      // We are defaulting to laying out opaque SPIR-V types as 32-bit ints.
      Width = 32;
      Align = 32;
    }
    break;
  }

  case Type::Atomic: {
    // Start with the base type information.
    TypeInfo Info = getTypeInfo(T: cast<AtomicType>(Val: T)->getValueType());
    Width = Info.Width;
    Align = Info.Align;

    if (!Width) {
      // An otherwise zero-sized type should still generate an
      // atomic operation.
      Width = Target->getCharWidth();
      assert(Align);
    } else if (Width <= Target->getMaxAtomicPromoteWidth()) {
      // If the size of the type doesn't exceed the platform's max
      // atomic promotion width, make the size and alignment more
      // favorable to atomic operations:

      // Round the size up to a power of 2.
      Width = llvm::bit_ceil(Value: Width);

      // Set the alignment equal to the size.
      Align = static_cast<unsigned>(Width);
    }
  }
  break;

  case Type::PredefinedSugar:
    return getTypeInfo(T: cast<PredefinedSugarType>(Val: T)->desugar().getTypePtr());

  case Type::Pipe:
    // OpenCL pipes are represented as pointers in the global address space.
    Width = Target->getPointerWidth(AddrSpace: LangAS::opencl_global);
    Align = Target->getPointerAlign(AddrSpace: LangAS::opencl_global);
    break;
  }

  assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
  return TypeInfo(Width, Align, AlignRequirement);
}
2575
2576unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
2577 UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(Val: T);
2578 if (I != MemoizedUnadjustedAlign.end())
2579 return I->second;
2580
2581 unsigned UnadjustedAlign;
2582 if (const auto *RT = T->getAsCanonical<RecordType>()) {
2583 const ASTRecordLayout &Layout = getASTRecordLayout(D: RT->getDecl());
2584 UnadjustedAlign = toBits(CharSize: Layout.getUnadjustedAlignment());
2585 } else if (const auto *ObjCI = T->getAsCanonical<ObjCInterfaceType>()) {
2586 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(D: ObjCI->getDecl());
2587 UnadjustedAlign = toBits(CharSize: Layout.getUnadjustedAlignment());
2588 } else {
2589 UnadjustedAlign = getTypeAlign(T: T->getUnqualifiedDesugaredType());
2590 }
2591
2592 MemoizedUnadjustedAlign[T] = UnadjustedAlign;
2593 return UnadjustedAlign;
2594}
2595
2596unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const {
2597 unsigned SimdAlign = llvm::OpenMPIRBuilder::getOpenMPDefaultSimdAlign(
2598 TargetTriple: getTargetInfo().getTriple(), Features: Target->getTargetOpts().FeatureMap);
2599 return SimdAlign;
2600}
2601
2602/// toCharUnitsFromBits - Convert a size in bits to a size in characters.
2603CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
2604 return CharUnits::fromQuantity(Quantity: BitSize / getCharWidth());
2605}
2606
/// toBits - Convert a size in characters to a size in bits.
int64_t ASTContext::toBits(CharUnits CharSize) const {
  return CharSize.getQuantity() * getCharWidth();
}
2611
/// getTypeSizeInChars - Return the size of the specified type, in characters.
/// This method does not work on incomplete types.
CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
  return getTypeInfoInChars(T).Width;
}
/// Overload taking a raw Type pointer; identical semantics.
CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
  return getTypeInfoInChars(T).Width;
}
2620
/// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
/// characters. This method does not work on incomplete types.
CharUnits ASTContext::getTypeAlignInChars(QualType T) const {
  return toCharUnitsFromBits(BitSize: getTypeAlign(T));
}
/// Overload taking a raw Type pointer; identical semantics.
CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
  return toCharUnitsFromBits(BitSize: getTypeAlign(T));
}
2629
/// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a
/// type, in characters, before alignment adjustments. This method does
/// not work on incomplete types.
CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const {
  return toCharUnitsFromBits(BitSize: getTypeUnadjustedAlign(T));
}
/// Overload taking a raw Type pointer; identical semantics.
CharUnits ASTContext::getTypeUnadjustedAlignInChars(const Type *T) const {
  return toCharUnitsFromBits(BitSize: getTypeUnadjustedAlign(T));
}
2639
/// getPreferredTypeAlign - Return the "preferred" alignment of the specified
/// type for the current target in bits. This can be different than the ABI
/// alignment in cases where it is beneficial for performance or backwards
/// compatibility preserving to overalign a data type. (Note: despite the name,
/// the preferred alignment is ABI-impacting, and not an optimization.)
unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
  TypeInfo TI = getTypeInfo(T);
  unsigned ABIAlign = TI.Align;

  // Arrays take the preferred alignment of their element type.
  T = T->getBaseElementTypeUnsafe();

  // The preferred alignment of member pointers is that of a pointer.
  if (T->isMemberPointerType())
    return getPreferredTypeAlign(T: getPointerDiffType().getTypePtr());

  // Some targets never overalign; for them preferred == ABI alignment.
  if (!Target->allowsLargerPreferedTypeAlignment())
    return ABIAlign;

  if (const auto *RD = T->getAsRecordDecl()) {
    // When used as part of a typedef, or together with a 'packed' attribute,
    // the 'aligned' attribute can be used to decrease alignment. Note that the
    // 'packed' case is already taken into consideration when computing the
    // alignment, we only need to handle the typedef case here.
    if (TI.AlignRequirement == AlignRequirementKind::RequiredByTypedef ||
        RD->isInvalidDecl())
      return ABIAlign;

    unsigned PreferredAlign = static_cast<unsigned>(
        toBits(CharSize: getASTRecordLayout(D: RD).PreferredAlignment));
    assert(PreferredAlign >= ABIAlign &&
           "PreferredAlign should be at least as large as ABIAlign.");
    return PreferredAlign;
  }

  // Double (and, for targets supporting AIX `power` alignment, long double) and
  // long long should be naturally aligned (despite requiring less alignment) if
  // possible.
  if (const auto *CT = T->getAs<ComplexType>())
    T = CT->getElementType().getTypePtr();
  if (const auto *ED = T->getAsEnumDecl())
    T = ED->getIntegerType().getTypePtr();
  if (T->isSpecificBuiltinType(K: BuiltinType::Double) ||
      T->isSpecificBuiltinType(K: BuiltinType::LongLong) ||
      T->isSpecificBuiltinType(K: BuiltinType::ULongLong) ||
      (T->isSpecificBuiltinType(K: BuiltinType::LongDouble) &&
       Target->defaultsToAIXPowerAlignment()))
    // Don't increase the alignment if an alignment attribute was specified on a
    // typedef declaration.
    if (!TI.isAlignRequired())
      return std::max(a: ABIAlign, b: (unsigned)getTypeSize(T));

  return ABIAlign;
}
2693
2694/// getTargetDefaultAlignForAttributeAligned - Return the default alignment
2695/// for __attribute__((aligned)) on this target, to be used if no alignment
2696/// value is specified.
2697unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const {
2698 return getTargetInfo().getDefaultAlignForAttributeAligned();
2699}
2700
2701/// getAlignOfGlobalVar - Return the alignment in bits that should be given
2702/// to a global variable of the specified type.
2703unsigned ASTContext::getAlignOfGlobalVar(QualType T, const VarDecl *VD) const {
2704 uint64_t TypeSize = getTypeSize(T: T.getTypePtr());
2705 return std::max(a: getPreferredTypeAlign(T),
2706 b: getMinGlobalAlignOfVar(Size: TypeSize, VD));
2707}
2708
2709/// getAlignOfGlobalVarInChars - Return the alignment in characters that
2710/// should be given to a global variable of the specified type.
2711CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T,
2712 const VarDecl *VD) const {
2713 return toCharUnitsFromBits(BitSize: getAlignOfGlobalVar(T, VD));
2714}
2715
2716unsigned ASTContext::getMinGlobalAlignOfVar(uint64_t Size,
2717 const VarDecl *VD) const {
2718 // Make the default handling as that of a non-weak definition in the
2719 // current translation unit.
2720 bool HasNonWeakDef = !VD || (VD->hasDefinition() && !VD->isWeak());
2721 return getTargetInfo().getMinGlobalAlign(Size, HasNonWeakDef);
2722}
2723
2724CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const {
2725 CharUnits Offset = CharUnits::Zero();
2726 const ASTRecordLayout *Layout = &getASTRecordLayout(D: RD);
2727 while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) {
2728 Offset += Layout->getBaseClassOffset(Base);
2729 Layout = &getASTRecordLayout(D: Base);
2730 }
2731 return Offset;
2732}
2733
/// Compute the 'this'-pointer adjustment (in characters) implied by the
/// inheritance path stored in a member-pointer APValue.
CharUnits ASTContext::getMemberPointerPathAdjustment(const APValue &MP) const {
  const ValueDecl *MPD = MP.getMemberPointerDecl();
  CharUnits ThisAdjustment = CharUnits::Zero();
  ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath();
  bool DerivedMember = MP.isMemberPointerToDerivedMember();
  const CXXRecordDecl *RD = cast<CXXRecordDecl>(Val: MPD->getDeclContext());
  // Accumulate base-class offsets along the path. For a pointer to a member
  // of a *derived* class the walk direction is reversed, hence the swap.
  for (unsigned I = 0, N = Path.size(); I != N; ++I) {
    const CXXRecordDecl *Base = RD;
    const CXXRecordDecl *Derived = Path[I];
    if (DerivedMember)
      std::swap(a&: Base, b&: Derived);
    ThisAdjustment += getASTRecordLayout(D: Derived).getBaseClassOffset(Base);
    RD = Path[I];
  }
  // A derived-member pointer adjusts in the opposite direction.
  if (DerivedMember)
    ThisAdjustment = -ThisAdjustment;
  return ThisAdjustment;
}
2752
2753/// DeepCollectObjCIvars -
2754/// This routine first collects all declared, but not synthesized, ivars in
2755/// super class and then collects all ivars, including those synthesized for
2756/// current class. This routine is used for implementation of current class
2757/// when all ivars, declared and synthesized are known.
2758void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI,
2759 bool leafClass,
2760 SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const {
2761 if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
2762 DeepCollectObjCIvars(OI: SuperClass, leafClass: false, Ivars);
2763 if (!leafClass) {
2764 llvm::append_range(C&: Ivars, R: OI->ivars());
2765 } else {
2766 auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
2767 for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
2768 Iv= Iv->getNextIvar())
2769 Ivars.push_back(Elt: Iv);
2770 }
2771}
2772
2773/// CollectInheritedProtocols - Collect all protocols in current class and
2774/// those inherited by it.
2775void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
2776 llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) {
2777 if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(Val: CDecl)) {
2778 // We can use protocol_iterator here instead of
2779 // all_referenced_protocol_iterator since we are walking all categories.
2780 for (auto *Proto : OI->all_referenced_protocols()) {
2781 CollectInheritedProtocols(CDecl: Proto, Protocols);
2782 }
2783
2784 // Categories of this Interface.
2785 for (const auto *Cat : OI->visible_categories())
2786 CollectInheritedProtocols(CDecl: Cat, Protocols);
2787
2788 if (ObjCInterfaceDecl *SD = OI->getSuperClass())
2789 while (SD) {
2790 CollectInheritedProtocols(CDecl: SD, Protocols);
2791 SD = SD->getSuperClass();
2792 }
2793 } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(Val: CDecl)) {
2794 for (auto *Proto : OC->protocols()) {
2795 CollectInheritedProtocols(CDecl: Proto, Protocols);
2796 }
2797 } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(Val: CDecl)) {
2798 // Insert the protocol.
2799 if (!Protocols.insert(
2800 Ptr: const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second)
2801 return;
2802
2803 for (auto *Proto : OP->protocols())
2804 CollectInheritedProtocols(CDecl: Proto, Protocols);
2805 }
2806}
2807
2808static bool unionHasUniqueObjectRepresentations(const ASTContext &Context,
2809 const RecordDecl *RD,
2810 bool CheckIfTriviallyCopyable) {
2811 assert(RD->isUnion() && "Must be union type");
2812 CharUnits UnionSize =
2813 Context.getTypeSizeInChars(T: Context.getCanonicalTagType(TD: RD));
2814
2815 for (const auto *Field : RD->fields()) {
2816 if (!Context.hasUniqueObjectRepresentations(Ty: Field->getType(),
2817 CheckIfTriviallyCopyable))
2818 return false;
2819 CharUnits FieldSize = Context.getTypeSizeInChars(T: Field->getType());
2820 if (FieldSize != UnionSize)
2821 return false;
2822 }
2823 return !RD->field_empty();
2824}
2825
/// Return the bit offset of \p Field within its parent record. The layout
/// parameter is unused for fields; it exists so this overload set can be
/// used generically by structSubobjectsHaveUniqueObjectRepresentations.
static int64_t getSubobjectOffset(const FieldDecl *Field,
                                  const ASTContext &Context,
                                  const clang::ASTRecordLayout & /*Layout*/) {
  return Context.getFieldOffset(FD: Field);
}
2831
/// Return the bit offset of base class \p RD within the layout of the
/// derived record described by \p Layout.
static int64_t getSubobjectOffset(const CXXRecordDecl *RD,
                                  const ASTContext &Context,
                                  const clang::ASTRecordLayout &Layout) {
  return Context.toBits(CharSize: Layout.getBaseClassOffset(Base: RD));
}
2837
// Forward declaration; needed because getSubobjectSizeInBits below recurses
// into struct members via this helper.
static std::optional<int64_t>
structHasUniqueObjectRepresentations(const ASTContext &Context,
                                     const RecordDecl *RD,
                                     bool CheckIfTriviallyCopyable);
2842
/// Return the number of bits a field contributes to its record's object
/// representation, or std::nullopt if the field's type does not have unique
/// object representations (making the whole record non-unique).
static std::optional<int64_t>
getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context,
                       bool CheckIfTriviallyCopyable) {
  // Non-union record members recurse into the struct-layout check.
  if (const auto *RD = Field->getType()->getAsRecordDecl();
      RD && !RD->isUnion())
    return structHasUniqueObjectRepresentations(Context, RD,
                                                CheckIfTriviallyCopyable);

  // A _BitInt type may not be unique if it has padding bits
  // but if it is a bitfield the padding bits are not used.
  bool IsBitIntType = Field->getType()->isBitIntType();
  if (!Field->getType()->isReferenceType() && !IsBitIntType &&
      !Context.hasUniqueObjectRepresentations(Ty: Field->getType(),
                                              CheckIfTriviallyCopyable))
    return std::nullopt;

  int64_t FieldSizeInBits =
      Context.toBits(CharSize: Context.getTypeSizeInChars(T: Field->getType()));
  if (Field->isBitField()) {
    // If we have explicit padding bits, they don't contribute bits
    // to the actual object representation, so return 0.
    if (Field->isUnnamedBitField())
      return 0;

    // A bit-field wider than its declared type's value bits implies padding.
    int64_t BitfieldSize = Field->getBitWidthValue();
    if (IsBitIntType) {
      if ((unsigned)BitfieldSize >
          cast<BitIntType>(Val: Field->getType())->getNumBits())
        return std::nullopt;
    } else if (BitfieldSize > FieldSizeInBits) {
      return std::nullopt;
    }
    FieldSizeInBits = BitfieldSize;
  } else if (IsBitIntType && !Context.hasUniqueObjectRepresentations(
                                 Ty: Field->getType(), CheckIfTriviallyCopyable)) {
    // Non-bit-field _BitInt members must themselves be padding-free.
    return std::nullopt;
  }
  return FieldSizeInBits;
}
2882
/// Base-class overload: a base subobject's contribution is the result of the
/// full struct-layout uniqueness check on the base.
static std::optional<int64_t>
getSubobjectSizeInBits(const CXXRecordDecl *RD, const ASTContext &Context,
                       bool CheckIfTriviallyCopyable) {
  return structHasUniqueObjectRepresentations(Context, RD,
                                              CheckIfTriviallyCopyable);
}
2889
2890template <typename RangeT>
2891static std::optional<int64_t> structSubobjectsHaveUniqueObjectRepresentations(
2892 const RangeT &Subobjects, int64_t CurOffsetInBits,
2893 const ASTContext &Context, const clang::ASTRecordLayout &Layout,
2894 bool CheckIfTriviallyCopyable) {
2895 for (const auto *Subobject : Subobjects) {
2896 std::optional<int64_t> SizeInBits =
2897 getSubobjectSizeInBits(Subobject, Context, CheckIfTriviallyCopyable);
2898 if (!SizeInBits)
2899 return std::nullopt;
2900 if (*SizeInBits != 0) {
2901 int64_t Offset = getSubobjectOffset(Subobject, Context, Layout);
2902 if (Offset != CurOffsetInBits)
2903 return std::nullopt;
2904 CurOffsetInBits += *SizeInBits;
2905 }
2906 }
2907 return CurOffsetInBits;
2908}
2909
/// Check whether a non-union record has unique object representations,
/// returning its occupied size in bits on success, or std::nullopt if any
/// padding or non-unique subobject makes the representation non-unique.
static std::optional<int64_t>
structHasUniqueObjectRepresentations(const ASTContext &Context,
                                     const RecordDecl *RD,
                                     bool CheckIfTriviallyCopyable) {
  assert(!RD->isUnion() && "Must be struct/class type");
  const auto &Layout = Context.getASTRecordLayout(D: RD);

  int64_t CurOffsetInBits = 0;
  if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(Val: RD)) {
    // A vtable pointer makes the representation non-unique.
    if (ClassDecl->isDynamicClass())
      return std::nullopt;

    SmallVector<CXXRecordDecl *, 4> Bases;
    for (const auto &Base : ClassDecl->bases()) {
      // Empty types can be inherited from, and non-empty types can potentially
      // have tail padding, so just make sure there isn't an error.
      Bases.emplace_back(Args: Base.getType()->getAsCXXRecordDecl());
    }

    // Bases are checked in layout order, not declaration order.
    llvm::sort(C&: Bases, Comp: [&](const CXXRecordDecl *L, const CXXRecordDecl *R) {
      return Layout.getBaseClassOffset(Base: L) < Layout.getBaseClassOffset(Base: R);
    });

    std::optional<int64_t> OffsetAfterBases =
        structSubobjectsHaveUniqueObjectRepresentations(
            Subobjects: Bases, CurOffsetInBits, Context, Layout, CheckIfTriviallyCopyable);
    if (!OffsetAfterBases)
      return std::nullopt;
    CurOffsetInBits = *OffsetAfterBases;
  }

  // Fields must contiguously continue where the bases left off.
  std::optional<int64_t> OffsetAfterFields =
      structSubobjectsHaveUniqueObjectRepresentations(
          Subobjects: RD->fields(), CurOffsetInBits, Context, Layout,
          CheckIfTriviallyCopyable);
  if (!OffsetAfterFields)
    return std::nullopt;
  CurOffsetInBits = *OffsetAfterFields;

  return CurOffsetInBits;
}
2951
/// Implements the semantics of std::has_unique_object_representations:
/// returns true if any two objects of type \p Ty with the same value have
/// the same object representation (i.e. no padding bits and no
/// representation-dependent state such as pointer authentication).
bool ASTContext::hasUniqueObjectRepresentations(
    QualType Ty, bool CheckIfTriviallyCopyable) const {
  // C++17 [meta.unary.prop]:
  //   The predicate condition for a template specialization
  //   has_unique_object_representations<T> shall be satisfied if and only if:
  //     (9.1) - T is trivially copyable, and
  //     (9.2) - any two objects of type T with the same value have the same
  //     object representation, where:
  //     - two objects of array or non-union class type are considered to have
  //       the same value if their respective sequences of direct subobjects
  //       have the same values, and
  //     - two objects of union type are considered to have the same value if
  //       they have the same active member and the corresponding members have
  //       the same value.
  //   The set of scalar types for which this condition holds is
  //   implementation-defined. [ Note: If a type has padding bits, the condition
  //   does not hold; otherwise, the condition holds true for unsigned integral
  //   types. -- end note ]
  assert(!Ty.isNull() && "Null QualType sent to unique object rep check");

  // Arrays are unique only if their element type is unique.
  if (Ty->isArrayType())
    return hasUniqueObjectRepresentations(Ty: getBaseElementType(QT: Ty),
                                          CheckIfTriviallyCopyable);

  assert((Ty->isVoidType() || !Ty->isIncompleteType()) &&
         "hasUniqueObjectRepresentations should not be called with an "
         "incomplete type");

  // (9.1) - T is trivially copyable...
  if (CheckIfTriviallyCopyable && !Ty.isTriviallyCopyableType(Context: *this))
    return false;

  // All integrals and enums are unique.
  if (Ty->isIntegralOrEnumerationType()) {
    // Address discriminated integer types are not unique.
    if (Ty.hasAddressDiscriminatedPointerAuth())
      return false;
    // Except _BitInt types that have padding bits.
    if (const auto *BIT = Ty->getAs<BitIntType>())
      return getTypeSize(T: BIT) == BIT->getNumBits();

    return true;
  }

  // All other pointers are unique.
  if (Ty->isPointerType())
    return !Ty.hasAddressDiscriminatedPointerAuth();

  // Member pointers are unique only when the ABI representation has no
  // padding (e.g. some MS-ABI member function pointers do).
  if (const auto *MPT = Ty->getAs<MemberPointerType>())
    return !ABI->getMemberPointerInfo(MPT).HasPadding;

  if (const auto *Record = Ty->getAsRecordDecl()) {
    if (Record->isInvalidDecl())
      return false;

    if (Record->isUnion())
      return unionHasUniqueObjectRepresentations(Context: *this, RD: Record,
                                                 CheckIfTriviallyCopyable);

    // A struct is unique iff its subobjects tile it exactly: the computed
    // occupied size must equal the full type size (no tail padding).
    std::optional<int64_t> StructSize = structHasUniqueObjectRepresentations(
        Context: *this, RD: Record, CheckIfTriviallyCopyable);

    return StructSize && *StructSize == static_cast<int64_t>(getTypeSize(T: Ty));
  }

  // FIXME: More cases to handle here (list by rsmith):
  // vectors (careful about, eg, vector of 3 foo)
  // _Complex int and friends
  // _Atomic T
  // Obj-C block pointers
  // Obj-C object pointers
  // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t,
  // clk_event_t, queue_t, reserve_id_t)
  // There're also Obj-C class types and the Obj-C selector type, but I think it
  // makes sense for those to return false here.

  return false;
}
3031
3032unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const {
3033 unsigned count = 0;
3034 // Count ivars declared in class extension.
3035 for (const auto *Ext : OI->known_extensions())
3036 count += Ext->ivar_size();
3037
3038 // Count ivar defined in this class's implementation. This
3039 // includes synthesized ivars.
3040 if (ObjCImplementationDecl *ImplDecl = OI->getImplementation())
3041 count += ImplDecl->ivar_size();
3042
3043 return count;
3044}
3045
3046bool ASTContext::isSentinelNullExpr(const Expr *E) {
3047 if (!E)
3048 return false;
3049
3050 // nullptr_t is always treated as null.
3051 if (E->getType()->isNullPtrType()) return true;
3052
3053 if (E->getType()->isAnyPointerType() &&
3054 E->IgnoreParenCasts()->isNullPointerConstant(Ctx&: *this,
3055 NPC: Expr::NPC_ValueDependentIsNull))
3056 return true;
3057
3058 // Unfortunately, __null has type 'int'.
3059 if (isa<GNUNullExpr>(Val: E)) return true;
3060
3061 return false;
3062}
3063
3064/// Get the implementation of ObjCInterfaceDecl, or nullptr if none
3065/// exists.
3066ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) {
3067 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
3068 I = ObjCImpls.find(Val: D);
3069 if (I != ObjCImpls.end())
3070 return cast<ObjCImplementationDecl>(Val: I->second);
3071 return nullptr;
3072}
3073
3074/// Get the implementation of ObjCCategoryDecl, or nullptr if none
3075/// exists.
3076ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) {
3077 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
3078 I = ObjCImpls.find(Val: D);
3079 if (I != ObjCImpls.end())
3080 return cast<ObjCCategoryImplDecl>(Val: I->second);
3081 return nullptr;
3082}
3083
/// Set the implementation of ObjCInterfaceDecl. Overwrites any previously
/// recorded implementation for \p IFaceD.
void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD,
                           ObjCImplementationDecl *ImplD) {
  assert(IFaceD && ImplD && "Passed null params");
  ObjCImpls[IFaceD] = ImplD;
}
3090
/// Set the implementation of ObjCCategoryDecl. Overwrites any previously
/// recorded implementation for \p CatD.
void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD,
                           ObjCCategoryImplDecl *ImplD) {
  assert(CatD && ImplD && "Passed null params");
  ObjCImpls[CatD] = ImplD;
}
3097
/// Return the recorded redeclaration of \p MD, or nullptr if none was set.
const ObjCMethodDecl *
ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const {
  return ObjCMethodRedecls.lookup(Val: MD);
}
3102
/// Record \p Redecl as the redeclaration of \p MD. A method may have at
/// most one recorded redeclaration.
void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD,
                                            const ObjCMethodDecl *Redecl) {
  assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration");
  ObjCMethodRedecls[MD] = Redecl;
}
3108
3109const ObjCInterfaceDecl *ASTContext::getObjContainingInterface(
3110 const NamedDecl *ND) const {
3111 if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(Val: ND->getDeclContext()))
3112 return ID;
3113 if (const auto *CD = dyn_cast<ObjCCategoryDecl>(Val: ND->getDeclContext()))
3114 return CD->getClassInterface();
3115 if (const auto *IMD = dyn_cast<ObjCImplDecl>(Val: ND->getDeclContext()))
3116 return IMD->getClassInterface();
3117
3118 return nullptr;
3119}
3120
/// Get the copy initialization expression of VarDecl, or nullptr if
/// none exists.
BlockVarCopyInit ASTContext::getBlockVarCopyInit(const VarDecl *VD) const {
  assert(VD && "Passed null params");
  assert(VD->hasAttr<BlocksAttr>() &&
         "getBlockVarCopyInits - not __block var");
  auto I = BlockVarCopyInits.find(Val: VD);
  if (I != BlockVarCopyInits.end())
    return I->second;
  // Not recorded: no copy expression, cannot throw.
  return {nullptr, false};
}
3132
/// Set the copy initialization expression of a block var decl. \p CanThrow
/// records whether the copy expression can throw, for exception-spec
/// purposes.
void ASTContext::setBlockVarCopyInit(const VarDecl*VD, Expr *CopyExpr,
                                     bool CanThrow) {
  assert(VD && CopyExpr && "Passed null params");
  assert(VD->hasAttr<BlocksAttr>() &&
         "setBlockVarCopyInits - not __block var");
  BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow);
}
3141
3142TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T,
3143 unsigned DataSize) const {
3144 if (!DataSize)
3145 DataSize = TypeLoc::getFullDataSizeForType(Ty: T);
3146 else
3147 assert(DataSize == TypeLoc::getFullDataSizeForType(T) &&
3148 "incorrect data size provided to CreateTypeSourceInfo!");
3149
3150 auto *TInfo =
3151 (TypeSourceInfo*)BumpAlloc.Allocate(Size: sizeof(TypeSourceInfo) + DataSize, Alignment: 8);
3152 new (TInfo) TypeSourceInfo(T, DataSize);
3153 return TInfo;
3154}
3155
3156TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T,
3157 SourceLocation L) const {
3158 TypeSourceInfo *TSI = CreateTypeSourceInfo(T);
3159 TSI->getTypeLoc().initialize(Context&: const_cast<ASTContext &>(*this), Loc: L);
3160 return TSI;
3161}
3162
/// Return the record layout for the given ObjC interface; thin public
/// wrapper over the internal getObjCLayout helper.
const ASTRecordLayout &
ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const {
  return getObjCLayout(D);
}
3167
3168static auto getCanonicalTemplateArguments(const ASTContext &C,
3169 ArrayRef<TemplateArgument> Args,
3170 bool &AnyNonCanonArgs) {
3171 SmallVector<TemplateArgument, 16> CanonArgs(Args);
3172 AnyNonCanonArgs |= C.canonicalizeTemplateArguments(Args: CanonArgs);
3173 return CanonArgs;
3174}
3175
3176bool ASTContext::canonicalizeTemplateArguments(
3177 MutableArrayRef<TemplateArgument> Args) const {
3178 bool AnyNonCanonArgs = false;
3179 for (auto &Arg : Args) {
3180 TemplateArgument OrigArg = Arg;
3181 Arg = getCanonicalTemplateArgument(Arg);
3182 AnyNonCanonArgs |= !Arg.structurallyEquals(Other: OrigArg);
3183 }
3184 return AnyNonCanonArgs;
3185}
3186
3187//===----------------------------------------------------------------------===//
3188// Type creation/memoization methods
3189//===----------------------------------------------------------------------===//
3190
/// Return the uniqued type that attaches the extended (non-fast) qualifiers
/// in \p quals to \p baseType; fast qualifiers are carried in the QualType
/// bits instead of the ExtQuals node.
QualType
ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const {
  // Fast qualifiers live in the QualType pointer bits, not in the node.
  unsigned fastQuals = quals.getFastQualifiers();
  quals.removeFastQualifiers();

  // Check if we've already instantiated this type.
  llvm::FoldingSetNodeID ID;
  ExtQuals::Profile(ID, BaseType: baseType, Quals: quals);
  void *insertPos = nullptr;
  if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, InsertPos&: insertPos)) {
    assert(eq->getQualifiers() == quals);
    return QualType(eq, fastQuals);
  }

  // If the base type is not canonical, make the appropriate canonical type.
  QualType canon;
  if (!baseType->isCanonicalUnqualified()) {
    SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split();
    canonSplit.Quals.addConsistentQualifiers(qs: quals);
    canon = getExtQualType(baseType: canonSplit.Ty, quals: canonSplit.Quals);

    // Re-find the insert position: the recursive call above may have
    // modified the folding set and invalidated the old position.
    (void) ExtQualNodes.FindNodeOrInsertPos(ID, InsertPos&: insertPos);
  }

  auto *eq = new (*this, alignof(ExtQuals)) ExtQuals(baseType, canon, quals);
  ExtQualNodes.InsertNode(N: eq, InsertPos: insertPos);
  return QualType(eq, fastQuals);
}
3220
3221QualType ASTContext::getAddrSpaceQualType(QualType T,
3222 LangAS AddressSpace) const {
3223 QualType CanT = getCanonicalType(T);
3224 if (CanT.getAddressSpace() == AddressSpace)
3225 return T;
3226
3227 // If we are composing extended qualifiers together, merge together
3228 // into one ExtQuals node.
3229 QualifierCollector Quals;
3230 const Type *TypeNode = Quals.strip(type: T);
3231
3232 // If this type already has an address space specified, it cannot get
3233 // another one.
3234 assert(!Quals.hasAddressSpace() &&
3235 "Type cannot be in multiple addr spaces!");
3236 Quals.addAddressSpace(space: AddressSpace);
3237
3238 return getExtQualType(baseType: TypeNode, quals: Quals);
3239}
3240
/// Return \p T with its address-space qualifier removed, preserving all
/// other qualifiers. Handles arrays (where the qualifier sits on the
/// element type) and sugar that hides the qualifier.
QualType ASTContext::removeAddrSpaceQualType(QualType T) const {
  // If the type is not qualified with an address space, just return it
  // immediately.
  if (!T.hasAddressSpace())
    return T;

  QualifierCollector Quals;
  const Type *TypeNode;
  // For arrays, strip the qualifier off the element type, then reconstruct the
  // array type
  if (T.getTypePtr()->isArrayType()) {
    T = getUnqualifiedArrayType(T, Quals);
    TypeNode = T.getTypePtr();
  } else {
    // If we are composing extended qualifiers together, merge together
    // into one ExtQuals node.
    while (T.hasAddressSpace()) {
      TypeNode = Quals.strip(type: T);

      // If the type no longer has an address space after stripping qualifiers,
      // jump out.
      if (!QualType(TypeNode, 0).hasAddressSpace())
        break;

      // There might be sugar in the way. Strip it and try again.
      T = T.getSingleStepDesugaredType(Context: *this);
    }
  }

  Quals.removeAddressSpace();

  // Removal of the address space can mean there are no longer any
  // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts)
  // or required.
  if (Quals.hasNonFastQualifiers())
    return getExtQualType(baseType: TypeNode, quals: Quals);
  else
    return QualType(TypeNode, Quals.getFastQualifiers());
}
3280
3281uint16_t
3282ASTContext::getPointerAuthVTablePointerDiscriminator(const CXXRecordDecl *RD) {
3283 assert(RD->isPolymorphic() &&
3284 "Attempted to get vtable pointer discriminator on a monomorphic type");
3285 std::unique_ptr<MangleContext> MC(createMangleContext());
3286 SmallString<256> Str;
3287 llvm::raw_svector_ostream Out(Str);
3288 MC->mangleCXXVTable(RD, Out);
3289 return llvm::getPointerAuthStableSipHash(S: Str);
3290}
3291
/// Encode a function type for use in the discriminator of a function pointer
/// type. We can't use the itanium scheme for this since C has quite permissive
/// rules for type compatibility that we need to be compatible with.
///
/// Formally, this function associates every function pointer type T with an
/// encoded string E(T). Let the equivalence relation T1 ~ T2 be defined as
/// E(T1) == E(T2). E(T) is part of the ABI of values of type T. C type
/// compatibility requires equivalent treatment under the ABI, so
/// CCompatible(T1, T2) must imply E(T1) == E(T2), that is, CCompatible must be
/// a subset of ~. Crucially, however, it must be a proper subset because
/// CCompatible is not an equivalence relation: for example, int[] is compatible
/// with both int[1] and int[2], but the latter are not compatible with each
/// other. Therefore this encoding function must be careful to only distinguish
/// types if there is no third type with which they are both required to be
/// compatible.
static void encodeTypeForFunctionPointerAuth(const ASTContext &Ctx,
                                             raw_ostream &OS, QualType QT) {
  // FIXME: Consider address space qualifiers.
  const Type *T = QT.getCanonicalType().getTypePtr();

  // FIXME: Consider using the C++ type mangling when we encounter a construct
  // that is incompatible with C.

  switch (T->getTypeClass()) {
  case Type::Atomic:
    // _Atomic is transparent for discrimination purposes: encode the value
    // type directly.
    return encodeTypeForFunctionPointerAuth(
        Ctx, OS, QT: cast<AtomicType>(Val: T)->getValueType());

  case Type::LValueReference:
    OS << "R";
    encodeTypeForFunctionPointerAuth(Ctx, OS,
                                     QT: cast<ReferenceType>(Val: T)->getPointeeType());
    return;
  case Type::RValueReference:
    OS << "O";
    encodeTypeForFunctionPointerAuth(Ctx, OS,
                                     QT: cast<ReferenceType>(Val: T)->getPointeeType());
    return;

  case Type::Pointer:
    // C11 6.7.6.1p2:
    //   For two pointer types to be compatible, both shall be identically
    //   qualified and both shall be pointers to compatible types.
    // FIXME: we should also consider pointee types.
    OS << "P";
    return;

  // Objective-C object pointers and block pointers encode like plain
  // pointers.
  case Type::ObjCObjectPointer:
  case Type::BlockPointer:
    OS << "P";
    return;

  case Type::Complex:
    OS << "C";
    return encodeTypeForFunctionPointerAuth(
        Ctx, OS, QT: cast<ComplexType>(Val: T)->getElementType());

  case Type::VariableArray:
  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::ArrayParameter:
    // C11 6.7.6.2p6:
    //   For two array types to be compatible, both shall have compatible
    //   element types, and if both size specifiers are present, and are integer
    //   constant expressions, then both size specifiers shall have the same
    //   constant value [...]
    //
    // So since ElemType[N] has to be compatible ElemType[], we can't encode the
    // width of the array.
    OS << "A";
    return encodeTypeForFunctionPointerAuth(
        Ctx, OS, QT: cast<ArrayType>(Val: T)->getElementType());

  case Type::ObjCInterface:
  case Type::ObjCObject:
    OS << "<objc_object>";
    return;

  case Type::Enum: {
    // C11 6.7.2.2p4:
    //   Each enumerated type shall be compatible with char, a signed integer
    //   type, or an unsigned integer type.
    //
    // So we have to treat enum types as integers.
    QualType UnderlyingType = T->castAsEnumDecl()->getIntegerType();
    return encodeTypeForFunctionPointerAuth(
        Ctx, OS, QT: UnderlyingType.isNull() ? Ctx.IntTy : UnderlyingType);
  }

  case Type::FunctionNoProto:
  case Type::FunctionProto: {
    // C11 6.7.6.3p15:
    //   For two function types to be compatible, both shall specify compatible
    //   return types. Moreover, the parameter type lists, if both are present,
    //   shall agree in the number of parameters and in the use of the ellipsis
    //   terminator; corresponding parameters shall have compatible types.
    //
    // That paragraph goes on to describe how unprototyped functions are to be
    // handled, which we ignore here. Unprototyped function pointers are hashed
    // as though they were prototyped nullary functions since thats probably
    // what the user meant. This behavior is non-conforming.
    // FIXME: If we add a "custom discriminator" function type attribute we
    // should encode functions as their discriminators.
    OS << "F";
    const auto *FuncType = cast<FunctionType>(Val: T);
    encodeTypeForFunctionPointerAuth(Ctx, OS, QT: FuncType->getReturnType());
    if (const auto *FPT = dyn_cast<FunctionProtoType>(Val: FuncType)) {
      for (QualType Param : FPT->param_types()) {
        // Encode parameters after the decay/signature adjustments so that
        // e.g. array and pointer parameters hash identically.
        Param = Ctx.getSignatureParameterType(T: Param);
        encodeTypeForFunctionPointerAuth(Ctx, OS, QT: Param);
      }
      if (FPT->isVariadic())
        OS << "z";
    }
    OS << "E";
    return;
  }

  case Type::MemberPointer: {
    OS << "M";
    const auto *MPT = T->castAs<MemberPointerType>();
    encodeTypeForFunctionPointerAuth(
        Ctx, OS, QT: QualType(MPT->getQualifier().getAsType(), 0));
    encodeTypeForFunctionPointerAuth(Ctx, OS, QT: MPT->getPointeeType());
    return;
  }
  case Type::ExtVector:
  case Type::Vector:
    // Vectors are encoded by their total size in chars only.
    OS << "Dv" << Ctx.getTypeSizeInChars(T).getQuantity();
    break; // falls off the end of the switch; nothing further is appended

  // Don't bother discriminating based on these types.
  case Type::Pipe:
  case Type::BitInt:
  case Type::ConstantMatrix:
    OS << "?";
    return;

  case Type::Builtin: {
    const auto *BTy = T->castAs<BuiltinType>();
    switch (BTy->getKind()) {
#define SIGNED_TYPE(Id, SingletonId) \
  case BuiltinType::Id: \
    OS << "i"; \
    return;
// Note that unsigned integer types deliberately share the "i" encoding with
// signed ones, so signedness does not affect the discriminator.
#define UNSIGNED_TYPE(Id, SingletonId) \
  case BuiltinType::Id: \
    OS << "i"; \
    return;
#define PLACEHOLDER_TYPE(Id, SingletonId) case BuiltinType::Id:
#define BUILTIN_TYPE(Id, SingletonId)
#include "clang/AST/BuiltinTypes.def"
      llvm_unreachable("placeholder types should not appear here.");

    case BuiltinType::Half:
      OS << "Dh";
      return;
    case BuiltinType::Float:
      OS << "f";
      return;
    case BuiltinType::Double:
      OS << "d";
      return;
    case BuiltinType::LongDouble:
      OS << "e";
      return;
    case BuiltinType::Float16:
      OS << "DF16_";
      return;
    case BuiltinType::Float128:
      OS << "g";
      return;

    case BuiltinType::Void:
      OS << "v";
      return;

    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
    case BuiltinType::NullPtr:
      OS << "P";
      return;

    // Don't bother discriminating based on OpenCL types.
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
    case BuiltinType::BFloat16:
    case BuiltinType::VectorQuad:
    case BuiltinType::VectorPair:
    case BuiltinType::DMR1024:
    case BuiltinType::DMR2048:
      OS << "?";
      return;

    // Don't bother discriminating based on these seldom-used types.
    case BuiltinType::Ibm128:
      return;
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
  case BuiltinType::Id: \
    return;
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
  case BuiltinType::Id: \
    return;
#include "clang/Basic/OpenCLExtensionTypes.def"
#define SVE_TYPE(Name, Id, SingletonId) \
  case BuiltinType::Id: \
    return;
#include "clang/Basic/AArch64ACLETypes.def"
#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) \
  case BuiltinType::Id: \
    return;
#include "clang/Basic/HLSLIntangibleTypes.def"
    case BuiltinType::Dependent:
      llvm_unreachable("should never get here");
#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) case BuiltinType::Id:
#include "clang/Basic/AMDGPUTypes.def"
    case BuiltinType::WasmExternRef:
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
      llvm_unreachable("not yet implemented");
    }
    llvm_unreachable("should never get here");
  }
  case Type::Record: {
    const RecordDecl *RD = T->castAsCanonical<RecordType>()->getDecl();
    const IdentifierInfo *II = RD->getIdentifier();

    // In C++, an immediate typedef of an anonymous struct or union
    // is considered to name it for ODR purposes, but C's specification
    // of type compatibility does not have a similar rule.  Using the typedef
    // name in function type discriminators anyway, as we do here,
    // therefore technically violates the C standard: two function pointer
    // types defined in terms of two typedef'd anonymous structs with
    // different names are formally still compatible, but we are assigning
    // them different discriminators and therefore incompatible ABIs.
    //
    // This is a relatively minor violation that significantly improves
    // discrimination in some cases and has not caused problems in
    // practice.  Regardless, it is now part of the ABI in places where
    // function type discrimination is used, and it can no longer be
    // changed except on new platforms.

    if (!II)
      if (const TypedefNameDecl *Typedef = RD->getTypedefNameForAnonDecl())
        II = Typedef->getDeclName().getAsIdentifierInfo();

    if (!II) {
      OS << "<anonymous_record>";
      return;
    }
    // Emit the name length followed by the name itself.
    OS << II->getLength() << II->getName();
    return;
  }
  case Type::HLSLAttributedResource:
  case Type::HLSLInlineSpirv:
    llvm_unreachable("should never get here");
    break;
  case Type::OverflowBehavior:
    llvm_unreachable("should never get here");
    break;
  case Type::DeducedTemplateSpecialization:
  case Type::Auto:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define ABSTRACT_TYPE(Class, Base)
#define TYPE(Class, Base)
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("unexpected non-canonical or dependent type!");
    return;
  }
}
3569
/// Compute the 16-bit pointer-auth discriminator for type \p T.
///
/// Function types (including the pointee of function pointer/reference
/// types) are encoded with the C-compatible scheme implemented by
/// encodeTypeForFunctionPointerAuth; all other types are mangled with the
/// C++ mangler, after stripping qualifiers and (for member function
/// pointers) any top-level exception specification.
uint16_t ASTContext::getPointerAuthTypeDiscriminator(QualType T) {
  assert(!T->isDependentType() &&
         "cannot compute type discriminator of a dependent type");
  SmallString<256> Str;
  llvm::raw_svector_ostream Out(Str);

  // Discriminate function pointers/references by their pointee type.
  if (T->isFunctionPointerType() || T->isFunctionReferenceType())
    T = T->getPointeeType();

  if (T->isFunctionType()) {
    encodeTypeForFunctionPointerAuth(Ctx: *this, OS&: Out, QT: T);
  } else {
    T = T.getUnqualifiedType();
    // Calls to member function pointers don't need to worry about
    // language interop or the laxness of the C type compatibility rules.
    // We just mangle the member pointer type directly, which is
    // implicitly much stricter about type matching. However, we do
    // strip any top-level exception specification before this mangling.
    // C++23 requires calls to work when the function type is convertible
    // to the pointer type by a function pointer conversion, which can
    // change the exception specification. This does not technically
    // require the exception specification to not affect representation,
    // because the function pointer conversion is still always a direct
    // value conversion and therefore an opportunity to resign the
    // pointer. (This is in contrast to e.g. qualification conversions,
    // which can be applied in nested pointer positions, effectively
    // requiring qualified and unqualified representations to match.)
    // However, it is pragmatic to ignore exception specifications
    // because it allows a certain amount of `noexcept` mismatching
    // to not become a visible ODR problem. This also leaves some
    // room for the committee to add laxness to function pointer
    // conversions in future standards.
    if (auto *MPT = T->getAs<MemberPointerType>())
      if (MPT->isMemberFunctionPointer()) {
        QualType PointeeType = MPT->getPointeeType();
        if (PointeeType->castAs<FunctionProtoType>()->getExceptionSpecType() !=
            EST_None) {
          // Rebuild the member pointer with the exception spec removed.
          QualType FT = getFunctionTypeWithExceptionSpec(Orig: PointeeType, ESI: EST_None);
          T = getMemberPointerType(T: FT, Qualifier: MPT->getQualifier(),
                                   Cls: MPT->getMostRecentCXXRecordDecl());
        }
      }
    std::unique_ptr<MangleContext> MC(createMangleContext());
    MC->mangleCanonicalTypeName(T, Out);
  }

  // Hash the encoding down to the 16-bit discriminator value.
  return llvm::getPointerAuthStableSipHash(S: Str);
}
3618
3619QualType ASTContext::getObjCGCQualType(QualType T,
3620 Qualifiers::GC GCAttr) const {
3621 QualType CanT = getCanonicalType(T);
3622 if (CanT.getObjCGCAttr() == GCAttr)
3623 return T;
3624
3625 if (const auto *ptr = T->getAs<PointerType>()) {
3626 QualType Pointee = ptr->getPointeeType();
3627 if (Pointee->isAnyPointerType()) {
3628 QualType ResultType = getObjCGCQualType(T: Pointee, GCAttr);
3629 return getPointerType(T: ResultType);
3630 }
3631 }
3632
3633 // If we are composing extended qualifiers together, merge together
3634 // into one ExtQuals node.
3635 QualifierCollector Quals;
3636 const Type *TypeNode = Quals.strip(type: T);
3637
3638 // If this type already has an ObjCGC specified, it cannot get
3639 // another one.
3640 assert(!Quals.hasObjCGCAttr() &&
3641 "Type cannot have multiple ObjCGCs!");
3642 Quals.addObjCGCAttr(type: GCAttr);
3643
3644 return getExtQualType(baseType: TypeNode, quals: Quals);
3645}
3646
3647QualType ASTContext::removePtrSizeAddrSpace(QualType T) const {
3648 if (const PointerType *Ptr = T->getAs<PointerType>()) {
3649 QualType Pointee = Ptr->getPointeeType();
3650 if (isPtrSizeAddressSpace(AS: Pointee.getAddressSpace())) {
3651 return getPointerType(T: removeAddrSpaceQualType(T: Pointee));
3652 }
3653 }
3654 return T;
3655}
3656
3657QualType ASTContext::getCountAttributedType(
3658 QualType WrappedTy, Expr *CountExpr, bool CountInBytes, bool OrNull,
3659 ArrayRef<TypeCoupledDeclRefInfo> DependentDecls) const {
3660 assert(WrappedTy->isPointerType() || WrappedTy->isArrayType());
3661
3662 llvm::FoldingSetNodeID ID;
3663 CountAttributedType::Profile(ID, WrappedTy, CountExpr, CountInBytes, Nullable: OrNull);
3664
3665 void *InsertPos = nullptr;
3666 CountAttributedType *CATy =
3667 CountAttributedTypes.FindNodeOrInsertPos(ID, InsertPos);
3668 if (CATy)
3669 return QualType(CATy, 0);
3670
3671 QualType CanonTy = getCanonicalType(T: WrappedTy);
3672 size_t Size = CountAttributedType::totalSizeToAlloc<TypeCoupledDeclRefInfo>(
3673 Counts: DependentDecls.size());
3674 CATy = (CountAttributedType *)Allocate(Size, Align: TypeAlignment);
3675 new (CATy) CountAttributedType(WrappedTy, CanonTy, CountExpr, CountInBytes,
3676 OrNull, DependentDecls);
3677 Types.push_back(Elt: CATy);
3678 CountAttributedTypes.InsertNode(N: CATy, InsertPos);
3679
3680 return QualType(CATy, 0);
3681}
3682
3683QualType
3684ASTContext::adjustType(QualType Orig,
3685 llvm::function_ref<QualType(QualType)> Adjust) const {
3686 switch (Orig->getTypeClass()) {
3687 case Type::Attributed: {
3688 const auto *AT = cast<AttributedType>(Val&: Orig);
3689 return getAttributedType(attrKind: AT->getAttrKind(),
3690 modifiedType: adjustType(Orig: AT->getModifiedType(), Adjust),
3691 equivalentType: adjustType(Orig: AT->getEquivalentType(), Adjust),
3692 attr: AT->getAttr());
3693 }
3694
3695 case Type::BTFTagAttributed: {
3696 const auto *BTFT = dyn_cast<BTFTagAttributedType>(Val&: Orig);
3697 return getBTFTagAttributedType(BTFAttr: BTFT->getAttr(),
3698 Wrapped: adjustType(Orig: BTFT->getWrappedType(), Adjust));
3699 }
3700
3701 case Type::OverflowBehavior: {
3702 const auto *OB = dyn_cast<OverflowBehaviorType>(Val&: Orig);
3703 return getOverflowBehaviorType(Kind: OB->getBehaviorKind(),
3704 Wrapped: adjustType(Orig: OB->getUnderlyingType(), Adjust));
3705 }
3706
3707 case Type::Paren:
3708 return getParenType(
3709 NamedType: adjustType(Orig: cast<ParenType>(Val&: Orig)->getInnerType(), Adjust));
3710
3711 case Type::Adjusted: {
3712 const auto *AT = cast<AdjustedType>(Val&: Orig);
3713 return getAdjustedType(Orig: AT->getOriginalType(),
3714 New: adjustType(Orig: AT->getAdjustedType(), Adjust));
3715 }
3716
3717 case Type::MacroQualified: {
3718 const auto *MQT = cast<MacroQualifiedType>(Val&: Orig);
3719 return getMacroQualifiedType(UnderlyingTy: adjustType(Orig: MQT->getUnderlyingType(), Adjust),
3720 MacroII: MQT->getMacroIdentifier());
3721 }
3722
3723 default:
3724 return Adjust(Orig);
3725 }
3726}
3727
3728const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T,
3729 FunctionType::ExtInfo Info) {
3730 if (T->getExtInfo() == Info)
3731 return T;
3732
3733 QualType Result;
3734 if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(Val: T)) {
3735 Result = getFunctionNoProtoType(ResultTy: FNPT->getReturnType(), Info);
3736 } else {
3737 const auto *FPT = cast<FunctionProtoType>(Val: T);
3738 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
3739 EPI.ExtInfo = Info;
3740 Result = getFunctionType(ResultTy: FPT->getReturnType(), Args: FPT->getParamTypes(), EPI);
3741 }
3742
3743 return cast<FunctionType>(Val: Result.getTypePtr());
3744}
3745
3746QualType ASTContext::adjustFunctionResultType(QualType FunctionType,
3747 QualType ResultType) {
3748 return adjustType(Orig: FunctionType, Adjust: [&](QualType Orig) {
3749 if (const auto *FNPT = Orig->getAs<FunctionNoProtoType>())
3750 return getFunctionNoProtoType(ResultTy: ResultType, Info: FNPT->getExtInfo());
3751
3752 const auto *FPT = Orig->castAs<FunctionProtoType>();
3753 return getFunctionType(ResultTy: ResultType, Args: FPT->getParamTypes(),
3754 EPI: FPT->getExtProtoInfo());
3755 });
3756}
3757
3758void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD,
3759 QualType ResultType) {
3760 FD = FD->getMostRecentDecl();
3761 while (true) {
3762 FD->setType(adjustFunctionResultType(FunctionType: FD->getType(), ResultType));
3763 if (FunctionDecl *Next = FD->getPreviousDecl())
3764 FD = Next;
3765 else
3766 break;
3767 }
3768 if (ASTMutationListener *L = getASTMutationListener())
3769 L->DeducedReturnType(FD, ReturnType: ResultType);
3770}
3771
3772/// Get a function type and produce the equivalent function type with the
3773/// specified exception specification. Type sugar that can be present on a
3774/// declaration of a function with an exception specification is permitted
3775/// and preserved. Other type sugar (for instance, typedefs) is not.
3776QualType ASTContext::getFunctionTypeWithExceptionSpec(
3777 QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const {
3778 return adjustType(Orig, Adjust: [&](QualType Ty) {
3779 const auto *Proto = Ty->castAs<FunctionProtoType>();
3780 return getFunctionType(ResultTy: Proto->getReturnType(), Args: Proto->getParamTypes(),
3781 EPI: Proto->getExtProtoInfo().withExceptionSpec(ESI));
3782 });
3783}
3784
3785bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T,
3786 QualType U) const {
3787 return hasSameType(T1: T, T2: U) ||
3788 (getLangOpts().CPlusPlus17 &&
3789 hasSameType(T1: getFunctionTypeWithExceptionSpec(Orig: T, ESI: EST_None),
3790 T2: getFunctionTypeWithExceptionSpec(Orig: U, ESI: EST_None)));
3791}
3792
3793QualType ASTContext::getFunctionTypeWithoutPtrSizes(QualType T) {
3794 if (const auto *Proto = T->getAs<FunctionProtoType>()) {
3795 QualType RetTy = removePtrSizeAddrSpace(T: Proto->getReturnType());
3796 SmallVector<QualType, 16> Args(Proto->param_types().size());
3797 for (unsigned i = 0, n = Args.size(); i != n; ++i)
3798 Args[i] = removePtrSizeAddrSpace(T: Proto->param_types()[i]);
3799 return getFunctionType(ResultTy: RetTy, Args, EPI: Proto->getExtProtoInfo());
3800 }
3801
3802 if (const FunctionNoProtoType *Proto = T->getAs<FunctionNoProtoType>()) {
3803 QualType RetTy = removePtrSizeAddrSpace(T: Proto->getReturnType());
3804 return getFunctionNoProtoType(ResultTy: RetTy, Info: Proto->getExtInfo());
3805 }
3806
3807 return T;
3808}
3809
3810bool ASTContext::hasSameFunctionTypeIgnoringPtrSizes(QualType T, QualType U) {
3811 return hasSameType(T1: T, T2: U) ||
3812 hasSameType(T1: getFunctionTypeWithoutPtrSizes(T),
3813 T2: getFunctionTypeWithoutPtrSizes(T: U));
3814}
3815
3816QualType ASTContext::getFunctionTypeWithoutParamABIs(QualType T) const {
3817 if (const auto *Proto = T->getAs<FunctionProtoType>()) {
3818 FunctionProtoType::ExtProtoInfo EPI = Proto->getExtProtoInfo();
3819 EPI.ExtParameterInfos = nullptr;
3820 return getFunctionType(ResultTy: Proto->getReturnType(), Args: Proto->param_types(), EPI);
3821 }
3822 return T;
3823}
3824
3825bool ASTContext::hasSameFunctionTypeIgnoringParamABI(QualType T,
3826 QualType U) const {
3827 return hasSameType(T1: T, T2: U) || hasSameType(T1: getFunctionTypeWithoutParamABIs(T),
3828 T2: getFunctionTypeWithoutParamABIs(T: U));
3829}
3830
/// Replace the exception specification on \p FD's type with \p ESI.
/// When \p AsWritten is set, the declaration's TypeSourceInfo is patched
/// to match as well.
void ASTContext::adjustExceptionSpec(
    FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI,
    bool AsWritten) {
  // Update the type.
  QualType Updated =
      getFunctionTypeWithExceptionSpec(Orig: FD->getType(), ESI);
  FD->setType(Updated);

  if (!AsWritten)
    return;

  // Update the type in the type source information too.
  if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) {
    // If the type and the type-as-written differ, we may need to update
    // the type-as-written too.
    if (TSInfo->getType() != FD->getType())
      Updated = getFunctionTypeWithExceptionSpec(Orig: TSInfo->getType(), ESI);

    // FIXME: When we get proper type location information for exceptions,
    // we'll also have to rebuild the TypeSourceInfo. For now, we just patch
    // up the TypeSourceInfo;
    // The assert checks that overriding the type in place is safe: the
    // TypeLoc data footprint must be unchanged by the new exception spec.
    assert(TypeLoc::getFullDataSizeForType(Updated) ==
           TypeLoc::getFullDataSizeForType(TSInfo->getType()) &&
           "TypeLoc size mismatch from updating exception specification");
    TSInfo->overrideType(T: Updated);
  }
}
3858
3859/// getComplexType - Return the uniqued reference to the type for a complex
3860/// number with the specified element type.
3861QualType ASTContext::getComplexType(QualType T) const {
3862 // Unique pointers, to guarantee there is only one pointer of a particular
3863 // structure.
3864 llvm::FoldingSetNodeID ID;
3865 ComplexType::Profile(ID, Element: T);
3866
3867 void *InsertPos = nullptr;
3868 if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos))
3869 return QualType(CT, 0);
3870
3871 // If the pointee type isn't canonical, this won't be a canonical type either,
3872 // so fill in the canonical type field.
3873 QualType Canonical;
3874 if (!T.isCanonical()) {
3875 Canonical = getComplexType(T: getCanonicalType(T));
3876
3877 // Get the new insert position for the node we care about.
3878 ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
3879 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3880 }
3881 auto *New = new (*this, alignof(ComplexType)) ComplexType(T, Canonical);
3882 Types.push_back(Elt: New);
3883 ComplexTypes.InsertNode(N: New, InsertPos);
3884 return QualType(New, 0);
3885}
3886
3887/// getPointerType - Return the uniqued reference to the type for a pointer to
3888/// the specified type.
3889QualType ASTContext::getPointerType(QualType T) const {
3890 // Unique pointers, to guarantee there is only one pointer of a particular
3891 // structure.
3892 llvm::FoldingSetNodeID ID;
3893 PointerType::Profile(ID, Pointee: T);
3894
3895 void *InsertPos = nullptr;
3896 if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos))
3897 return QualType(PT, 0);
3898
3899 // If the pointee type isn't canonical, this won't be a canonical type either,
3900 // so fill in the canonical type field.
3901 QualType Canonical;
3902 if (!T.isCanonical()) {
3903 Canonical = getPointerType(T: getCanonicalType(T));
3904
3905 // Get the new insert position for the node we care about.
3906 PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
3907 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3908 }
3909 auto *New = new (*this, alignof(PointerType)) PointerType(T, Canonical);
3910 Types.push_back(Elt: New);
3911 PointerTypes.InsertNode(N: New, InsertPos);
3912 return QualType(New, 0);
3913}
3914
3915QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const {
3916 llvm::FoldingSetNodeID ID;
3917 AdjustedType::Profile(ID, Orig, New);
3918 void *InsertPos = nullptr;
3919 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
3920 if (AT)
3921 return QualType(AT, 0);
3922
3923 QualType Canonical = getCanonicalType(T: New);
3924
3925 // Get the new insert position for the node we care about.
3926 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
3927 assert(!AT && "Shouldn't be in the map!");
3928
3929 AT = new (*this, alignof(AdjustedType))
3930 AdjustedType(Type::Adjusted, Orig, New, Canonical);
3931 Types.push_back(Elt: AT);
3932 AdjustedTypes.InsertNode(N: AT, InsertPos);
3933 return QualType(AT, 0);
3934}
3935
3936QualType ASTContext::getDecayedType(QualType Orig, QualType Decayed) const {
3937 llvm::FoldingSetNodeID ID;
3938 AdjustedType::Profile(ID, Orig, New: Decayed);
3939 void *InsertPos = nullptr;
3940 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
3941 if (AT)
3942 return QualType(AT, 0);
3943
3944 QualType Canonical = getCanonicalType(T: Decayed);
3945
3946 // Get the new insert position for the node we care about.
3947 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
3948 assert(!AT && "Shouldn't be in the map!");
3949
3950 AT = new (*this, alignof(DecayedType)) DecayedType(Orig, Decayed, Canonical);
3951 Types.push_back(Elt: AT);
3952 AdjustedTypes.InsertNode(N: AT, InsertPos);
3953 return QualType(AT, 0);
3954}
3955
3956QualType ASTContext::getDecayedType(QualType T) const {
3957 assert((T->isArrayType() || T->isFunctionType()) && "T does not decay");
3958
3959 QualType Decayed;
3960
3961 // C99 6.7.5.3p7:
3962 // A declaration of a parameter as "array of type" shall be
3963 // adjusted to "qualified pointer to type", where the type
3964 // qualifiers (if any) are those specified within the [ and ] of
3965 // the array type derivation.
3966 if (T->isArrayType())
3967 Decayed = getArrayDecayedType(T);
3968
3969 // C99 6.7.5.3p8:
3970 // A declaration of a parameter as "function returning type"
3971 // shall be adjusted to "pointer to function returning type", as
3972 // in 6.3.2.1.
3973 if (T->isFunctionType())
3974 Decayed = getPointerType(T);
3975
3976 return getDecayedType(Orig: T, Decayed);
3977}
3978
/// Return the ArrayParameterType corresponding to \p Ty, which must be a
/// constant array type (or already an array parameter type).
QualType ASTContext::getArrayParameterType(QualType Ty) const {
  if (Ty->isArrayParameterType())
    return Ty;
  assert(Ty->isConstantArrayType() && "Ty must be an array type.");
  // Desugar so the underlying ConstantArrayType can be profiled.
  QualType DTy = Ty.getDesugaredType(Context: *this);
  const auto *ATy = cast<ConstantArrayType>(Val&: DTy);
  llvm::FoldingSetNodeID ID;
  ATy->Profile(ID, Ctx: *this, ET: ATy->getElementType(), ArraySize: ATy->getZExtSize(),
               SizeExpr: ATy->getSizeExpr(), SizeMod: ATy->getSizeModifier(),
               TypeQuals: ATy->getIndexTypeQualifiers().getAsOpaqueValue());
  void *InsertPos = nullptr;
  ArrayParameterType *AT =
      ArrayParameterTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (AT)
    return QualType(AT, 0);

  // If the desugared array isn't canonical, this node won't be either, so
  // build (or look up) the canonical variant first.
  QualType Canonical;
  if (!DTy.isCanonical()) {
    Canonical = getArrayParameterType(Ty: getCanonicalType(T: Ty));

    // Get the new insert position for the node we care about.
    AT = ArrayParameterTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!AT && "Shouldn't be in the map!");
  }

  AT = new (*this, alignof(ArrayParameterType))
      ArrayParameterType(ATy, Canonical);
  Types.push_back(Elt: AT);
  ArrayParameterTypes.InsertNode(N: AT, InsertPos);
  return QualType(AT, 0);
}
4010
4011/// getBlockPointerType - Return the uniqued reference to the type for
4012/// a pointer to the specified block.
4013QualType ASTContext::getBlockPointerType(QualType T) const {
4014 assert(T->isFunctionType() && "block of function types only");
4015 // Unique pointers, to guarantee there is only one block of a particular
4016 // structure.
4017 llvm::FoldingSetNodeID ID;
4018 BlockPointerType::Profile(ID, Pointee: T);
4019
4020 void *InsertPos = nullptr;
4021 if (BlockPointerType *PT =
4022 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
4023 return QualType(PT, 0);
4024
4025 // If the block pointee type isn't canonical, this won't be a canonical
4026 // type either so fill in the canonical type field.
4027 QualType Canonical;
4028 if (!T.isCanonical()) {
4029 Canonical = getBlockPointerType(T: getCanonicalType(T));
4030
4031 // Get the new insert position for the node we care about.
4032 BlockPointerType *NewIP =
4033 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
4034 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4035 }
4036 auto *New =
4037 new (*this, alignof(BlockPointerType)) BlockPointerType(T, Canonical);
4038 Types.push_back(Elt: New);
4039 BlockPointerTypes.InsertNode(N: New, InsertPos);
4040 return QualType(New, 0);
4041}
4042
/// getLValueReferenceType - Return the uniqued reference to the type for an
/// lvalue reference to the specified type.
QualType
ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
  assert((!T->isPlaceholderType() ||
          T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
         "Unresolved placeholder type");

  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ReferenceType::Profile(ID, Referencee: T, SpelledAsLValue);

  void *InsertPos = nullptr;
  if (LValueReferenceType *RT =
        LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(RT, 0);

  // Detect reference-to-reference so the canonical node can collapse it.
  const auto *InnerRef = T->getAs<ReferenceType>();

  // If the referencee type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field. The canonical variant is
  // needed when the reference wasn't spelled as an lvalue reference, when
  // the pointee is itself a reference (collapse to the inner pointee), or
  // when the pointee is non-canonical.
  QualType Canonical;
  if (!SpelledAsLValue || InnerRef || !T.isCanonical()) {
    QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
    Canonical = getLValueReferenceType(T: getCanonicalType(T: PointeeType));

    // Get the new insert position for the node we care about.
    LValueReferenceType *NewIP =
      LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = new (*this, alignof(LValueReferenceType))
      LValueReferenceType(T, Canonical, SpelledAsLValue);
  Types.push_back(Elt: New);
  LValueReferenceTypes.InsertNode(N: New, InsertPos);

  return QualType(New, 0);
}
4083
/// getRValueReferenceType - Return the uniqued reference to the type for an
/// rvalue reference to the specified type.
QualType ASTContext::getRValueReferenceType(QualType T) const {
  assert((!T->isPlaceholderType() ||
          T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
         "Unresolved placeholder type");

  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ReferenceType::Profile(ID, Referencee: T, SpelledAsLValue: false);

  void *InsertPos = nullptr;
  if (RValueReferenceType *RT =
        RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(RT, 0);

  // Detect reference-to-reference so the canonical node can collapse it.
  const auto *InnerRef = T->getAs<ReferenceType>();

  // If the referencee type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field. The canonical variant is
  // also needed when the pointee is itself a reference (collapse to the
  // inner pointee).
  QualType Canonical;
  if (InnerRef || !T.isCanonical()) {
    QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
    Canonical = getRValueReferenceType(T: getCanonicalType(T: PointeeType));

    // Get the new insert position for the node we care about.
    RValueReferenceType *NewIP =
      RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = new (*this, alignof(RValueReferenceType))
      RValueReferenceType(T, Canonical);
  Types.push_back(Elt: New);
  RValueReferenceTypes.InsertNode(N: New, InsertPos);
  return QualType(New, 0);
}
4122
/// Return the uniqued member pointer type for pointee \p T in class
/// \p Cls / qualifier \p Qualifier. At least one of \p Qualifier and
/// \p Cls must be provided; the missing one is derived from the other.
QualType ASTContext::getMemberPointerType(QualType T,
                                          NestedNameSpecifier Qualifier,
                                          const CXXRecordDecl *Cls) const {
  if (!Qualifier) {
    assert(Cls && "At least one of Qualifier or Cls must be provided");
    Qualifier = NestedNameSpecifier(getCanonicalTagType(TD: Cls).getTypePtr());
  } else if (!Cls) {
    Cls = Qualifier.getAsRecordDecl();
  }
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  MemberPointerType::Profile(ID, Pointee: T, Qualifier, Cls);

  void *InsertPos = nullptr;
  if (MemberPointerType *PT =
        MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // Compute the canonical qualifier: the canonical tag type of the class
  // when one is known, otherwise the canonical form of the given qualifier.
  NestedNameSpecifier CanonicalQualifier = [&] {
    if (!Cls)
      return Qualifier.getCanonical();
    NestedNameSpecifier R(getCanonicalTagType(TD: Cls).getTypePtr());
    assert(R.isCanonical());
    return R;
  }();
  // If the pointee or class type isn't canonical, this won't be a canonical
  // type either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical() || Qualifier != CanonicalQualifier) {
    Canonical =
        getMemberPointerType(T: getCanonicalType(T), Qualifier: CanonicalQualifier, Cls);
    assert(!cast<MemberPointerType>(Canonical)->isSugared());
    // Get the new insert position for the node we care about.
    [[maybe_unused]] MemberPointerType *NewIP =
        MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!");
  }
  auto *New = new (*this, alignof(MemberPointerType))
      MemberPointerType(T, Qualifier, Canonical);
  Types.push_back(Elt: New);
  MemberPointerTypes.InsertNode(N: New, InsertPos);
  return QualType(New, 0);
}
4167
/// getConstantArrayType - Return the unique reference to the type for an
/// array of the specified element type.
QualType ASTContext::getConstantArrayType(QualType EltTy,
                                          const llvm::APInt &ArySizeIn,
                                          const Expr *SizeExpr,
                                          ArraySizeModifier ASM,
                                          unsigned IndexTypeQuals) const {
  assert((EltTy->isDependentType() ||
          EltTy->isIncompleteType() || EltTy->isConstantSizeType()) &&
         "Constant array of VLAs is illegal!");

  // We only need the size as part of the type if it's instantiation-dependent.
  if (SizeExpr && !SizeExpr->isInstantiationDependent())
    SizeExpr = nullptr;

  // Convert the array size into a canonical width matching the pointer size for
  // the target.
  llvm::APInt ArySize(ArySizeIn);
  ArySize = ArySize.zextOrTrunc(width: Target->getMaxPointerWidth());

  llvm::FoldingSetNodeID ID;
  ConstantArrayType::Profile(ID, Ctx: *this, ET: EltTy, ArraySize: ArySize.getZExtValue(), SizeExpr,
                             SizeMod: ASM, TypeQuals: IndexTypeQuals);

  void *InsertPos = nullptr;
  if (ConstantArrayType *ATP =
      ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(ATP, 0);

  // If the element type isn't canonical or has qualifiers, or the array bound
  // is instantiation-dependent, this won't be a canonical type either, so fill
  // in the canonical type field.
  QualType Canon;
  // FIXME: Check below should look for qualifiers behind sugar.
  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers() || SizeExpr) {
    SplitQualType canonSplit = getCanonicalType(T: EltTy).split();
    // Element qualifiers migrate onto the canonical array type; the size
    // expression is dropped since the canonical form keeps only the value.
    Canon = getConstantArrayType(EltTy: QualType(canonSplit.Ty, 0), ArySizeIn: ArySize, SizeExpr: nullptr,
                                 ASM, IndexTypeQuals);
    Canon = getQualifiedType(T: Canon, Qs: canonSplit.Quals);

    // Get the new insert position for the node we care about.
    ConstantArrayType *NewIP =
        ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = ConstantArrayType::Create(Ctx: *this, ET: EltTy, Can: Canon, Sz: ArySize, SzExpr: SizeExpr,
                                        SzMod: ASM, Qual: IndexTypeQuals);
  ConstantArrayTypes.InsertNode(N: New, InsertPos);
  Types.push_back(Elt: New);
  return QualType(New, 0);
}
4220
/// getVariableArrayDecayedType - Turns the given type, which may be
/// variably-modified, into the corresponding type with all the known
/// sizes replaced with [*].
QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
  // Vastly most common case.
  if (!type->isVariablyModifiedType()) return type;

  QualType result;

  // Desugar first (remembering the outermost qualifiers) so the switch only
  // has to handle canonical type classes; the qualifiers are re-applied at
  // the end.
  SplitQualType split = type.getSplitDesugaredType();
  const Type *ty = split.Ty;
  switch (ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("didn't desugar past all non-canonical types?");

  // These types should never be variably-modified.
  case Type::Builtin:
  case Type::Complex:
  case Type::Vector:
  case Type::DependentVector:
  case Type::ExtVector:
  case Type::DependentSizedExtVector:
  case Type::ConstantMatrix:
  case Type::DependentSizedMatrix:
  case Type::DependentAddressSpace:
  case Type::ObjCObject:
  case Type::ObjCInterface:
  case Type::ObjCObjectPointer:
  case Type::Record:
  case Type::Enum:
  case Type::UnresolvedUsing:
  case Type::TypeOfExpr:
  case Type::TypeOf:
  case Type::Decltype:
  case Type::UnaryTransform:
  case Type::DependentName:
  case Type::InjectedClassName:
  case Type::TemplateSpecialization:
  case Type::TemplateTypeParm:
  case Type::SubstTemplateTypeParmPack:
  case Type::SubstBuiltinTemplatePack:
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
  case Type::PackExpansion:
  case Type::PackIndexing:
  case Type::BitInt:
  case Type::DependentBitInt:
  case Type::ArrayParameter:
  case Type::HLSLAttributedResource:
  case Type::HLSLInlineSpirv:
  case Type::OverflowBehavior:
    llvm_unreachable("type should never be variably-modified");

  // These types can be variably-modified but should never need to
  // further decay.
  case Type::FunctionNoProto:
  case Type::FunctionProto:
  case Type::BlockPointer:
  case Type::MemberPointer:
  case Type::Pipe:
    return type;

  // These types can be variably-modified. All these modifications
  // preserve structure except as noted by comments.
  // TODO: if we ever care about optimizing VLAs, there are no-op
  // optimizations available here.
  case Type::Pointer:
    result = getPointerType(T: getVariableArrayDecayedType(
        type: cast<PointerType>(Val: ty)->getPointeeType()));
    break;

  case Type::LValueReference: {
    const auto *lv = cast<LValueReferenceType>(Val: ty);
    result = getLValueReferenceType(
        T: getVariableArrayDecayedType(type: lv->getPointeeType()),
        SpelledAsLValue: lv->isSpelledAsLValue());
    break;
  }

  case Type::RValueReference: {
    const auto *lv = cast<RValueReferenceType>(Val: ty);
    result = getRValueReferenceType(
        T: getVariableArrayDecayedType(type: lv->getPointeeType()));
    break;
  }

  case Type::Atomic: {
    const auto *at = cast<AtomicType>(Val: ty);
    result = getAtomicType(T: getVariableArrayDecayedType(type: at->getValueType()));
    break;
  }

  // Constant and dependent-sized arrays keep their bound; only their element
  // type is recursively decayed.
  case Type::ConstantArray: {
    const auto *cat = cast<ConstantArrayType>(Val: ty);
    result = getConstantArrayType(
        EltTy: getVariableArrayDecayedType(type: cat->getElementType()),
        ArySizeIn: cat->getSize(),
        SizeExpr: cat->getSizeExpr(),
        ASM: cat->getSizeModifier(),
        IndexTypeQuals: cat->getIndexTypeCVRQualifiers());
    break;
  }

  case Type::DependentSizedArray: {
    const auto *dat = cast<DependentSizedArrayType>(Val: ty);
    result = getDependentSizedArrayType(
        EltTy: getVariableArrayDecayedType(type: dat->getElementType()), NumElts: dat->getSizeExpr(),
        ASM: dat->getSizeModifier(), IndexTypeQuals: dat->getIndexTypeCVRQualifiers());
    break;
  }

  // Turn incomplete types into [*] types.
  case Type::IncompleteArray: {
    const auto *iat = cast<IncompleteArrayType>(Val: ty);
    result =
        getVariableArrayType(EltTy: getVariableArrayDecayedType(type: iat->getElementType()),
                             /*size*/ NumElts: nullptr, ASM: ArraySizeModifier::Normal,
                             IndexTypeQuals: iat->getIndexTypeCVRQualifiers());
    break;
  }

  // Turn VLA types into [*] types.
  case Type::VariableArray: {
    const auto *vat = cast<VariableArrayType>(Val: ty);
    result =
        getVariableArrayType(EltTy: getVariableArrayDecayedType(type: vat->getElementType()),
                             /*size*/ NumElts: nullptr, ASM: ArraySizeModifier::Star,
                             IndexTypeQuals: vat->getIndexTypeCVRQualifiers());
    break;
  }
  }

  // Apply the top-level qualifiers from the original.
  return getQualifiedType(T: result, Qs: split.Quals);
}
4359
4360/// getVariableArrayType - Returns a non-unique reference to the type for a
4361/// variable array of the specified element type.
4362QualType ASTContext::getVariableArrayType(QualType EltTy, Expr *NumElts,
4363 ArraySizeModifier ASM,
4364 unsigned IndexTypeQuals) const {
4365 // Since we don't unique expressions, it isn't possible to unique VLA's
4366 // that have an expression provided for their size.
4367 QualType Canon;
4368
4369 // Be sure to pull qualifiers off the element type.
4370 // FIXME: Check below should look for qualifiers behind sugar.
4371 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
4372 SplitQualType canonSplit = getCanonicalType(T: EltTy).split();
4373 Canon = getVariableArrayType(EltTy: QualType(canonSplit.Ty, 0), NumElts, ASM,
4374 IndexTypeQuals);
4375 Canon = getQualifiedType(T: Canon, Qs: canonSplit.Quals);
4376 }
4377
4378 auto *New = new (*this, alignof(VariableArrayType))
4379 VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals);
4380
4381 VariableArrayTypes.push_back(x: New);
4382 Types.push_back(Elt: New);
4383 return QualType(New, 0);
4384}
4385
/// getDependentSizedArrayType - Returns a non-unique reference to
/// the type for a dependently-sized array of the specified element
/// type.
QualType
ASTContext::getDependentSizedArrayType(QualType elementType, Expr *numElements,
                                       ArraySizeModifier ASM,
                                       unsigned elementTypeQuals) const {
  assert((!numElements || numElements->isTypeDependent() ||
          numElements->isValueDependent()) &&
         "Size must be type- or value-dependent!");

  SplitQualType canonElementType = getCanonicalType(T: elementType).split();

  void *insertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  // When a size expression is present the node is profiled against the
  // canonical element type; sizeless nodes are profiled on the spelled type.
  DependentSizedArrayType::Profile(
      ID, Context: *this, ET: numElements ? QualType(canonElementType.Ty, 0) : elementType,
      SizeMod: ASM, TypeQuals: elementTypeQuals, E: numElements);

  // Look for an existing type with these properties.
  DependentSizedArrayType *canonTy =
      DependentSizedArrayTypes.FindNodeOrInsertPos(ID, InsertPos&: insertPos);

  // Dependently-sized array types that do not have a specified number
  // of elements will have their sizes deduced from a dependent
  // initializer.
  if (!numElements) {
    if (canonTy)
      return QualType(canonTy, 0);

    auto *newType = new (*this, alignof(DependentSizedArrayType))
        DependentSizedArrayType(elementType, QualType(), numElements, ASM,
                                elementTypeQuals);
    DependentSizedArrayTypes.InsertNode(N: newType, InsertPos: insertPos);
    Types.push_back(Elt: newType);
    return QualType(newType, 0);
  }

  // If we don't have one, build one.
  if (!canonTy) {
    canonTy = new (*this, alignof(DependentSizedArrayType))
        DependentSizedArrayType(QualType(canonElementType.Ty, 0), QualType(),
                                numElements, ASM, elementTypeQuals);
    DependentSizedArrayTypes.InsertNode(N: canonTy, InsertPos: insertPos);
    Types.push_back(Elt: canonTy);
  }

  // Apply qualifiers from the element type to the array.
  QualType canon = getQualifiedType(T: QualType(canonTy,0),
                                    Qs: canonElementType.Quals);

  // If we didn't need extra canonicalization for the element type or the size
  // expression, then just use that as our result.
  if (QualType(canonElementType.Ty, 0) == elementType &&
      canonTy->getSizeExpr() == numElements)
    return canon;

  // Otherwise, we need to build a type which follows the spelling
  // of the element type.
  auto *sugaredType = new (*this, alignof(DependentSizedArrayType))
      DependentSizedArrayType(elementType, canon, numElements, ASM,
                              elementTypeQuals);
  Types.push_back(Elt: sugaredType);
  return QualType(sugaredType, 0);
}
4451
4452QualType ASTContext::getIncompleteArrayType(QualType elementType,
4453 ArraySizeModifier ASM,
4454 unsigned elementTypeQuals) const {
4455 llvm::FoldingSetNodeID ID;
4456 IncompleteArrayType::Profile(ID, ET: elementType, SizeMod: ASM, TypeQuals: elementTypeQuals);
4457
4458 void *insertPos = nullptr;
4459 if (IncompleteArrayType *iat =
4460 IncompleteArrayTypes.FindNodeOrInsertPos(ID, InsertPos&: insertPos))
4461 return QualType(iat, 0);
4462
4463 // If the element type isn't canonical, this won't be a canonical type
4464 // either, so fill in the canonical type field. We also have to pull
4465 // qualifiers off the element type.
4466 QualType canon;
4467
4468 // FIXME: Check below should look for qualifiers behind sugar.
4469 if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
4470 SplitQualType canonSplit = getCanonicalType(T: elementType).split();
4471 canon = getIncompleteArrayType(elementType: QualType(canonSplit.Ty, 0),
4472 ASM, elementTypeQuals);
4473 canon = getQualifiedType(T: canon, Qs: canonSplit.Quals);
4474
4475 // Get the new insert position for the node we care about.
4476 IncompleteArrayType *existing =
4477 IncompleteArrayTypes.FindNodeOrInsertPos(ID, InsertPos&: insertPos);
4478 assert(!existing && "Shouldn't be in the map!"); (void) existing;
4479 }
4480
4481 auto *newType = new (*this, alignof(IncompleteArrayType))
4482 IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);
4483
4484 IncompleteArrayTypes.InsertNode(N: newType, InsertPos: insertPos);
4485 Types.push_back(Elt: newType);
4486 return QualType(newType, 0);
4487}
4488
/// Decompose the given builtin scalable-vector type into its element type,
/// (scalable) element count, and the number of constituent vectors.
ASTContext::BuiltinVectorTypeInfo
ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const {
#define SVE_INT_ELTTY(BITS, ELTS, SIGNED, NUMVECTORS) \
  {getIntTypeForBitwidth(BITS, SIGNED), llvm::ElementCount::getScalable(ELTS), \
   NUMVECTORS};

#define SVE_ELTTY(ELTTY, ELTS, NUMVECTORS) \
  {ELTTY, llvm::ElementCount::getScalable(ELTS), NUMVECTORS};

  // The cases below are generated from the target .def files; each returns the
  // info triple {element type, scalable element count, NF} for one builtin.
  switch (Ty->getKind()) {
  default:
    llvm_unreachable("Unsupported builtin vector type");

#define SVE_VECTOR_TYPE_INT(Name, MangledName, Id, SingletonId, NumEls,        \
                            ElBits, NF, IsSigned)                              \
  case BuiltinType::Id:                                                        \
    return {getIntTypeForBitwidth(ElBits, IsSigned),                           \
            llvm::ElementCount::getScalable(NumEls), NF};
#define SVE_VECTOR_TYPE_FLOAT(Name, MangledName, Id, SingletonId, NumEls,      \
                              ElBits, NF)                                      \
  case BuiltinType::Id:                                                        \
    return {ElBits == 16 ? HalfTy : (ElBits == 32 ? FloatTy : DoubleTy),       \
            llvm::ElementCount::getScalable(NumEls), NF};
#define SVE_VECTOR_TYPE_BFLOAT(Name, MangledName, Id, SingletonId, NumEls,     \
                               ElBits, NF)                                     \
  case BuiltinType::Id:                                                        \
    return {BFloat16Ty, llvm::ElementCount::getScalable(NumEls), NF};
#define SVE_VECTOR_TYPE_MFLOAT(Name, MangledName, Id, SingletonId, NumEls,     \
                               ElBits, NF)                                     \
  case BuiltinType::Id:                                                        \
    return {MFloat8Ty, llvm::ElementCount::getScalable(NumEls), NF};
#define SVE_PREDICATE_TYPE_ALL(Name, MangledName, Id, SingletonId, NumEls, NF) \
  case BuiltinType::Id:                                                        \
    return {BoolTy, llvm::ElementCount::getScalable(NumEls), NF};
#include "clang/Basic/AArch64ACLETypes.def"

#define RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, ElBits, NF,         \
                            IsSigned)                                          \
  case BuiltinType::Id:                                                        \
    return {getIntTypeForBitwidth(ElBits, IsSigned),                           \
            llvm::ElementCount::getScalable(NumEls), NF};
#define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF)       \
  case BuiltinType::Id:                                                        \
    return {ElBits == 16 ? Float16Ty : (ElBits == 32 ? FloatTy : DoubleTy),    \
            llvm::ElementCount::getScalable(NumEls), NF};
#define RVV_VECTOR_TYPE_BFLOAT(Name, Id, SingletonId, NumEls, ElBits, NF)      \
  case BuiltinType::Id:                                                        \
    return {BFloat16Ty, llvm::ElementCount::getScalable(NumEls), NF};
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls)                      \
  case BuiltinType::Id:                                                        \
    return {BoolTy, llvm::ElementCount::getScalable(NumEls), 1};
#include "clang/Basic/RISCVVTypes.def"
  }
}
4543
/// getExternrefType - Return a WebAssembly externref type, which represents an
/// opaque reference to a host value.
QualType ASTContext::getWebAssemblyExternrefType() const {
  if (Target->getTriple().isWasm() && Target->hasFeature(Feature: "reference-types")) {
    // Expand all WebAssembly reference types from the .def file and return
    // the singleton for the externref entry; the other entries are skipped.
#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS) \
  if (BuiltinType::Id == BuiltinType::WasmExternRef) \
    return SingletonId;
#include "clang/Basic/WebAssemblyReferenceTypes.def"
  }
  llvm_unreachable(
      "shouldn't try to generate type externref outside WebAssembly target");
}
4556
/// getScalableVectorType - Return the unique reference to a scalable vector
/// type of the specified element type and size. VectorType must be a built-in
/// type.
QualType ASTContext::getScalableVectorType(QualType EltTy, unsigned NumElts,
                                           unsigned NumFields) const {
  // Successful lookups are memoized in ScalableVecTyMap, keyed on the
  // (element type, element count, field count) triple.
  auto K = llvm::ScalableVecTyKey{.EltTy: EltTy, .NumElts: NumElts, .NumFields: NumFields};
  if (auto It = ScalableVecTyMap.find(Val: K); It != ScalableVecTyMap.end())
    return It->second;

  if (Target->hasAArch64ACLETypes()) {
    uint64_t EltTySize = getTypeSize(T: EltTy);

#define SVE_VECTOR_TYPE_INT(Name, MangledName, Id, SingletonId, NumEls,        \
                            ElBits, NF, IsSigned)                              \
  if (EltTy->hasIntegerRepresentation() && !EltTy->isBooleanType() &&          \
      EltTy->hasSignedIntegerRepresentation() == IsSigned &&                   \
      EltTySize == ElBits && NumElts == (NumEls * NF) && NumFields == 1) {     \
    return ScalableVecTyMap[K] = SingletonId;                                  \
  }
#define SVE_VECTOR_TYPE_FLOAT(Name, MangledName, Id, SingletonId, NumEls,      \
                              ElBits, NF)                                      \
  if (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() &&        \
      EltTySize == ElBits && NumElts == (NumEls * NF) && NumFields == 1) {     \
    return ScalableVecTyMap[K] = SingletonId;                                  \
  }
#define SVE_VECTOR_TYPE_BFLOAT(Name, MangledName, Id, SingletonId, NumEls,     \
                               ElBits, NF)                                     \
  if (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() &&         \
      EltTySize == ElBits && NumElts == (NumEls * NF) && NumFields == 1) {     \
    return ScalableVecTyMap[K] = SingletonId;                                  \
  }
#define SVE_VECTOR_TYPE_MFLOAT(Name, MangledName, Id, SingletonId, NumEls,     \
                               ElBits, NF)                                     \
  if (EltTy->isMFloat8Type() && EltTySize == ElBits &&                         \
      NumElts == (NumEls * NF) && NumFields == 1) {                            \
    return ScalableVecTyMap[K] = SingletonId;                                  \
  }
#define SVE_PREDICATE_TYPE_ALL(Name, MangledName, Id, SingletonId, NumEls, NF) \
  if (EltTy->isBooleanType() && NumElts == (NumEls * NF) && NumFields == 1)    \
    return ScalableVecTyMap[K] = SingletonId;
#include "clang/Basic/AArch64ACLETypes.def"
  } else if (Target->hasRISCVVTypes()) {
    uint64_t EltTySize = getTypeSize(T: EltTy);
#define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned,   \
                        IsFP, IsBF)                                            \
  if (!EltTy->isBooleanType() &&                                               \
      ((EltTy->hasIntegerRepresentation() &&                                   \
        EltTy->hasSignedIntegerRepresentation() == IsSigned) ||                \
       (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() &&      \
        IsFP && !IsBF) ||                                                      \
       (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() &&       \
        IsBF && !IsFP)) &&                                                     \
      EltTySize == ElBits && NumElts == NumEls && NumFields == NF)             \
    return ScalableVecTyMap[K] = SingletonId;
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls)                      \
  if (EltTy->isBooleanType() && NumElts == NumEls)                             \
    return ScalableVecTyMap[K] = SingletonId;
#include "clang/Basic/RISCVVTypes.def"
  }
  // No builtin scalable vector type matched the request.
  return QualType();
}
4618
4619/// getVectorType - Return the unique reference to a vector type of
4620/// the specified element type and size. VectorType must be a built-in type.
4621QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
4622 VectorKind VecKind) const {
4623 assert(vecType->isBuiltinType() ||
4624 (vecType->isBitIntType() &&
4625 // Only support _BitInt elements with byte-sized power of 2 NumBits.
4626 llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits())));
4627
4628 // Check if we've already instantiated a vector of this type.
4629 llvm::FoldingSetNodeID ID;
4630 VectorType::Profile(ID, ElementType: vecType, NumElements: NumElts, TypeClass: Type::Vector, VecKind);
4631
4632 void *InsertPos = nullptr;
4633 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
4634 return QualType(VTP, 0);
4635
4636 // If the element type isn't canonical, this won't be a canonical type either,
4637 // so fill in the canonical type field.
4638 QualType Canonical;
4639 if (!vecType.isCanonical()) {
4640 Canonical = getVectorType(vecType: getCanonicalType(T: vecType), NumElts, VecKind);
4641
4642 // Get the new insert position for the node we care about.
4643 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4644 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4645 }
4646 auto *New = new (*this, alignof(VectorType))
4647 VectorType(vecType, NumElts, Canonical, VecKind);
4648 VectorTypes.InsertNode(N: New, InsertPos);
4649 Types.push_back(Elt: New);
4650 return QualType(New, 0);
4651}
4652
/// Return a vector type whose element count is a dependent expression
/// (vector_size attribute with a size that is not yet known).
QualType ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr,
                                            SourceLocation AttrLoc,
                                            VectorKind VecKind) const {
  // Profile against the canonical element type so all sugared spellings of
  // the same vector share one canonical node.
  llvm::FoldingSetNodeID ID;
  DependentVectorType::Profile(ID, Context: *this, ElementType: getCanonicalType(T: VecType), SizeExpr,
                               VecKind);
  void *InsertPos = nullptr;
  DependentVectorType *Canon =
      DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
  DependentVectorType *New;

  if (Canon) {
    // A canonical node already exists; the new node is sugar over it.
    New = new (*this, alignof(DependentVectorType)) DependentVectorType(
        VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind);
  } else {
    QualType CanonVecTy = getCanonicalType(T: VecType);
    if (CanonVecTy == VecType) {
      // The element type is already canonical, so this node is itself the
      // canonical type; register it in the folding set.
      New = new (*this, alignof(DependentVectorType))
          DependentVectorType(VecType, QualType(), SizeExpr, AttrLoc, VecKind);

      DependentVectorType *CanonCheck =
          DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!CanonCheck &&
             "Dependent-sized vector_size canonical type broken");
      (void)CanonCheck;
      DependentVectorTypes.InsertNode(N: New, InsertPos);
    } else {
      // Build (or fetch) the canonical node first, then sugar over it.
      QualType CanonTy = getDependentVectorType(VecType: CanonVecTy, SizeExpr,
                                                AttrLoc: SourceLocation(), VecKind);
      New = new (*this, alignof(DependentVectorType))
          DependentVectorType(VecType, CanonTy, SizeExpr, AttrLoc, VecKind);
    }
  }

  Types.push_back(Elt: New);
  return QualType(New, 0);
}
4690
4691/// getExtVectorType - Return the unique reference to an extended vector type of
4692/// the specified element type and size. VectorType must be a built-in type.
4693QualType ASTContext::getExtVectorType(QualType vecType,
4694 unsigned NumElts) const {
4695 assert(vecType->isBuiltinType() || vecType->isDependentType() ||
4696 (vecType->isBitIntType() &&
4697 // Only support _BitInt elements with byte-sized power of 2 NumBits.
4698 llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits())));
4699
4700 // Check if we've already instantiated a vector of this type.
4701 llvm::FoldingSetNodeID ID;
4702 VectorType::Profile(ID, ElementType: vecType, NumElements: NumElts, TypeClass: Type::ExtVector,
4703 VecKind: VectorKind::Generic);
4704 void *InsertPos = nullptr;
4705 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
4706 return QualType(VTP, 0);
4707
4708 // If the element type isn't canonical, this won't be a canonical type either,
4709 // so fill in the canonical type field.
4710 QualType Canonical;
4711 if (!vecType.isCanonical()) {
4712 Canonical = getExtVectorType(vecType: getCanonicalType(T: vecType), NumElts);
4713
4714 // Get the new insert position for the node we care about.
4715 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4716 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4717 }
4718 auto *New = new (*this, alignof(ExtVectorType))
4719 ExtVectorType(vecType, NumElts, Canonical);
4720 VectorTypes.InsertNode(N: New, InsertPos);
4721 Types.push_back(Elt: New);
4722 return QualType(New, 0);
4723}
4724
/// Return an ext_vector type whose element count is a dependent expression.
QualType
ASTContext::getDependentSizedExtVectorType(QualType vecType,
                                           Expr *SizeExpr,
                                           SourceLocation AttrLoc) const {
  // Profile against the canonical element type so sugared spellings share one
  // canonical node.
  llvm::FoldingSetNodeID ID;
  DependentSizedExtVectorType::Profile(ID, Context: *this, ElementType: getCanonicalType(T: vecType),
                                       SizeExpr);

  void *InsertPos = nullptr;
  DependentSizedExtVectorType *Canon
    = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
  DependentSizedExtVectorType *New;
  if (Canon) {
    // We already have a canonical version of this array type; use it as
    // the canonical type for a newly-built type.
    New = new (*this, alignof(DependentSizedExtVectorType))
        DependentSizedExtVectorType(vecType, QualType(Canon, 0), SizeExpr,
                                    AttrLoc);
  } else {
    QualType CanonVecTy = getCanonicalType(T: vecType);
    if (CanonVecTy == vecType) {
      // The element type is already canonical, so this node is itself the
      // canonical type; register it in the folding set.
      New = new (*this, alignof(DependentSizedExtVectorType))
          DependentSizedExtVectorType(vecType, QualType(), SizeExpr, AttrLoc);

      DependentSizedExtVectorType *CanonCheck
        = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken");
      (void)CanonCheck;
      DependentSizedExtVectorTypes.InsertNode(N: New, InsertPos);
    } else {
      // Recurse to build/fetch the canonical node, then sugar over it.
      QualType CanonExtTy = getDependentSizedExtVectorType(vecType: CanonVecTy, SizeExpr,
                                                           AttrLoc: SourceLocation());
      New = new (*this, alignof(DependentSizedExtVectorType))
          DependentSizedExtVectorType(vecType, CanonExtTy, SizeExpr, AttrLoc);
    }
  }

  Types.push_back(Elt: New);
  return QualType(New, 0);
}
4765
4766QualType ASTContext::getConstantMatrixType(QualType ElementTy, unsigned NumRows,
4767 unsigned NumColumns) const {
4768 llvm::FoldingSetNodeID ID;
4769 ConstantMatrixType::Profile(ID, ElementType: ElementTy, NumRows, NumColumns,
4770 TypeClass: Type::ConstantMatrix);
4771
4772 assert(MatrixType::isValidElementType(ElementTy, getLangOpts()) &&
4773 "need a valid element type");
4774 assert(NumRows > 0 && NumRows <= LangOpts.MaxMatrixDimension &&
4775 NumColumns > 0 && NumColumns <= LangOpts.MaxMatrixDimension &&
4776 "need valid matrix dimensions");
4777 void *InsertPos = nullptr;
4778 if (ConstantMatrixType *MTP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos))
4779 return QualType(MTP, 0);
4780
4781 QualType Canonical;
4782 if (!ElementTy.isCanonical()) {
4783 Canonical =
4784 getConstantMatrixType(ElementTy: getCanonicalType(T: ElementTy), NumRows, NumColumns);
4785
4786 ConstantMatrixType *NewIP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4787 assert(!NewIP && "Matrix type shouldn't already exist in the map");
4788 (void)NewIP;
4789 }
4790
4791 auto *New = new (*this, alignof(ConstantMatrixType))
4792 ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical);
4793 MatrixTypes.InsertNode(N: New, InsertPos);
4794 Types.push_back(Elt: New);
4795 return QualType(New, 0);
4796}
4797
4798QualType ASTContext::getDependentSizedMatrixType(QualType ElementTy,
4799 Expr *RowExpr,
4800 Expr *ColumnExpr,
4801 SourceLocation AttrLoc) const {
4802 QualType CanonElementTy = getCanonicalType(T: ElementTy);
4803 llvm::FoldingSetNodeID ID;
4804 DependentSizedMatrixType::Profile(ID, Context: *this, ElementType: CanonElementTy, RowExpr,
4805 ColumnExpr);
4806
4807 void *InsertPos = nullptr;
4808 DependentSizedMatrixType *Canon =
4809 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4810
4811 if (!Canon) {
4812 Canon = new (*this, alignof(DependentSizedMatrixType))
4813 DependentSizedMatrixType(CanonElementTy, QualType(), RowExpr,
4814 ColumnExpr, AttrLoc);
4815#ifndef NDEBUG
4816 DependentSizedMatrixType *CanonCheck =
4817 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4818 assert(!CanonCheck && "Dependent-sized matrix canonical type broken");
4819#endif
4820 DependentSizedMatrixTypes.InsertNode(N: Canon, InsertPos);
4821 Types.push_back(Elt: Canon);
4822 }
4823
4824 // Already have a canonical version of the matrix type
4825 //
4826 // If it exactly matches the requested type, use it directly.
4827 if (Canon->getElementType() == ElementTy && Canon->getRowExpr() == RowExpr &&
4828 Canon->getRowExpr() == ColumnExpr)
4829 return QualType(Canon, 0);
4830
4831 // Use Canon as the canonical type for newly-built type.
4832 DependentSizedMatrixType *New = new (*this, alignof(DependentSizedMatrixType))
4833 DependentSizedMatrixType(ElementTy, QualType(Canon, 0), RowExpr,
4834 ColumnExpr, AttrLoc);
4835 Types.push_back(Elt: New);
4836 return QualType(New, 0);
4837}
4838
/// Return a type representing \p PointeeType with an address space given by a
/// dependent (instantiation-dependent) expression.
QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType,
                                                  Expr *AddrSpaceExpr,
                                                  SourceLocation AttrLoc) const {
  assert(AddrSpaceExpr->isInstantiationDependent());

  QualType canonPointeeType = getCanonicalType(T: PointeeType);

  void *insertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  // Profiled against the canonical pointee so all sugared spellings share one
  // canonical node.
  DependentAddressSpaceType::Profile(ID, Context: *this, PointeeType: canonPointeeType,
                                     AddrSpaceExpr);

  DependentAddressSpaceType *canonTy =
      DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, InsertPos&: insertPos);

  // Create and register the canonical node if it does not exist yet.
  if (!canonTy) {
    canonTy = new (*this, alignof(DependentAddressSpaceType))
        DependentAddressSpaceType(canonPointeeType, QualType(), AddrSpaceExpr,
                                  AttrLoc);
    DependentAddressSpaceTypes.InsertNode(N: canonTy, InsertPos: insertPos);
    Types.push_back(Elt: canonTy);
  }

  // If the request matches the canonical node exactly, reuse it directly.
  if (canonPointeeType == PointeeType &&
      canonTy->getAddrSpaceExpr() == AddrSpaceExpr)
    return QualType(canonTy, 0);

  // Otherwise build a sugared node whose canonical type is canonTy.
  auto *sugaredType = new (*this, alignof(DependentAddressSpaceType))
      DependentAddressSpaceType(PointeeType, QualType(canonTy, 0),
                                AddrSpaceExpr, AttrLoc);
  Types.push_back(Elt: sugaredType);
  return QualType(sugaredType, 0);
}
4872
4873/// Determine whether \p T is canonical as the result type of a function.
4874static bool isCanonicalResultType(QualType T) {
4875 return T.isCanonical() &&
4876 (T.getObjCLifetime() == Qualifiers::OCL_None ||
4877 T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone);
4878}
4879
/// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
QualType
ASTContext::getFunctionNoProtoType(QualType ResultTy,
                                   const FunctionType::ExtInfo &Info) const {
  // FIXME: This assertion cannot be enabled (yet) because the ObjC rewriter
  // functionality creates a function without a prototype regardless of
  // language mode (so it makes them even in C++). Once the rewriter has been
  // fixed, this assertion can be enabled again.
  //assert(!LangOpts.requiresStrictPrototypes() &&
  //       "strict prototypes are disabled");

  // Unique functions, to guarantee there is only one function of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  FunctionNoProtoType::Profile(ID, ResultType: ResultTy, Info);

  void *InsertPos = nullptr;
  if (FunctionNoProtoType *FT =
        FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(FT, 0);

  // A result type that is not canonical as a function result (see
  // isCanonicalResultType) makes this node sugared; build the canonical node
  // over the stripped result type first.
  QualType Canonical;
  if (!isCanonicalResultType(T: ResultTy)) {
    Canonical =
        getFunctionNoProtoType(ResultTy: getCanonicalFunctionResultType(ResultType: ResultTy), Info);

    // Get the new insert position for the node we care about.
    FunctionNoProtoType *NewIP =
        FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = new (*this, alignof(FunctionNoProtoType))
      FunctionNoProtoType(ResultTy, Canonical, Info);
  Types.push_back(Elt: New);
  FunctionNoProtoTypes.InsertNode(N: New, InsertPos);
  return QualType(New, 0);
}
4918
4919CanQualType
4920ASTContext::getCanonicalFunctionResultType(QualType ResultType) const {
4921 CanQualType CanResultType = getCanonicalType(T: ResultType);
4922
4923 // Canonical result types do not have ARC lifetime qualifiers.
4924 if (CanResultType.getQualifiers().hasObjCLifetime()) {
4925 Qualifiers Qs = CanResultType.getQualifiers();
4926 Qs.removeObjCLifetime();
4927 return CanQualType::CreateUnsafe(
4928 Other: getQualifiedType(T: CanResultType.getUnqualifiedType(), Qs));
4929 }
4930
4931 return CanResultType;
4932}
4933
4934static bool isCanonicalExceptionSpecification(
4935 const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) {
4936 if (ESI.Type == EST_None)
4937 return true;
4938 if (!NoexceptInType)
4939 return false;
4940
4941 // C++17 onwards: exception specification is part of the type, as a simple
4942 // boolean "can this function type throw".
4943 if (ESI.Type == EST_BasicNoexcept)
4944 return true;
4945
4946 // A noexcept(expr) specification is (possibly) canonical if expr is
4947 // value-dependent.
4948 if (ESI.Type == EST_DependentNoexcept)
4949 return true;
4950
4951 // A dynamic exception specification is canonical if it only contains pack
4952 // expansions (so we can't tell whether it's non-throwing) and all its
4953 // contained types are canonical.
4954 if (ESI.Type == EST_Dynamic) {
4955 bool AnyPackExpansions = false;
4956 for (QualType ET : ESI.Exceptions) {
4957 if (!ET.isCanonical())
4958 return false;
4959 if (ET->getAs<PackExpansionType>())
4960 AnyPackExpansions = true;
4961 }
4962 return AnyPackExpansions;
4963 }
4964
4965 return false;
4966}
4967
/// Build or look up the uniqued FunctionProtoType for a function with the
/// given result type, parameter types, and extended prototype info.
///
/// When \p OnlyWantCanonical is true, the caller asserts the inputs are
/// already canonical and expects the canonical node back.
QualType ASTContext::getFunctionTypeInternal(
    QualType ResultTy, ArrayRef<QualType> ArgArray,
    const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const {
  size_t NumArgs = ArgArray.size();

  // Unique functions, to guarantee there is only one function of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  FunctionProtoType::Profile(ID, Result: ResultTy, ArgTys: ArgArray.begin(), NumArgs, EPI,
                             Context: *this, Canonical: true);

  QualType Canonical;
  bool Unique = false;

  void *InsertPos = nullptr;
  if (FunctionProtoType *FPT =
          FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) {
    QualType Existing = QualType(FPT, 0);

    // If we find a pre-existing equivalent FunctionProtoType, we can just reuse
    // it so long as our exception specification doesn't contain a dependent
    // noexcept expression, or we're just looking for a canonical type.
    // Otherwise, we're going to need to create a type
    // sugar node to hold the concrete expression.
    if (OnlyWantCanonical || !isComputedNoexcept(ESpecType: EPI.ExceptionSpec.Type) ||
        EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr())
      return Existing;

    // We need a new type sugar node for this one, to hold the new noexcept
    // expression. We do no canonicalization here, but that's OK since we don't
    // expect to see the same noexcept expression much more than once.
    Canonical = getCanonicalType(T: Existing);
    Unique = true;
  }

  bool NoexceptInType = getLangOpts().CPlusPlus17;
  bool IsCanonicalExceptionSpec =
      isCanonicalExceptionSpecification(ESI: EPI.ExceptionSpec, NoexceptInType);

  // Determine whether the type being created is already canonical or not.
  bool isCanonical = !Unique && IsCanonicalExceptionSpec &&
                     isCanonicalResultType(T: ResultTy) && !EPI.HasTrailingReturn;
  // All parameter types must also be canonical (as parameters, i.e. after
  // decay) for the whole function type to be canonical.
  for (unsigned i = 0; i != NumArgs && isCanonical; ++i)
    if (!ArgArray[i].isCanonicalAsParam())
      isCanonical = false;

  if (OnlyWantCanonical)
    assert(isCanonical &&
           "given non-canonical parameters constructing canonical type");

  // If this type isn't canonical, get the canonical version of it if we don't
  // already have it. The exception spec is only partially part of the
  // canonical type, and only in C++17 onwards.
  if (!isCanonical && Canonical.isNull()) {
    SmallVector<QualType, 16> CanonicalArgs;
    CanonicalArgs.reserve(N: NumArgs);
    for (unsigned i = 0; i != NumArgs; ++i)
      CanonicalArgs.push_back(Elt: getCanonicalParamType(T: ArgArray[i]));

    llvm::SmallVector<QualType, 8> ExceptionTypeStorage;
    FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI;
    CanonicalEPI.HasTrailingReturn = false;

    if (IsCanonicalExceptionSpec) {
      // Exception spec is already OK.
    } else if (NoexceptInType) {
      // Map the written exception specification onto its canonical
      // "can throw" / "cannot throw" form.
      switch (EPI.ExceptionSpec.Type) {
      case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated:
        // We don't know yet. It shouldn't matter what we pick here; no-one
        // should ever look at this.
        [[fallthrough]];
      case EST_None: case EST_MSAny: case EST_NoexceptFalse:
        CanonicalEPI.ExceptionSpec.Type = EST_None;
        break;

        // A dynamic exception specification is almost always "not noexcept",
        // with the exception that a pack expansion might expand to no types.
      case EST_Dynamic: {
        bool AnyPacks = false;
        for (QualType ET : EPI.ExceptionSpec.Exceptions) {
          if (ET->getAs<PackExpansionType>())
            AnyPacks = true;
          ExceptionTypeStorage.push_back(Elt: getCanonicalType(T: ET));
        }
        if (!AnyPacks)
          CanonicalEPI.ExceptionSpec.Type = EST_None;
        else {
          CanonicalEPI.ExceptionSpec.Type = EST_Dynamic;
          CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage;
        }
        break;
      }

      case EST_DynamicNone:
      case EST_BasicNoexcept:
      case EST_NoexceptTrue:
      case EST_NoThrow:
        CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept;
        break;

      case EST_DependentNoexcept:
        llvm_unreachable("dependent noexcept is already canonical");
      }
    } else {
      // Pre-C++17: the exception spec is not part of the canonical type.
      CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo();
    }

    // Adjust the canonical function result type.
    CanQualType CanResultTy = getCanonicalFunctionResultType(ResultType: ResultTy);
    Canonical =
        getFunctionTypeInternal(ResultTy: CanResultTy, ArgArray: CanonicalArgs, EPI: CanonicalEPI, OnlyWantCanonical: true);

    // Get the new insert position for the node we care about.
    FunctionProtoType *NewIP =
        FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  // Compute the needed size to hold this FunctionProtoType and the
  // various trailing objects.
  auto ESH = FunctionProtoType::getExceptionSpecSize(
      EST: EPI.ExceptionSpec.Type, NumExceptions: EPI.ExceptionSpec.Exceptions.size());
  size_t Size = FunctionProtoType::totalSizeToAlloc<
      QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields,
      FunctionType::FunctionTypeExtraAttributeInfo,
      FunctionType::FunctionTypeArmAttributes, FunctionType::ExceptionType,
      Expr *, FunctionDecl *, FunctionProtoType::ExtParameterInfo, Qualifiers,
      FunctionEffect, EffectConditionExpr>(
      Counts: NumArgs, Counts: EPI.Variadic, Counts: EPI.requiresFunctionProtoTypeExtraBitfields(),
      Counts: EPI.requiresFunctionProtoTypeExtraAttributeInfo(),
      Counts: EPI.requiresFunctionProtoTypeArmAttributes(), Counts: ESH.NumExceptionType,
      Counts: ESH.NumExprPtr, Counts: ESH.NumFunctionDeclPtr,
      Counts: EPI.ExtParameterInfos ? NumArgs : 0,
      Counts: EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0, Counts: EPI.FunctionEffects.size(),
      Counts: EPI.FunctionEffects.conditions().size());

  // Placement-new the node into the tail-allocated storage. If this node was
  // created only to carry a distinct noexcept expression (Unique), it must
  // not be inserted into the folding set.
  auto *FTP = (FunctionProtoType *)Allocate(Size, Align: alignof(FunctionProtoType));
  FunctionProtoType::ExtProtoInfo newEPI = EPI;
  new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI);
  Types.push_back(Elt: FTP);
  if (!Unique)
    FunctionProtoTypes.InsertNode(N: FTP, InsertPos);
  if (!EPI.FunctionEffects.empty())
    AnyFunctionEffects = true;
  return QualType(FTP, 0);
}
5114
5115QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const {
5116 llvm::FoldingSetNodeID ID;
5117 PipeType::Profile(ID, T, isRead: ReadOnly);
5118
5119 void *InsertPos = nullptr;
5120 if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos))
5121 return QualType(PT, 0);
5122
5123 // If the pipe element type isn't canonical, this won't be a canonical type
5124 // either, so fill in the canonical type field.
5125 QualType Canonical;
5126 if (!T.isCanonical()) {
5127 Canonical = getPipeType(T: getCanonicalType(T), ReadOnly);
5128
5129 // Get the new insert position for the node we care about.
5130 PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos);
5131 assert(!NewIP && "Shouldn't be in the map!");
5132 (void)NewIP;
5133 }
5134 auto *New = new (*this, alignof(PipeType)) PipeType(T, Canonical, ReadOnly);
5135 Types.push_back(Elt: New);
5136 PipeTypes.InsertNode(N: New, InsertPos);
5137 return QualType(New, 0);
5138}
5139
5140QualType ASTContext::adjustStringLiteralBaseType(QualType Ty) const {
5141 // OpenCL v1.1 s6.5.3: a string literal is in the constant address space.
5142 return LangOpts.OpenCL ? getAddrSpaceQualType(T: Ty, AddressSpace: LangAS::opencl_constant)
5143 : Ty;
5144}
5145
5146QualType ASTContext::getReadPipeType(QualType T) const {
5147 return getPipeType(T, ReadOnly: true);
5148}
5149
5150QualType ASTContext::getWritePipeType(QualType T) const {
5151 return getPipeType(T, ReadOnly: false);
5152}
5153
5154QualType ASTContext::getBitIntType(bool IsUnsigned, unsigned NumBits) const {
5155 llvm::FoldingSetNodeID ID;
5156 BitIntType::Profile(ID, IsUnsigned, NumBits);
5157
5158 void *InsertPos = nullptr;
5159 if (BitIntType *EIT = BitIntTypes.FindNodeOrInsertPos(ID, InsertPos))
5160 return QualType(EIT, 0);
5161
5162 auto *New = new (*this, alignof(BitIntType)) BitIntType(IsUnsigned, NumBits);
5163 BitIntTypes.InsertNode(N: New, InsertPos);
5164 Types.push_back(Elt: New);
5165 return QualType(New, 0);
5166}
5167
5168QualType ASTContext::getDependentBitIntType(bool IsUnsigned,
5169 Expr *NumBitsExpr) const {
5170 assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent");
5171 llvm::FoldingSetNodeID ID;
5172 DependentBitIntType::Profile(ID, Context: *this, IsUnsigned, NumBitsExpr);
5173
5174 void *InsertPos = nullptr;
5175 if (DependentBitIntType *Existing =
5176 DependentBitIntTypes.FindNodeOrInsertPos(ID, InsertPos))
5177 return QualType(Existing, 0);
5178
5179 auto *New = new (*this, alignof(DependentBitIntType))
5180 DependentBitIntType(IsUnsigned, NumBitsExpr);
5181 DependentBitIntTypes.InsertNode(N: New, InsertPos);
5182
5183 Types.push_back(Elt: New);
5184 return QualType(New, 0);
5185}
5186
5187QualType
5188ASTContext::getPredefinedSugarType(PredefinedSugarType::Kind KD) const {
5189 using Kind = PredefinedSugarType::Kind;
5190
5191 if (auto *Target = PredefinedSugarTypes[llvm::to_underlying(E: KD)];
5192 Target != nullptr)
5193 return QualType(Target, 0);
5194
5195 auto getCanonicalType = [](const ASTContext &Ctx, Kind KDI) -> QualType {
5196 switch (KDI) {
5197 // size_t (C99TC3 6.5.3.4), signed size_t (C++23 5.13.2) and
5198 // ptrdiff_t (C99TC3 6.5.6) Although these types are not built-in, they
5199 // are part of the core language and are widely used. Using
5200 // PredefinedSugarType makes these types as named sugar types rather than
5201 // standard integer types, enabling better hints and diagnostics.
5202 case Kind::SizeT:
5203 return Ctx.getFromTargetType(Type: Ctx.Target->getSizeType());
5204 case Kind::SignedSizeT:
5205 return Ctx.getFromTargetType(Type: Ctx.Target->getSignedSizeType());
5206 case Kind::PtrdiffT:
5207 return Ctx.getFromTargetType(Type: Ctx.Target->getPtrDiffType(AddrSpace: LangAS::Default));
5208 }
5209 llvm_unreachable("unexpected kind");
5210 };
5211 auto *New = new (*this, alignof(PredefinedSugarType))
5212 PredefinedSugarType(KD, &Idents.get(Name: PredefinedSugarType::getName(KD)),
5213 getCanonicalType(*this, static_cast<Kind>(KD)));
5214 Types.push_back(Elt: New);
5215 PredefinedSugarTypes[llvm::to_underlying(E: KD)] = New;
5216 return QualType(New, 0);
5217}
5218
5219QualType ASTContext::getTypeDeclType(ElaboratedTypeKeyword Keyword,
5220 NestedNameSpecifier Qualifier,
5221 const TypeDecl *Decl) const {
5222 if (auto *Tag = dyn_cast<TagDecl>(Val: Decl))
5223 return getTagType(Keyword, Qualifier, TD: Tag,
5224 /*OwnsTag=*/false);
5225 if (auto *Typedef = dyn_cast<TypedefNameDecl>(Val: Decl))
5226 return getTypedefType(Keyword, Qualifier, Decl: Typedef);
5227 if (auto *UD = dyn_cast<UnresolvedUsingTypenameDecl>(Val: Decl))
5228 return getUnresolvedUsingType(Keyword, Qualifier, D: UD);
5229
5230 assert(Keyword == ElaboratedTypeKeyword::None);
5231 assert(!Qualifier);
5232 return QualType(Decl->TypeForDecl, 0);
5233}
5234
5235CanQualType ASTContext::getCanonicalTypeDeclType(const TypeDecl *TD) const {
5236 if (auto *Tag = dyn_cast<TagDecl>(Val: TD))
5237 return getCanonicalTagType(TD: Tag);
5238 if (auto *TN = dyn_cast<TypedefNameDecl>(Val: TD))
5239 return getCanonicalType(T: TN->getUnderlyingType());
5240 if (const auto *UD = dyn_cast<UnresolvedUsingTypenameDecl>(Val: TD))
5241 return getCanonicalUnresolvedUsingType(D: UD);
5242 assert(TD->TypeForDecl);
5243 return TD->TypeForDecl->getCanonicalTypeUnqualified();
5244}
5245
5246QualType ASTContext::getTypeDeclType(const TypeDecl *Decl) const {
5247 if (const auto *TD = dyn_cast<TagDecl>(Val: Decl))
5248 return getCanonicalTagType(TD);
5249 if (const auto *TD = dyn_cast<TypedefNameDecl>(Val: Decl);
5250 isa_and_nonnull<TypedefDecl, TypeAliasDecl>(Val: TD))
5251 return getTypedefType(Keyword: ElaboratedTypeKeyword::None,
5252 /*Qualifier=*/std::nullopt, Decl: TD);
5253 if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Val: Decl))
5254 return getCanonicalUnresolvedUsingType(D: Using);
5255
5256 assert(Decl->TypeForDecl);
5257 return QualType(Decl->TypeForDecl, 0);
5258}
5259
/// getTypedefType - Return the unique reference to the type for the
/// specified typedef name decl.
///
/// \param UnderlyingType if non-null, the aliased type to record on the node;
///        when null, the declaration's own underlying type is used.
/// \param TypeMatchesDeclOrNone whether \p UnderlyingType matches the decl's
///        own underlying type; computed here when the caller does not
///        provide it.
QualType
ASTContext::getTypedefType(ElaboratedTypeKeyword Keyword,
                           NestedNameSpecifier Qualifier,
                           const TypedefNameDecl *Decl, QualType UnderlyingType,
                           std::optional<bool> TypeMatchesDeclOrNone) const {
  if (!TypeMatchesDeclOrNone) {
    QualType DeclUnderlyingType = Decl->getUnderlyingType();
    assert(!DeclUnderlyingType.isNull());
    if (UnderlyingType.isNull())
      UnderlyingType = DeclUnderlyingType;
    else
      assert(hasSameType(UnderlyingType, DeclUnderlyingType));
    TypeMatchesDeclOrNone = UnderlyingType == DeclUnderlyingType;
  } else {
    // FIXME: This is a workaround for a serialization cycle: assume the decl
    // underlying type is not available; don't touch it.
    assert(!UnderlyingType.isNull());
  }

  // Fast path: a plain (no keyword, no qualifier, type-matching) typedef type
  // is cached directly on the declaration rather than in the folding set.
  if (Keyword == ElaboratedTypeKeyword::None && !Qualifier &&
      *TypeMatchesDeclOrNone) {
    if (Decl->TypeForDecl)
      return QualType(Decl->TypeForDecl, 0);

    auto *NewType = new (*this, alignof(TypedefType))
        TypedefType(Type::Typedef, Keyword, Qualifier, Decl, UnderlyingType,
                    !*TypeMatchesDeclOrNone);

    Types.push_back(Elt: NewType);
    Decl->TypeForDecl = NewType;
    return QualType(NewType, 0);
  }

  // Otherwise unique the node through the folding set. The underlying type
  // participates in the profile only when it differs from the decl's own.
  llvm::FoldingSetNodeID ID;
  TypedefType::Profile(ID, Keyword, Qualifier, Decl,
                       Underlying: *TypeMatchesDeclOrNone ? QualType() : UnderlyingType);

  void *InsertPos = nullptr;
  if (FoldingSetPlaceholder<TypedefType> *Placeholder =
          TypedefTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(Placeholder->getType(), 0);

  // Tail-allocate the folding-set placeholder, the qualifier (if any), and
  // the differing underlying type (if any) after the node itself.
  void *Mem =
      Allocate(Size: TypedefType::totalSizeToAlloc<FoldingSetPlaceholder<TypedefType>,
                                               NestedNameSpecifier, QualType>(
                   Counts: 1, Counts: !!Qualifier, Counts: !*TypeMatchesDeclOrNone),
               Align: alignof(TypedefType));
  auto *NewType =
      new (Mem) TypedefType(Type::Typedef, Keyword, Qualifier, Decl,
                            UnderlyingType, !*TypeMatchesDeclOrNone);
  auto *Placeholder = new (NewType->getFoldingSetPlaceholder())
      FoldingSetPlaceholder<TypedefType>();
  TypedefTypes.InsertNode(N: Placeholder, InsertPos);
  Types.push_back(Elt: NewType);
  return QualType(NewType, 0);
}
5318
5319QualType ASTContext::getUsingType(ElaboratedTypeKeyword Keyword,
5320 NestedNameSpecifier Qualifier,
5321 const UsingShadowDecl *D,
5322 QualType UnderlyingType) const {
5323 // FIXME: This is expensive to compute every time!
5324 if (UnderlyingType.isNull()) {
5325 const auto *UD = cast<UsingDecl>(Val: D->getIntroducer());
5326 UnderlyingType =
5327 getTypeDeclType(Keyword: UD->hasTypename() ? ElaboratedTypeKeyword::Typename
5328 : ElaboratedTypeKeyword::None,
5329 Qualifier: UD->getQualifier(), Decl: cast<TypeDecl>(Val: D->getTargetDecl()));
5330 }
5331
5332 llvm::FoldingSetNodeID ID;
5333 UsingType::Profile(ID, Keyword, Qualifier, D, UnderlyingType);
5334
5335 void *InsertPos = nullptr;
5336 if (const UsingType *T = UsingTypes.FindNodeOrInsertPos(ID, InsertPos))
5337 return QualType(T, 0);
5338
5339 assert(!UnderlyingType.hasLocalQualifiers());
5340
5341 assert(
5342 hasSameType(getCanonicalTypeDeclType(cast<TypeDecl>(D->getTargetDecl())),
5343 UnderlyingType));
5344
5345 void *Mem =
5346 Allocate(Size: UsingType::totalSizeToAlloc<NestedNameSpecifier>(Counts: !!Qualifier),
5347 Align: alignof(UsingType));
5348 UsingType *T = new (Mem) UsingType(Keyword, Qualifier, D, UnderlyingType);
5349 Types.push_back(Elt: T);
5350 UsingTypes.InsertNode(N: T, InsertPos);
5351 return QualType(T, 0);
5352}
5353
/// Allocate and construct the concrete TagType subclass (EnumType,
/// RecordType, or InjectedClassNameType) for \p TD, optionally with a
/// leading folding-set placeholder and a tail-allocated qualifier.
///
/// \param CanonicalType the canonical type node, or null when the node being
///        created is itself canonical.
/// \param WithFoldingSetNode whether to co-allocate a
///        TagTypeFoldingSetPlaceholder in front of the node so it can be
///        uniqued in the TagTypes folding set.
TagType *ASTContext::getTagTypeInternal(ElaboratedTypeKeyword Keyword,
                                        NestedNameSpecifier Qualifier,
                                        const TagDecl *TD, bool OwnsTag,
                                        bool IsInjected,
                                        const Type *CanonicalType,
                                        bool WithFoldingSetNode) const {
  // Pick the concrete type class and its allocation size from the decl kind.
  auto [TC, Size] = [&] {
    switch (TD->getDeclKind()) {
    case Decl::Enum:
      static_assert(alignof(EnumType) == alignof(TagType));
      return std::make_tuple(args: Type::Enum, args: sizeof(EnumType));
    case Decl::ClassTemplatePartialSpecialization:
    case Decl::ClassTemplateSpecialization:
    case Decl::CXXRecord:
      static_assert(alignof(RecordType) == alignof(TagType));
      static_assert(alignof(InjectedClassNameType) == alignof(TagType));
      if (cast<CXXRecordDecl>(Val: TD)->hasInjectedClassType())
        return std::make_tuple(args: Type::InjectedClassName,
                               args: sizeof(InjectedClassNameType));
      [[fallthrough]];
    case Decl::Record:
      return std::make_tuple(args: Type::Record, args: sizeof(RecordType));
    default:
      llvm_unreachable("unexpected decl kind");
    }
  }();

  // The qualifier, when present, is tail-allocated after the type node.
  if (Qualifier) {
    static_assert(alignof(NestedNameSpecifier) <= alignof(TagType));
    Size = llvm::alignTo(Value: Size, Align: alignof(NestedNameSpecifier)) +
           sizeof(NestedNameSpecifier);
  }
  void *Mem;
  if (WithFoldingSetNode) {
    // FIXME: It would be more profitable to tail allocate the folding set node
    // from the type, instead of the other way around, due to the greater
    // alignment requirements of the type. But this makes it harder to deal with
    // the different type node sizes. This would require either uniquing from
    // different folding sets, or having the folding setaccept a
    // contextual parameter which is not fixed at construction.
    Mem = Allocate(
        Size: sizeof(TagTypeFoldingSetPlaceholder) +
                 TagTypeFoldingSetPlaceholder::getOffset() + Size,
        Align: std::max(a: alignof(TagTypeFoldingSetPlaceholder), b: alignof(TagType)));
    auto *T = new (Mem) TagTypeFoldingSetPlaceholder();
    Mem = T->getTagType();
  } else {
    Mem = Allocate(Size, Align: alignof(TagType));
  }

  // Placement-construct the concrete subclass; each case asserts that the
  // TagType subobject is at offset zero, which the layout above relies on.
  auto *T = [&, TC = TC]() -> TagType * {
    switch (TC) {
    case Type::Enum: {
      assert(isa<EnumDecl>(TD));
      auto *T = new (Mem) EnumType(TC, Keyword, Qualifier, TD, OwnsTag,
                                   IsInjected, CanonicalType);
      assert(reinterpret_cast<void *>(T) ==
                 reinterpret_cast<void *>(static_cast<TagType *>(T)) &&
             "TagType must be the first base of EnumType");
      return T;
    }
    case Type::Record: {
      assert(isa<RecordDecl>(TD));
      auto *T = new (Mem) RecordType(TC, Keyword, Qualifier, TD, OwnsTag,
                                     IsInjected, CanonicalType);
      assert(reinterpret_cast<void *>(T) ==
                 reinterpret_cast<void *>(static_cast<TagType *>(T)) &&
             "TagType must be the first base of RecordType");
      return T;
    }
    case Type::InjectedClassName: {
      auto *T = new (Mem) InjectedClassNameType(Keyword, Qualifier, TD,
                                                IsInjected, CanonicalType);
      assert(reinterpret_cast<void *>(T) ==
                 reinterpret_cast<void *>(static_cast<TagType *>(T)) &&
             "TagType must be the first base of InjectedClassNameType");
      return T;
    }
    default:
      llvm_unreachable("unexpected type class");
    }
  }();
  // Sanity-check that the constructed node reflects exactly what was asked.
  assert(T->getKeyword() == Keyword);
  assert(T->getQualifier() == Qualifier);
  assert(T->getDecl() == TD);
  assert(T->isInjected() == IsInjected);
  assert(T->isTagOwned() == OwnsTag);
  assert((T->isCanonicalUnqualified()
              ? QualType()
              : T->getCanonicalTypeInternal()) == QualType(CanonicalType, 0));
  Types.push_back(Elt: T);
  return T;
}
5447
5448static const TagDecl *getNonInjectedClassName(const TagDecl *TD) {
5449 if (const auto *RD = dyn_cast<CXXRecordDecl>(Val: TD);
5450 RD && RD->isInjectedClassName())
5451 return cast<TagDecl>(Val: RD->getDeclContext());
5452 return TD;
5453}
5454
5455CanQualType ASTContext::getCanonicalTagType(const TagDecl *TD) const {
5456 TD = ::getNonInjectedClassName(TD)->getCanonicalDecl();
5457 if (TD->TypeForDecl)
5458 return TD->TypeForDecl->getCanonicalTypeUnqualified();
5459
5460 const Type *CanonicalType = getTagTypeInternal(
5461 Keyword: ElaboratedTypeKeyword::None,
5462 /*Qualifier=*/std::nullopt, TD,
5463 /*OwnsTag=*/false, /*IsInjected=*/false, /*CanonicalType=*/nullptr,
5464 /*WithFoldingSetNode=*/false);
5465 TD->TypeForDecl = CanonicalType;
5466 return CanQualType::CreateUnsafe(Other: QualType(CanonicalType, 0));
5467}
5468
/// Return the (possibly elaborated) TagType for \p TD, uniqued either via the
/// declaration's own cache (for the preferred-keyword, unqualified,
/// non-owning form) or via the TagTypes folding set.
QualType ASTContext::getTagType(ElaboratedTypeKeyword Keyword,
                                NestedNameSpecifier Qualifier,
                                const TagDecl *TD, bool OwnsTag) const {

  const TagDecl *NonInjectedTD = ::getNonInjectedClassName(TD);
  bool IsInjected = TD != NonInjectedTD;

  // In C++ the unelaborated form is preferred; in C the tag keyword
  // (struct/union/enum) is part of how the type is normally written.
  ElaboratedTypeKeyword PreferredKeyword =
      getLangOpts().CPlusPlus ? ElaboratedTypeKeyword::None
                              : KeywordHelpers::getKeywordForTagTypeKind(
                                    Tag: NonInjectedTD->getTagKind());

  // Fast path: the preferred, unqualified, non-owning form is cached directly
  // on the declaration (TypeForDecl); a cached canonical node does not count
  // here, since it may not carry the preferred keyword.
  if (Keyword == PreferredKeyword && !Qualifier && !OwnsTag) {
    if (const Type *T = TD->TypeForDecl; T && !T->isCanonicalUnqualified())
      return QualType(T, 0);

    const Type *CanonicalType = getCanonicalTagType(TD: NonInjectedTD).getTypePtr();
    const Type *T =
        getTagTypeInternal(Keyword,
                           /*Qualifier=*/std::nullopt, TD: NonInjectedTD,
                           /*OwnsTag=*/false, IsInjected, CanonicalType,
                           /*WithFoldingSetNode=*/false);
    TD->TypeForDecl = T;
    return QualType(T, 0);
  }

  // Otherwise unique the node through the TagTypes folding set.
  llvm::FoldingSetNodeID ID;
  TagTypeFoldingSetPlaceholder::Profile(ID, Keyword, Qualifier, Tag: NonInjectedTD,
                                        OwnsTag, IsInjected);

  void *InsertPos = nullptr;
  if (TagTypeFoldingSetPlaceholder *T =
          TagTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(T->getTagType(), 0);

  const Type *CanonicalType = getCanonicalTagType(TD: NonInjectedTD).getTypePtr();
  TagType *T =
      getTagTypeInternal(Keyword, Qualifier, TD: NonInjectedTD, OwnsTag, IsInjected,
                         CanonicalType, /*WithFoldingSetNode=*/true);
  TagTypes.InsertNode(N: TagTypeFoldingSetPlaceholder::fromTagType(T), InsertPos);
  return QualType(T, 0);
}
5511
5512bool ASTContext::computeBestEnumTypes(bool IsPacked, unsigned NumNegativeBits,
5513 unsigned NumPositiveBits,
5514 QualType &BestType,
5515 QualType &BestPromotionType) {
5516 unsigned IntWidth = Target->getIntWidth();
5517 unsigned CharWidth = Target->getCharWidth();
5518 unsigned ShortWidth = Target->getShortWidth();
5519 bool EnumTooLarge = false;
5520 unsigned BestWidth;
5521 if (NumNegativeBits) {
5522 // If there is a negative value, figure out the smallest integer type (of
5523 // int/long/longlong) that fits.
5524 // If it's packed, check also if it fits a char or a short.
5525 if (IsPacked && NumNegativeBits <= CharWidth &&
5526 NumPositiveBits < CharWidth) {
5527 BestType = SignedCharTy;
5528 BestWidth = CharWidth;
5529 } else if (IsPacked && NumNegativeBits <= ShortWidth &&
5530 NumPositiveBits < ShortWidth) {
5531 BestType = ShortTy;
5532 BestWidth = ShortWidth;
5533 } else if (NumNegativeBits <= IntWidth && NumPositiveBits < IntWidth) {
5534 BestType = IntTy;
5535 BestWidth = IntWidth;
5536 } else {
5537 BestWidth = Target->getLongWidth();
5538
5539 if (NumNegativeBits <= BestWidth && NumPositiveBits < BestWidth) {
5540 BestType = LongTy;
5541 } else {
5542 BestWidth = Target->getLongLongWidth();
5543
5544 if (NumNegativeBits > BestWidth || NumPositiveBits >= BestWidth)
5545 EnumTooLarge = true;
5546 BestType = LongLongTy;
5547 }
5548 }
5549 BestPromotionType = (BestWidth <= IntWidth ? IntTy : BestType);
5550 } else {
5551 // If there is no negative value, figure out the smallest type that fits
5552 // all of the enumerator values.
5553 // If it's packed, check also if it fits a char or a short.
5554 if (IsPacked && NumPositiveBits <= CharWidth) {
5555 BestType = UnsignedCharTy;
5556 BestPromotionType = IntTy;
5557 BestWidth = CharWidth;
5558 } else if (IsPacked && NumPositiveBits <= ShortWidth) {
5559 BestType = UnsignedShortTy;
5560 BestPromotionType = IntTy;
5561 BestWidth = ShortWidth;
5562 } else if (NumPositiveBits <= IntWidth) {
5563 BestType = UnsignedIntTy;
5564 BestWidth = IntWidth;
5565 BestPromotionType = (NumPositiveBits == BestWidth || !LangOpts.CPlusPlus)
5566 ? UnsignedIntTy
5567 : IntTy;
5568 } else if (NumPositiveBits <= (BestWidth = Target->getLongWidth())) {
5569 BestType = UnsignedLongTy;
5570 BestPromotionType = (NumPositiveBits == BestWidth || !LangOpts.CPlusPlus)
5571 ? UnsignedLongTy
5572 : LongTy;
5573 } else {
5574 BestWidth = Target->getLongLongWidth();
5575 if (NumPositiveBits > BestWidth) {
5576 // This can happen with bit-precise integer types, but those are not
5577 // allowed as the type for an enumerator per C23 6.7.2.2p4 and p12.
5578 // FIXME: GCC uses __int128_t and __uint128_t for cases that fit within
5579 // a 128-bit integer, we should consider doing the same.
5580 EnumTooLarge = true;
5581 }
5582 BestType = UnsignedLongLongTy;
5583 BestPromotionType = (NumPositiveBits == BestWidth || !LangOpts.CPlusPlus)
5584 ? UnsignedLongLongTy
5585 : LongLongTy;
5586 }
5587 }
5588 return EnumTooLarge;
5589}
5590
5591bool ASTContext::isRepresentableIntegerValue(llvm::APSInt &Value, QualType T) {
5592 assert((T->isIntegralType(*this) || T->isEnumeralType()) &&
5593 "Integral type required!");
5594 unsigned BitWidth = getIntWidth(T);
5595
5596 if (Value.isUnsigned() || Value.isNonNegative()) {
5597 if (T->isSignedIntegerOrEnumerationType())
5598 --BitWidth;
5599 return Value.getActiveBits() <= BitWidth;
5600 }
5601 return Value.getSignificantBits() <= BitWidth;
5602}
5603
5604UnresolvedUsingType *ASTContext::getUnresolvedUsingTypeInternal(
5605 ElaboratedTypeKeyword Keyword, NestedNameSpecifier Qualifier,
5606 const UnresolvedUsingTypenameDecl *D, void *InsertPos,
5607 const Type *CanonicalType) const {
5608 void *Mem = Allocate(
5609 Size: UnresolvedUsingType::totalSizeToAlloc<
5610 FoldingSetPlaceholder<UnresolvedUsingType>, NestedNameSpecifier>(
5611 Counts: !!InsertPos, Counts: !!Qualifier),
5612 Align: alignof(UnresolvedUsingType));
5613 auto *T = new (Mem) UnresolvedUsingType(Keyword, Qualifier, D, CanonicalType);
5614 if (InsertPos) {
5615 auto *Placeholder = new (T->getFoldingSetPlaceholder())
5616 FoldingSetPlaceholder<TypedefType>();
5617 TypedefTypes.InsertNode(N: Placeholder, InsertPos);
5618 }
5619 Types.push_back(Elt: T);
5620 return T;
5621}
5622
5623CanQualType ASTContext::getCanonicalUnresolvedUsingType(
5624 const UnresolvedUsingTypenameDecl *D) const {
5625 D = D->getCanonicalDecl();
5626 if (D->TypeForDecl)
5627 return D->TypeForDecl->getCanonicalTypeUnqualified();
5628
5629 const Type *CanonicalType = getUnresolvedUsingTypeInternal(
5630 Keyword: ElaboratedTypeKeyword::None,
5631 /*Qualifier=*/std::nullopt, D,
5632 /*InsertPos=*/nullptr, /*CanonicalType=*/nullptr);
5633 D->TypeForDecl = CanonicalType;
5634 return CanQualType::CreateUnsafe(Other: QualType(CanonicalType, 0));
5635}
5636
5637QualType
5638ASTContext::getUnresolvedUsingType(ElaboratedTypeKeyword Keyword,
5639 NestedNameSpecifier Qualifier,
5640 const UnresolvedUsingTypenameDecl *D) const {
5641 if (Keyword == ElaboratedTypeKeyword::None && !Qualifier) {
5642 if (const Type *T = D->TypeForDecl; T && !T->isCanonicalUnqualified())
5643 return QualType(T, 0);
5644
5645 const Type *CanonicalType = getCanonicalUnresolvedUsingType(D).getTypePtr();
5646 const Type *T =
5647 getUnresolvedUsingTypeInternal(Keyword: ElaboratedTypeKeyword::None,
5648 /*Qualifier=*/std::nullopt, D,
5649 /*InsertPos=*/nullptr, CanonicalType);
5650 D->TypeForDecl = T;
5651 return QualType(T, 0);
5652 }
5653
5654 llvm::FoldingSetNodeID ID;
5655 UnresolvedUsingType::Profile(ID, Keyword, Qualifier, D);
5656
5657 void *InsertPos = nullptr;
5658 if (FoldingSetPlaceholder<UnresolvedUsingType> *Placeholder =
5659 UnresolvedUsingTypes.FindNodeOrInsertPos(ID, InsertPos))
5660 return QualType(Placeholder->getType(), 0);
5661 assert(InsertPos);
5662
5663 const Type *CanonicalType = getCanonicalUnresolvedUsingType(D).getTypePtr();
5664 const Type *T = getUnresolvedUsingTypeInternal(Keyword, Qualifier, D,
5665 InsertPos, CanonicalType);
5666 return QualType(T, 0);
5667}
5668
5669QualType ASTContext::getAttributedType(attr::Kind attrKind,
5670 QualType modifiedType,
5671 QualType equivalentType,
5672 const Attr *attr) const {
5673 llvm::FoldingSetNodeID id;
5674 AttributedType::Profile(ID&: id, attrKind, modified: modifiedType, equivalent: equivalentType, attr);
5675
5676 void *insertPos = nullptr;
5677 AttributedType *type = AttributedTypes.FindNodeOrInsertPos(ID: id, InsertPos&: insertPos);
5678 if (type) return QualType(type, 0);
5679
5680 assert(!attr || attr->getKind() == attrKind);
5681
5682 QualType canon = getCanonicalType(T: equivalentType);
5683 type = new (*this, alignof(AttributedType))
5684 AttributedType(canon, attrKind, attr, modifiedType, equivalentType);
5685
5686 Types.push_back(Elt: type);
5687 AttributedTypes.InsertNode(N: type, InsertPos: insertPos);
5688
5689 return QualType(type, 0);
5690}
5691
5692QualType ASTContext::getAttributedType(const Attr *attr, QualType modifiedType,
5693 QualType equivalentType) const {
5694 return getAttributedType(attrKind: attr->getKind(), modifiedType, equivalentType, attr);
5695}
5696
5697QualType ASTContext::getAttributedType(NullabilityKind nullability,
5698 QualType modifiedType,
5699 QualType equivalentType) {
5700 switch (nullability) {
5701 case NullabilityKind::NonNull:
5702 return getAttributedType(attrKind: attr::TypeNonNull, modifiedType, equivalentType);
5703
5704 case NullabilityKind::Nullable:
5705 return getAttributedType(attrKind: attr::TypeNullable, modifiedType, equivalentType);
5706
5707 case NullabilityKind::NullableResult:
5708 return getAttributedType(attrKind: attr::TypeNullableResult, modifiedType,
5709 equivalentType);
5710
5711 case NullabilityKind::Unspecified:
5712 return getAttributedType(attrKind: attr::TypeNullUnspecified, modifiedType,
5713 equivalentType);
5714 }
5715
5716 llvm_unreachable("Unknown nullability kind");
5717}
5718
5719QualType ASTContext::getBTFTagAttributedType(const BTFTypeTagAttr *BTFAttr,
5720 QualType Wrapped) const {
5721 llvm::FoldingSetNodeID ID;
5722 BTFTagAttributedType::Profile(ID, Wrapped, BTFAttr);
5723
5724 void *InsertPos = nullptr;
5725 BTFTagAttributedType *Ty =
5726 BTFTagAttributedTypes.FindNodeOrInsertPos(ID, InsertPos);
5727 if (Ty)
5728 return QualType(Ty, 0);
5729
5730 QualType Canon = getCanonicalType(T: Wrapped);
5731 Ty = new (*this, alignof(BTFTagAttributedType))
5732 BTFTagAttributedType(Canon, Wrapped, BTFAttr);
5733
5734 Types.push_back(Elt: Ty);
5735 BTFTagAttributedTypes.InsertNode(N: Ty, InsertPos);
5736
5737 return QualType(Ty, 0);
5738}
5739
5740QualType ASTContext::getOverflowBehaviorType(const OverflowBehaviorAttr *Attr,
5741 QualType Underlying) const {
5742 const IdentifierInfo *II = Attr->getBehaviorKind();
5743 StringRef IdentName = II->getName();
5744 OverflowBehaviorType::OverflowBehaviorKind Kind;
5745 if (IdentName == "wrap") {
5746 Kind = OverflowBehaviorType::OverflowBehaviorKind::Wrap;
5747 } else if (IdentName == "trap") {
5748 Kind = OverflowBehaviorType::OverflowBehaviorKind::Trap;
5749 } else {
5750 return Underlying;
5751 }
5752
5753 return getOverflowBehaviorType(Kind, Wrapped: Underlying);
5754}
5755
5756QualType ASTContext::getOverflowBehaviorType(
5757 OverflowBehaviorType::OverflowBehaviorKind Kind,
5758 QualType Underlying) const {
5759 assert(!Underlying->isOverflowBehaviorType() &&
5760 "Cannot have underlying types that are themselves OBTs");
5761 llvm::FoldingSetNodeID ID;
5762 OverflowBehaviorType::Profile(ID, Underlying, Kind);
5763 void *InsertPos = nullptr;
5764
5765 if (OverflowBehaviorType *OBT =
5766 OverflowBehaviorTypes.FindNodeOrInsertPos(ID, InsertPos)) {
5767 return QualType(OBT, 0);
5768 }
5769
5770 QualType Canonical;
5771 if (!Underlying.isCanonical() || Underlying.hasLocalQualifiers()) {
5772 SplitQualType canonSplit = getCanonicalType(T: Underlying).split();
5773 Canonical = getOverflowBehaviorType(Kind, Underlying: QualType(canonSplit.Ty, 0));
5774 Canonical = getQualifiedType(T: Canonical, Qs: canonSplit.Quals);
5775 assert(!OverflowBehaviorTypes.FindNodeOrInsertPos(ID, InsertPos) &&
5776 "Shouldn't be in the map");
5777 }
5778
5779 OverflowBehaviorType *Ty = new (*this, alignof(OverflowBehaviorType))
5780 OverflowBehaviorType(Canonical, Underlying, Kind);
5781
5782 Types.push_back(Elt: Ty);
5783 OverflowBehaviorTypes.InsertNode(N: Ty, InsertPos);
5784 return QualType(Ty, 0);
5785}
5786
5787QualType ASTContext::getHLSLAttributedResourceType(
5788 QualType Wrapped, QualType Contained,
5789 const HLSLAttributedResourceType::Attributes &Attrs) {
5790
5791 llvm::FoldingSetNodeID ID;
5792 HLSLAttributedResourceType::Profile(ID, Wrapped, Contained, Attrs);
5793
5794 void *InsertPos = nullptr;
5795 HLSLAttributedResourceType *Ty =
5796 HLSLAttributedResourceTypes.FindNodeOrInsertPos(ID, InsertPos);
5797 if (Ty)
5798 return QualType(Ty, 0);
5799
5800 Ty = new (*this, alignof(HLSLAttributedResourceType))
5801 HLSLAttributedResourceType(Wrapped, Contained, Attrs);
5802
5803 Types.push_back(Elt: Ty);
5804 HLSLAttributedResourceTypes.InsertNode(N: Ty, InsertPos);
5805
5806 return QualType(Ty, 0);
5807}
5808
5809QualType ASTContext::getHLSLInlineSpirvType(uint32_t Opcode, uint32_t Size,
5810 uint32_t Alignment,
5811 ArrayRef<SpirvOperand> Operands) {
5812 llvm::FoldingSetNodeID ID;
5813 HLSLInlineSpirvType::Profile(ID, Opcode, Size, Alignment, Operands);
5814
5815 void *InsertPos = nullptr;
5816 HLSLInlineSpirvType *Ty =
5817 HLSLInlineSpirvTypes.FindNodeOrInsertPos(ID, InsertPos);
5818 if (Ty)
5819 return QualType(Ty, 0);
5820
5821 void *Mem = Allocate(
5822 Size: HLSLInlineSpirvType::totalSizeToAlloc<SpirvOperand>(Counts: Operands.size()),
5823 Align: alignof(HLSLInlineSpirvType));
5824
5825 Ty = new (Mem) HLSLInlineSpirvType(Opcode, Size, Alignment, Operands);
5826
5827 Types.push_back(Elt: Ty);
5828 HLSLInlineSpirvTypes.InsertNode(N: Ty, InsertPos);
5829
5830 return QualType(Ty, 0);
5831}
5832
/// Retrieve a substitution-result type.
QualType ASTContext::getSubstTemplateTypeParmType(QualType Replacement,
                                                  Decl *AssociatedDecl,
                                                  unsigned Index,
                                                  UnsignedOrNone PackIndex,
                                                  bool Final) const {
  // Unique on (replacement, associated decl, index, pack index, finality).
  llvm::FoldingSetNodeID ID;
  SubstTemplateTypeParmType::Profile(ID, Replacement, AssociatedDecl, Index,
                                     PackIndex, Final);
  void *InsertPos = nullptr;
  SubstTemplateTypeParmType *SubstParm =
      SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (!SubstParm) {
    // One trailing QualType slot is allocated only when the replacement is
    // non-canonical — presumably to hold the canonical type separately.
    void *Mem = Allocate(Size: SubstTemplateTypeParmType::totalSizeToAlloc<QualType>(
                             Counts: !Replacement.isCanonical()),
                         Align: alignof(SubstTemplateTypeParmType));
    SubstParm = new (Mem) SubstTemplateTypeParmType(Replacement, AssociatedDecl,
                                                    Index, PackIndex, Final);
    Types.push_back(Elt: SubstParm);
    SubstTemplateTypeParmTypes.InsertNode(N: SubstParm, InsertPos);
  }

  return QualType(SubstParm, 0);
}
5858
/// Retrieve the uniqued type representing the substitution of an entire
/// template argument pack for a template type parameter pack.
QualType
ASTContext::getSubstTemplateTypeParmPackType(Decl *AssociatedDecl,
                                             unsigned Index, bool Final,
                                             const TemplateArgument &ArgPack) {
#ifndef NDEBUG
  for (const auto &P : ArgPack.pack_elements())
    assert(P.getKind() == TemplateArgument::Type && "Pack contains a non-type");
#endif

  llvm::FoldingSetNodeID ID;
  SubstTemplateTypeParmPackType::Profile(ID, AssociatedDecl, Index, Final,
                                         ArgPack);
  void *InsertPos = nullptr;
  if (SubstTemplateTypeParmPackType *SubstParm =
          SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(SubstParm, 0);

  // Canonical form: canonical associated decl plus canonical argument pack.
  // When this node is already canonical, Canon stays null.
  QualType Canon;
  {
    TemplateArgument CanonArgPack = getCanonicalTemplateArgument(Arg: ArgPack);
    if (!AssociatedDecl->isCanonicalDecl() ||
        !CanonArgPack.structurallyEquals(Other: ArgPack)) {
      Canon = getSubstTemplateTypeParmPackType(
          AssociatedDecl: AssociatedDecl->getCanonicalDecl(), Index, Final, ArgPack: CanonArgPack);
      // Re-run the lookup: the recursive call above inserted into this
      // folding set, which may have rehashed it and invalidated InsertPos.
      [[maybe_unused]] const auto *Nothing =
          SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!Nothing);
    }
  }

  auto *SubstParm = new (*this, alignof(SubstTemplateTypeParmPackType))
      SubstTemplateTypeParmPackType(Canon, AssociatedDecl, Index, Final,
                                    ArgPack);
  Types.push_back(Elt: SubstParm);
  SubstTemplateTypeParmPackTypes.InsertNode(N: SubstParm, InsertPos);
  return QualType(SubstParm, 0);
}
5896
/// Retrieve the uniqued type representing the substitution of a builtin
/// template's result pack. All pack elements must be types.
QualType
ASTContext::getSubstBuiltinTemplatePack(const TemplateArgument &ArgPack) {
  assert(llvm::all_of(ArgPack.pack_elements(),
                      [](const auto &P) {
                        return P.getKind() == TemplateArgument::Type;
                      }) &&
         "Pack contains a non-type");

  // Unique on the argument pack alone.
  llvm::FoldingSetNodeID ID;
  SubstBuiltinTemplatePackType::Profile(ID, ArgPack);

  void *InsertPos = nullptr;
  if (auto *T =
          SubstBuiltinTemplatePackTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(T, 0);

  // If the pack is not structurally canonical, build the canonical node
  // first; otherwise Canon stays null and this node is its own canonical.
  QualType Canon;
  TemplateArgument CanonArgPack = getCanonicalTemplateArgument(Arg: ArgPack);
  if (!CanonArgPack.structurallyEquals(Other: ArgPack)) {
    Canon = getSubstBuiltinTemplatePack(ArgPack: CanonArgPack);
    // Refresh InsertPos, in case the recursive call above caused rehashing,
    // which would invalidate the bucket pointer.
    [[maybe_unused]] const auto *Nothing =
        SubstBuiltinTemplatePackTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!Nothing);
  }

  auto *PackType = new (*this, alignof(SubstBuiltinTemplatePackType))
      SubstBuiltinTemplatePackType(Canon, ArgPack);
  Types.push_back(Elt: PackType);
  SubstBuiltinTemplatePackTypes.InsertNode(N: PackType, InsertPos);
  return QualType(PackType, 0);
}
5930
/// Retrieve the template type parameter type for a template
/// parameter or parameter pack with the given depth, index, and (optionally)
/// name.
QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
                                             bool ParameterPack,
                                             TemplateTypeParmDecl *TTPDecl) const {
  llvm::FoldingSetNodeID ID;
  TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
  void *InsertPos = nullptr;
  TemplateTypeParmType *TypeParm
    = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (TypeParm)
    return QualType(TypeParm, 0);

  if (TTPDecl) {
    // The canonical form drops the declaration; build (or fetch) it first.
    QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
    TypeParm = new (*this, alignof(TemplateTypeParmType))
        TemplateTypeParmType(Depth, Index, ParameterPack, TTPDecl, Canon);

    // Building the canonical node must not have created this exact node.
    TemplateTypeParmType *TypeCheck
      = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!TypeCheck && "Template type parameter canonical type broken");
    (void)TypeCheck;
  } else
    // No declaration: this node is itself the canonical type.
    TypeParm = new (*this, alignof(TemplateTypeParmType)) TemplateTypeParmType(
        Depth, Index, ParameterPack, /*TTPDecl=*/nullptr, /*Canon=*/QualType());

  Types.push_back(Elt: TypeParm);
  TemplateTypeParmTypes.InsertNode(N: TypeParm, InsertPos);

  return QualType(TypeParm, 0);
}
5964
5965static ElaboratedTypeKeyword
5966getCanonicalElaboratedTypeKeyword(ElaboratedTypeKeyword Keyword) {
5967 switch (Keyword) {
5968 // These are just themselves.
5969 case ElaboratedTypeKeyword::None:
5970 case ElaboratedTypeKeyword::Struct:
5971 case ElaboratedTypeKeyword::Union:
5972 case ElaboratedTypeKeyword::Enum:
5973 case ElaboratedTypeKeyword::Interface:
5974 return Keyword;
5975
5976 // These are equivalent.
5977 case ElaboratedTypeKeyword::Typename:
5978 return ElaboratedTypeKeyword::None;
5979
5980 // These are functionally equivalent, so relying on their equivalence is
5981 // IFNDR. By making them equivalent, we disallow overloading, which at least
5982 // can produce a diagnostic.
5983 case ElaboratedTypeKeyword::Class:
5984 return ElaboratedTypeKeyword::Struct;
5985 }
5986 llvm_unreachable("unexpected keyword kind");
5987}
5988
/// Build a TemplateSpecializationType and wrap it in a TypeSourceInfo
/// populated with the supplied source locations.
TypeSourceInfo *ASTContext::getTemplateSpecializationTypeInfo(
    ElaboratedTypeKeyword Keyword, SourceLocation ElaboratedKeywordLoc,
    NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKeywordLoc,
    TemplateName Name, SourceLocation NameLoc,
    const TemplateArgumentListInfo &SpecifiedArgs,
    ArrayRef<TemplateArgument> CanonicalArgs, QualType Underlying) const {
  QualType TST = getTemplateSpecializationType(
      Keyword, T: Name, SpecifiedArgs: SpecifiedArgs.arguments(), CanonicalArgs, Canon: Underlying);

  // Fill in the location data on the freshly created TypeLoc.
  TypeSourceInfo *TSI = CreateTypeSourceInfo(T: TST);
  TSI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>().set(
      ElaboratedKeywordLoc, QualifierLoc, TemplateKeywordLoc, NameLoc,
      TAL: SpecifiedArgs);
  return TSI;
}
6004
6005QualType ASTContext::getTemplateSpecializationType(
6006 ElaboratedTypeKeyword Keyword, TemplateName Template,
6007 ArrayRef<TemplateArgumentLoc> SpecifiedArgs,
6008 ArrayRef<TemplateArgument> CanonicalArgs, QualType Underlying) const {
6009 SmallVector<TemplateArgument, 4> SpecifiedArgVec;
6010 SpecifiedArgVec.reserve(N: SpecifiedArgs.size());
6011 for (const TemplateArgumentLoc &Arg : SpecifiedArgs)
6012 SpecifiedArgVec.push_back(Elt: Arg.getArgument());
6013
6014 return getTemplateSpecializationType(Keyword, T: Template, SpecifiedArgs: SpecifiedArgVec,
6015 CanonicalArgs, Underlying);
6016}
6017
6018[[maybe_unused]] static bool
6019hasAnyPackExpansions(ArrayRef<TemplateArgument> Args) {
6020 for (const TemplateArgument &Arg : Args)
6021 if (Arg.isPackExpansion())
6022 return true;
6023 return false;
6024}
6025
/// Build (or find) the canonical, dependent TemplateSpecializationType for
/// an already-canonical template name, keyword, and argument list.
QualType ASTContext::getCanonicalTemplateSpecializationType(
    ElaboratedTypeKeyword Keyword, TemplateName Template,
    ArrayRef<TemplateArgument> Args) const {
  // Preconditions: every input must already be canonical; this function
  // only uniques the node.
  assert(Template ==
         getCanonicalTemplateName(Template, /*IgnoreDeduced=*/true));
  assert((Keyword == ElaboratedTypeKeyword::None ||
          Template.getAsDependentTemplateName()));
#ifndef NDEBUG
  for (const auto &Arg : Args)
    assert(Arg.structurallyEquals(getCanonicalTemplateArgument(Arg)));
#endif

  llvm::FoldingSetNodeID ID;
  TemplateSpecializationType::Profile(ID, Keyword, T: Template, Args, Underlying: QualType(),
                                      Context: *this);
  void *InsertPos = nullptr;
  if (auto *T = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(T, 0);

  // Arguments live in trailing storage after the node.
  void *Mem = Allocate(Size: sizeof(TemplateSpecializationType) +
                           sizeof(TemplateArgument) * Args.size(),
                       Align: alignof(TemplateSpecializationType));
  auto *Spec =
      new (Mem) TemplateSpecializationType(Keyword, Template,
                                           /*IsAlias=*/false, Args, QualType());
  assert(Spec->isDependentType() &&
         "canonical template specialization must be dependent");
  Types.push_back(Elt: Spec);
  TemplateSpecializationTypes.InsertNode(N: Spec, InsertPos);
  return QualType(Spec, 0);
}
6057
/// Build a (possibly sugared) TemplateSpecializationType. If \p Underlying
/// is null, the canonical form is computed here; when the inputs are already
/// fully canonical, the canonical node itself is returned. Note that sugared
/// nodes built at the end are NOT uniqued in a folding set.
QualType ASTContext::getTemplateSpecializationType(
    ElaboratedTypeKeyword Keyword, TemplateName Template,
    ArrayRef<TemplateArgument> SpecifiedArgs,
    ArrayRef<TemplateArgument> CanonicalArgs, QualType Underlying) const {
  const auto *TD = Template.getAsTemplateDecl(/*IgnoreDeduced=*/true);
  bool IsTypeAlias = TD && TD->isTypeAlias();
  if (Underlying.isNull()) {
    // Canonicalize the template name and the keyword; a keyword is only
    // meaningful on a dependent template name.
    TemplateName CanonTemplate =
        getCanonicalTemplateName(Name: Template, /*IgnoreDeduced=*/true);
    ElaboratedTypeKeyword CanonKeyword =
        CanonTemplate.getAsDependentTemplateName()
            ? getCanonicalElaboratedTypeKeyword(Keyword)
            : ElaboratedTypeKeyword::None;
    bool NonCanonical = Template != CanonTemplate || Keyword != CanonKeyword;
    SmallVector<TemplateArgument, 4> CanonArgsVec;
    if (CanonicalArgs.empty()) {
      // Canonicalize the specified arguments ourselves; track whether that
      // changed anything.
      CanonArgsVec = SmallVector<TemplateArgument, 4>(SpecifiedArgs);
      NonCanonical |= canonicalizeTemplateArguments(Args: CanonArgsVec);
      CanonicalArgs = CanonArgsVec;
    } else {
      // The caller supplied canonical arguments; compare structurally to see
      // whether the specified ones already match them.
      NonCanonical |= !llvm::equal(
          LRange&: SpecifiedArgs, RRange&: CanonicalArgs,
          P: [](const TemplateArgument &A, const TemplateArgument &B) {
            return A.structurallyEquals(Other: B);
          });
    }

    // We can get here with an alias template when the specialization
    // contains a pack expansion that does not match up with a parameter
    // pack, or a builtin template which cannot be resolved due to dependency.
    assert((!isa_and_nonnull<TypeAliasTemplateDecl>(TD) ||
            hasAnyPackExpansions(CanonicalArgs)) &&
           "Caller must compute aliased type");
    IsTypeAlias = false;

    Underlying = getCanonicalTemplateSpecializationType(
        Keyword: CanonKeyword, Template: CanonTemplate, Args: CanonicalArgs);
    if (!NonCanonical)
      return Underlying;
  }
  // Allocate the sugared node; type aliases carry one extra QualType of
  // trailing storage.
  void *Mem = Allocate(Size: sizeof(TemplateSpecializationType) +
                           sizeof(TemplateArgument) * SpecifiedArgs.size() +
                           (IsTypeAlias ? sizeof(QualType) : 0),
                       Align: alignof(TemplateSpecializationType));
  auto *Spec = new (Mem) TemplateSpecializationType(
      Keyword, Template, IsTypeAlias, SpecifiedArgs, Underlying);
  Types.push_back(Elt: Spec);
  return QualType(Spec, 0);
}
6107
6108QualType
6109ASTContext::getParenType(QualType InnerType) const {
6110 llvm::FoldingSetNodeID ID;
6111 ParenType::Profile(ID, Inner: InnerType);
6112
6113 void *InsertPos = nullptr;
6114 ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
6115 if (T)
6116 return QualType(T, 0);
6117
6118 QualType Canon = InnerType;
6119 if (!Canon.isCanonical()) {
6120 Canon = getCanonicalType(T: InnerType);
6121 ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
6122 assert(!CheckT && "Paren canonical type broken");
6123 (void)CheckT;
6124 }
6125
6126 T = new (*this, alignof(ParenType)) ParenType(InnerType, Canon);
6127 Types.push_back(Elt: T);
6128 ParenTypes.InsertNode(N: T, InsertPos);
6129 return QualType(T, 0);
6130}
6131
6132QualType
6133ASTContext::getMacroQualifiedType(QualType UnderlyingTy,
6134 const IdentifierInfo *MacroII) const {
6135 QualType Canon = UnderlyingTy;
6136 if (!Canon.isCanonical())
6137 Canon = getCanonicalType(T: UnderlyingTy);
6138
6139 auto *newType = new (*this, alignof(MacroQualifiedType))
6140 MacroQualifiedType(UnderlyingTy, Canon, MacroII);
6141 Types.push_back(Elt: newType);
6142 return QualType(newType, 0);
6143}
6144
/// Retrieve the uniqued DependentNameType for the given keyword,
/// nested-name-specifier, and identifier.
QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
                                          NestedNameSpecifier NNS,
                                          const IdentifierInfo *Name) const {
  llvm::FoldingSetNodeID ID;
  DependentNameType::Profile(ID, Keyword, NNS, Name);

  void *InsertPos = nullptr;
  if (DependentNameType *T =
          DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(T, 0);

  // Canonicalize the keyword and the nested-name-specifier; if either
  // changed, the canonical node must be built first.
  ElaboratedTypeKeyword CanonKeyword =
      getCanonicalElaboratedTypeKeyword(Keyword);
  NestedNameSpecifier CanonNNS = NNS.getCanonical();

  QualType Canon;
  if (CanonKeyword != Keyword || CanonNNS != NNS) {
    Canon = getDependentNameType(Keyword: CanonKeyword, NNS: CanonNNS, Name);
    // Re-run the lookup: the recursive call above inserted into this folding
    // set, which may have rehashed it and invalidated InsertPos.
    [[maybe_unused]] DependentNameType *T =
        DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!T && "broken canonicalization");
    assert(Canon.isCanonical());
  }

  DependentNameType *T = new (*this, alignof(DependentNameType))
      DependentNameType(Keyword, NNS, Name, Canon);
  Types.push_back(Elt: T);
  DependentNameTypes.InsertNode(N: T, InsertPos);
  return QualType(T, 0);
}
6175
/// Build the template argument that a template parameter "injects" when it
/// names itself: a type for type parameters, a DeclRefExpr for non-type
/// parameters, and a template name for template template parameters. Packs
/// are wrapped in a pack expansion and then in a one-element argument pack.
TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) const {
  TemplateArgument Arg;
  if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Val: Param)) {
    // Type parameter: the argument is the parameter's own type.
    QualType ArgType = getTypeDeclType(Decl: TTP);
    if (TTP->isParameterPack())
      ArgType = getPackExpansionType(Pattern: ArgType, NumExpansions: std::nullopt);

    Arg = TemplateArgument(ArgType);
  } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Val: Param)) {
    // Non-type parameter: build a DeclRefExpr naming the parameter.
    QualType T =
        NTTP->getType().getNonPackExpansionType().getNonLValueExprType(Context: *this);
    // For class NTTPs, ensure we include the 'const' so the type matches that
    // of a real template argument.
    // FIXME: It would be more faithful to model this as something like an
    // lvalue-to-rvalue conversion applied to a const-qualified lvalue.
    ExprValueKind VK;
    if (T->isRecordType()) {
      // C++ [temp.param]p8: An id-expression naming a non-type
      // template-parameter of class type T denotes a static storage duration
      // object of type const T.
      T.addConst();
      VK = VK_LValue;
    } else {
      VK = Expr::getValueKindForType(T: NTTP->getType());
    }
    Expr *E = new (*this)
        DeclRefExpr(*this, NTTP, /*RefersToEnclosingVariableOrCapture=*/false,
                    T, VK, NTTP->getLocation());

    if (NTTP->isParameterPack())
      E = new (*this) PackExpansionExpr(E, NTTP->getLocation(), std::nullopt);
    Arg = TemplateArgument(E, /*IsCanonical=*/false);
  } else {
    // Template template parameter: the argument is the (qualified) template
    // name of the parameter itself.
    auto *TTP = cast<TemplateTemplateParmDecl>(Val: Param);
    TemplateName Name = getQualifiedTemplateName(
        /*Qualifier=*/std::nullopt, /*TemplateKeyword=*/false,
        Template: TemplateName(TTP));
    if (TTP->isParameterPack())
      Arg = TemplateArgument(Name, /*NumExpansions=*/std::nullopt);
    else
      Arg = TemplateArgument(Name);
  }

  // A pack parameter's injected argument is itself wrapped in a pack.
  if (Param->isTemplateParameterPack())
    Arg =
        TemplateArgument::CreatePackCopy(Context&: const_cast<ASTContext &>(*this), Args: Arg);

  return Arg;
}
6225
/// Retrieve the uniqued pack expansion ('Pattern...') of \p Pattern.
/// \p ExpectPackInType asserts that the pattern actually contains an
/// unexpanded parameter pack; it is disabled when canonicalizing.
QualType ASTContext::getPackExpansionType(QualType Pattern,
                                          UnsignedOrNone NumExpansions,
                                          bool ExpectPackInType) const {
  assert((!ExpectPackInType || Pattern->containsUnexpandedParameterPack()) &&
         "Pack expansions must expand one or more parameter packs");

  llvm::FoldingSetNodeID ID;
  PackExpansionType::Profile(ID, Pattern, NumExpansions);

  void *InsertPos = nullptr;
  PackExpansionType *T = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  // Canonical form: a pack expansion over the canonical pattern. Canon stays
  // null when the pattern is already canonical.
  QualType Canon;
  if (!Pattern.isCanonical()) {
    Canon = getPackExpansionType(Pattern: getCanonicalType(T: Pattern), NumExpansions,
                                 /*ExpectPackInType=*/false);

    // Find the insert position again, in case we inserted an element into
    // PackExpansionTypes and invalidated our insert position.
    PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  T = new (*this, alignof(PackExpansionType))
      PackExpansionType(Pattern, Canon, NumExpansions);
  Types.push_back(Elt: T);
  PackExpansionTypes.InsertNode(N: T, InsertPos);
  return QualType(T, 0);
}
6256
6257/// CmpProtocolNames - Comparison predicate for sorting protocols
6258/// alphabetically.
6259static int CmpProtocolNames(ObjCProtocolDecl *const *LHS,
6260 ObjCProtocolDecl *const *RHS) {
6261 return DeclarationName::compare(LHS: (*LHS)->getDeclName(), RHS: (*RHS)->getDeclName());
6262}
6263
6264static bool areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) {
6265 if (Protocols.empty()) return true;
6266
6267 if (Protocols[0]->getCanonicalDecl() != Protocols[0])
6268 return false;
6269
6270 for (unsigned i = 1; i != Protocols.size(); ++i)
6271 if (CmpProtocolNames(LHS: &Protocols[i - 1], RHS: &Protocols[i]) >= 0 ||
6272 Protocols[i]->getCanonicalDecl() != Protocols[i])
6273 return false;
6274 return true;
6275}
6276
6277static void
6278SortAndUniqueProtocols(SmallVectorImpl<ObjCProtocolDecl *> &Protocols) {
6279 // Sort protocols, keyed by name.
6280 llvm::array_pod_sort(Start: Protocols.begin(), End: Protocols.end(), Compare: CmpProtocolNames);
6281
6282 // Canonicalize.
6283 for (ObjCProtocolDecl *&P : Protocols)
6284 P = P->getCanonicalDecl();
6285
6286 // Remove duplicates.
6287 auto ProtocolsEnd = llvm::unique(R&: Protocols);
6288 Protocols.erase(CS: ProtocolsEnd, CE: Protocols.end());
6289}
6290
6291QualType ASTContext::getObjCObjectType(QualType BaseType,
6292 ObjCProtocolDecl * const *Protocols,
6293 unsigned NumProtocols) const {
6294 return getObjCObjectType(Base: BaseType, typeArgs: {}, protocols: ArrayRef(Protocols, NumProtocols),
6295 /*isKindOf=*/false);
6296}
6297
/// Retrieve the uniqued ObjCObjectType for the given base type, type
/// arguments, protocol list, and __kindof flag.
QualType ASTContext::getObjCObjectType(
    QualType baseType,
    ArrayRef<QualType> typeArgs,
    ArrayRef<ObjCProtocolDecl *> protocols,
    bool isKindOf) const {
  // If the base type is an interface and there aren't any protocols or
  // type arguments to add, then the interface type will do just fine.
  if (typeArgs.empty() && protocols.empty() && !isKindOf &&
      isa<ObjCInterfaceType>(Val: baseType))
    return baseType;

  // Look in the folding set for an existing type.
  llvm::FoldingSetNodeID ID;
  ObjCObjectTypeImpl::Profile(ID, Base: baseType, typeArgs, protocols, isKindOf);
  void *InsertPos = nullptr;
  if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(QT, 0);

  // Determine the type arguments to be used for canonicalization,
  // which may be explicitly specified here or written on the base
  // type.
  ArrayRef<QualType> effectiveTypeArgs = typeArgs;
  if (effectiveTypeArgs.empty()) {
    if (const auto *baseObject = baseType->getAs<ObjCObjectType>())
      effectiveTypeArgs = baseObject->getTypeArgs();
  }

  // Build the canonical type, which has the canonical base type and a
  // sorted-and-uniqued list of protocols and the type arguments
  // canonicalized.
  QualType canonical;
  bool typeArgsAreCanonical = llvm::all_of(
      Range&: effectiveTypeArgs, P: [&](QualType type) { return type.isCanonical(); });
  bool protocolsSorted = areSortedAndUniqued(Protocols: protocols);
  if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) {
    // Determine the canonical type arguments.
    ArrayRef<QualType> canonTypeArgs;
    SmallVector<QualType, 4> canonTypeArgsVec;
    if (!typeArgsAreCanonical) {
      canonTypeArgsVec.reserve(N: effectiveTypeArgs.size());
      for (auto typeArg : effectiveTypeArgs)
        canonTypeArgsVec.push_back(Elt: getCanonicalType(T: typeArg));
      canonTypeArgs = canonTypeArgsVec;
    } else {
      canonTypeArgs = effectiveTypeArgs;
    }

    // Determine the canonical (sorted, uniqued, canonical-decl) protocols.
    ArrayRef<ObjCProtocolDecl *> canonProtocols;
    SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec;
    if (!protocolsSorted) {
      canonProtocolsVec.append(in_start: protocols.begin(), in_end: protocols.end());
      SortAndUniqueProtocols(Protocols&: canonProtocolsVec);
      canonProtocols = canonProtocolsVec;
    } else {
      canonProtocols = protocols;
    }

    canonical = getObjCObjectType(baseType: getCanonicalType(T: baseType), typeArgs: canonTypeArgs,
                                  protocols: canonProtocols, isKindOf);

    // Regenerate InsertPos.
    ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  // Type arguments and protocols are tail-allocated after the node.
  unsigned size = sizeof(ObjCObjectTypeImpl);
  size += typeArgs.size() * sizeof(QualType);
  size += protocols.size() * sizeof(ObjCProtocolDecl *);
  void *mem = Allocate(Size: size, Align: alignof(ObjCObjectTypeImpl));
  auto *T =
      new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols,
                                   isKindOf);

  Types.push_back(Elt: T);
  ObjCObjectTypes.InsertNode(N: T, InsertPos);
  return QualType(T, 0);
}
6374
/// Apply Objective-C protocol qualifiers to the given type.
/// If this is for the canonical type of a type parameter, we can apply
/// protocol qualifiers on the ObjCObjectPointerType.
/// On failure, \p hasError is set and the original type is returned
/// unchanged.
QualType
ASTContext::applyObjCProtocolQualifiers(QualType type,
                  ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError,
                  bool allowOnPointerType) const {
  hasError = false;

  // Type parameters carry protocol qualifiers on the parameter type itself.
  if (const auto *objT = dyn_cast<ObjCTypeParamType>(Val: type.getTypePtr())) {
    return getObjCTypeParamType(Decl: objT->getDecl(), protocols);
  }

  // Apply protocol qualifiers to ObjCObjectPointerType.
  if (allowOnPointerType) {
    if (const auto *objPtr =
            dyn_cast<ObjCObjectPointerType>(Val: type.getTypePtr())) {
      const ObjCObjectType *objT = objPtr->getObjectType();
      // Merge protocol lists and construct ObjCObjectType.
      SmallVector<ObjCProtocolDecl*, 8> protocolsVec;
      protocolsVec.append(in_start: objT->qual_begin(),
                          in_end: objT->qual_end());
      protocolsVec.append(in_start: protocols.begin(), in_end: protocols.end());
      ArrayRef<ObjCProtocolDecl *> protocols = protocolsVec;
      type = getObjCObjectType(
          baseType: objT->getBaseType(),
          typeArgs: objT->getTypeArgsAsWritten(),
          protocols,
          isKindOf: objT->isKindOfTypeAsWritten());
      return getObjCObjectPointerType(OIT: type);
    }
  }

  // Apply protocol qualifiers to ObjCObjectType.
  if (const auto *objT = dyn_cast<ObjCObjectType>(Val: type.getTypePtr())){
    // FIXME: Check for protocols to which the class type is already
    // known to conform.

    return getObjCObjectType(baseType: objT->getBaseType(),
                             typeArgs: objT->getTypeArgsAsWritten(),
                             protocols,
                             isKindOf: objT->isKindOfTypeAsWritten());
  }

  // If the canonical type is ObjCObjectType, ...
  if (type->isObjCObjectType()) {
    // Silently overwrite any existing protocol qualifiers.
    // TODO: determine whether that's the right thing to do.

    // FIXME: Check for protocols to which the class type is already
    // known to conform.
    return getObjCObjectType(baseType: type, typeArgs: {}, protocols, isKindOf: false);
  }

  // id<protocol-list>
  if (type->isObjCIdType()) {
    const auto *objPtr = type->castAs<ObjCObjectPointerType>();
    type = getObjCObjectType(baseType: ObjCBuiltinIdTy, typeArgs: {}, protocols,
                             isKindOf: objPtr->isKindOfType());
    return getObjCObjectPointerType(OIT: type);
  }

  // Class<protocol-list>
  if (type->isObjCClassType()) {
    const auto *objPtr = type->castAs<ObjCObjectPointerType>();
    type = getObjCObjectType(baseType: ObjCBuiltinClassTy, typeArgs: {}, protocols,
                             isKindOf: objPtr->isKindOfType());
    return getObjCObjectPointerType(OIT: type);
  }

  // Nothing else can take protocol qualifiers: report the error to the
  // caller and hand back the type unchanged.
  hasError = true;
  return type;
}
6448
/// Retrieve the uniqued ObjCTypeParamType for \p Decl with the given
/// protocol qualifiers. The canonical type is the (protocol-qualified)
/// underlying bound type, not the type parameter itself.
QualType
ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl,
                                 ArrayRef<ObjCProtocolDecl *> protocols) const {
  // Look in the folding set for an existing type.
  llvm::FoldingSetNodeID ID;
  ObjCTypeParamType::Profile(ID, OTPDecl: Decl, CanonicalType: Decl->getUnderlyingType(), protocols);
  void *InsertPos = nullptr;
  if (ObjCTypeParamType *TypeParam =
          ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(TypeParam, 0);

  // We canonicalize to the underlying type.
  QualType Canonical = getCanonicalType(T: Decl->getUnderlyingType());
  if (!protocols.empty()) {
    // Apply the protocol qualifiers.
    bool hasError;
    Canonical = getCanonicalType(T: applyObjCProtocolQualifiers(
        type: Canonical, protocols, hasError, allowOnPointerType: true /*allowOnPointerType*/));
    assert(!hasError && "Error when apply protocol qualifier to bound type");
  }

  // Protocols are tail-allocated after the node.
  unsigned size = sizeof(ObjCTypeParamType);
  size += protocols.size() * sizeof(ObjCProtocolDecl *);
  void *mem = Allocate(Size: size, Align: alignof(ObjCTypeParamType));
  auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols);

  Types.push_back(Elt: newType);
  ObjCTypeParamTypes.InsertNode(N: newType, InsertPos);
  return QualType(newType, 0);
}
6479
/// Copy \p Orig's underlying (bound) type onto \p New, rebuilding New's
/// TypeForDecl so it reflects the adjusted bound while keeping New's
/// existing protocol qualifiers.
void ASTContext::adjustObjCTypeParamBoundType(const ObjCTypeParamDecl *Orig,
                                              ObjCTypeParamDecl *New) const {
  New->setTypeSourceInfo(getTrivialTypeSourceInfo(T: Orig->getUnderlyingType()));
  // Update TypeForDecl after updating TypeSourceInfo.
  auto *NewTypeParamTy = cast<ObjCTypeParamType>(Val: New->TypeForDecl);
  // Preserve the protocol qualifiers already present on New's type.
  SmallVector<ObjCProtocolDecl *, 8> protocols;
  protocols.append(in_start: NewTypeParamTy->qual_begin(), in_end: NewTypeParamTy->qual_end());
  QualType UpdatedTy = getObjCTypeParamType(Decl: New, protocols);
  New->TypeForDecl = UpdatedTy.getTypePtr();
}
6490
6491/// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's
6492/// protocol list adopt all protocols in QT's qualified-id protocol
6493/// list.
6494bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT,
6495 ObjCInterfaceDecl *IC) {
6496 if (!QT->isObjCQualifiedIdType())
6497 return false;
6498
6499 if (const auto *OPT = QT->getAs<ObjCObjectPointerType>()) {
6500 // If both the right and left sides have qualifiers.
6501 for (auto *Proto : OPT->quals()) {
6502 if (!IC->ClassImplementsProtocol(lProto: Proto, lookupCategory: false))
6503 return false;
6504 }
6505 return true;
6506 }
6507 return false;
6508}
6509
/// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in
/// QT's qualified-id protocol list adopt all protocols in IDecl's list
/// of protocols.
bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT,
                                                ObjCInterfaceDecl *IDecl) {
  if (!QT->isObjCQualifiedIdType())
    return false;
  const auto *OPT = QT->getAs<ObjCObjectPointerType>();
  if (!OPT)
    return false;
  if (!IDecl->hasDefinition())
    return false;
  // Gather every protocol IDecl inherits (directly or transitively).
  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols;
  CollectInheritedProtocols(CDecl: IDecl, Protocols&: InheritedProtocols);
  if (InheritedProtocols.empty())
    return false;
  // Check that if every protocol in list of id<plist> conforms to a protocol
  // of IDecl's, then bridge casting is ok.
  bool Conforms = false;
  for (auto *Proto : OPT->quals()) {
    Conforms = false;
    for (auto *PI : InheritedProtocols) {
      if (ProtocolCompatibleWithProtocol(lProto: Proto, rProto: PI)) {
        Conforms = true;
        break;
      }
    }
    if (!Conforms)
      break;
  }
  if (Conforms)
    return true;

  // Fallback direction: every inherited protocol must be adopted by some
  // protocol on the qualified-id.
  for (auto *PI : InheritedProtocols) {
    // If both the right and left sides have qualifiers.
    bool Adopts = false;
    for (auto *Proto : OPT->quals()) {
      // return 'true' if 'PI' is in the inheritance hierarchy of Proto
      if ((Adopts = ProtocolCompatibleWithProtocol(lProto: PI, rProto: Proto)))
        break;
    }
    if (!Adopts)
      return false;
  }
  return true;
}
6556
6557/// getObjCObjectPointerType - Return a ObjCObjectPointerType type for
6558/// the given object type.
6559QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const {
6560 llvm::FoldingSetNodeID ID;
6561 ObjCObjectPointerType::Profile(ID, T: ObjectT);
6562
6563 void *InsertPos = nullptr;
6564 if (ObjCObjectPointerType *QT =
6565 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
6566 return QualType(QT, 0);
6567
6568 // Find the canonical object type.
6569 QualType Canonical;
6570 if (!ObjectT.isCanonical()) {
6571 Canonical = getObjCObjectPointerType(ObjectT: getCanonicalType(T: ObjectT));
6572
6573 // Regenerate InsertPos.
6574 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
6575 }
6576
6577 // No match.
6578 void *Mem =
6579 Allocate(Size: sizeof(ObjCObjectPointerType), Align: alignof(ObjCObjectPointerType));
6580 auto *QType =
6581 new (Mem) ObjCObjectPointerType(Canonical, ObjectT);
6582
6583 Types.push_back(Elt: QType);
6584 ObjCObjectPointerTypes.InsertNode(N: QType, InsertPos);
6585 return QualType(QType, 0);
6586}
6587
6588/// getObjCInterfaceType - Return the unique reference to the type for the
6589/// specified ObjC interface decl. The list of protocols is optional.
6590QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
6591 ObjCInterfaceDecl *PrevDecl) const {
6592 if (Decl->TypeForDecl)
6593 return QualType(Decl->TypeForDecl, 0);
6594
6595 if (PrevDecl) {
6596 assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl");
6597 Decl->TypeForDecl = PrevDecl->TypeForDecl;
6598 return QualType(PrevDecl->TypeForDecl, 0);
6599 }
6600
6601 // Prefer the definition, if there is one.
6602 if (const ObjCInterfaceDecl *Def = Decl->getDefinition())
6603 Decl = Def;
6604
6605 void *Mem = Allocate(Size: sizeof(ObjCInterfaceType), Align: alignof(ObjCInterfaceType));
6606 auto *T = new (Mem) ObjCInterfaceType(Decl);
6607 Decl->TypeForDecl = T;
6608 Types.push_back(Elt: T);
6609 return QualType(T, 0);
6610}
6611
/// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique
/// TypeOfExprType AST's (since expressions are never shared). For example,
/// multiple declarations that refer to "typeof(x)" all contain different
/// DeclRefExpr's. This doesn't affect the type checker, since it operates
/// on canonical types (which are always unique).
QualType ASTContext::getTypeOfExprType(Expr *tofExpr, TypeOfKind Kind) const {
  TypeOfExprType *toe;
  if (tofExpr->isTypeDependent()) {
    // Dependent typeof(expr): unique a canonical node on the profiled
    // expression so equivalent expressions share one canonical type.
    llvm::FoldingSetNodeID ID;
    DependentTypeOfExprType::Profile(ID, Context: *this, E: tofExpr,
                                     IsUnqual: Kind == TypeOfKind::Unqualified);

    void *InsertPos = nullptr;
    DependentTypeOfExprType *Canon =
        DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos);
    if (Canon) {
      // We already have a "canonical" version of an identical, dependent
      // typeof(expr) type. Use that as our canonical type.
      toe = new (*this, alignof(TypeOfExprType)) TypeOfExprType(
          *this, tofExpr, Kind, QualType((TypeOfExprType *)Canon, 0));
    } else {
      // Build a new, canonical typeof(expr) type.
      Canon = new (*this, alignof(DependentTypeOfExprType))
          DependentTypeOfExprType(*this, tofExpr, Kind);
      DependentTypeOfExprTypes.InsertNode(N: Canon, InsertPos);
      toe = Canon;
    }
  } else {
    // Non-dependent: the canonical type is simply the canonical type of the
    // operand expression.
    QualType Canonical = getCanonicalType(T: tofExpr->getType());
    toe = new (*this, alignof(TypeOfExprType))
        TypeOfExprType(*this, tofExpr, Kind, Canonical);
  }
  Types.push_back(Elt: toe);
  return QualType(toe, 0);
}
6647
6648/// getTypeOfType - Unlike many "get<Type>" functions, we don't unique
6649/// TypeOfType nodes. The only motivation to unique these nodes would be
6650/// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be
6651/// an issue. This doesn't affect the type checker, since it operates
6652/// on canonical types (which are always unique).
6653QualType ASTContext::getTypeOfType(QualType tofType, TypeOfKind Kind) const {
6654 QualType Canonical = getCanonicalType(T: tofType);
6655 auto *tot = new (*this, alignof(TypeOfType))
6656 TypeOfType(*this, tofType, Canonical, Kind);
6657 Types.push_back(Elt: tot);
6658 return QualType(tot, 0);
6659}
6660
6661/// getReferenceQualifiedType - Given an expr, will return the type for
6662/// that expression, as in [dcl.type.simple]p4 but without taking id-expressions
6663/// and class member access into account.
6664QualType ASTContext::getReferenceQualifiedType(const Expr *E) const {
6665 // C++11 [dcl.type.simple]p4:
6666 // [...]
6667 QualType T = E->getType();
6668 switch (E->getValueKind()) {
6669 // - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the
6670 // type of e;
6671 case VK_XValue:
6672 return getRValueReferenceType(T);
6673 // - otherwise, if e is an lvalue, decltype(e) is T&, where T is the
6674 // type of e;
6675 case VK_LValue:
6676 return getLValueReferenceType(T);
6677 // - otherwise, decltype(e) is the type of e.
6678 case VK_PRValue:
6679 return T;
6680 }
6681 llvm_unreachable("Unknown value kind");
6682}
6683
/// Unlike many "get<Type>" functions, we don't unique DecltypeType
/// nodes. This would never be helpful, since each such type has its own
/// expression, and would not give a significant memory saving, since there
/// is an Expr tree under each such type.
QualType ASTContext::getDecltypeType(Expr *E, QualType UnderlyingType) const {
  // C++11 [temp.type]p2:
  // If an expression e involves a template parameter, decltype(e) denotes a
  // unique dependent type. Two such decltype-specifiers refer to the same
  // type only if their expressions are equivalent (14.5.6.1).
  QualType CanonType;
  if (!E->isInstantiationDependent()) {
    // Non-dependent: canonicalize through the already-computed underlying
    // type.
    CanonType = getCanonicalType(T: UnderlyingType);
  } else if (!UnderlyingType.isNull()) {
    // Dependent but with a known underlying type: the canonical type is the
    // uniqued, underlying-less node built by the recursive call.
    CanonType = getDecltypeType(E, UnderlyingType: QualType());
  } else {
    // Fully dependent: unique on the profiled expression so equivalent
    // expressions share one canonical decltype type.
    llvm::FoldingSetNodeID ID;
    DependentDecltypeType::Profile(ID, Context: *this, E);

    void *InsertPos = nullptr;
    if (DependentDecltypeType *Canon =
            DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos))
      return QualType(Canon, 0);

    // Build a new, canonical decltype(expr) type.
    auto *DT =
        new (*this, alignof(DependentDecltypeType)) DependentDecltypeType(E);
    DependentDecltypeTypes.InsertNode(N: DT, InsertPos);
    Types.push_back(Elt: DT);
    return QualType(DT, 0);
  }
  // Sugared node carrying the original expression and underlying type.
  auto *DT = new (*this, alignof(DecltypeType))
      DecltypeType(E, UnderlyingType, CanonType);
  Types.push_back(Elt: DT);
  return QualType(DT, 0);
}
6719
/// Build a pack-indexing type (T...[I]). When the pack is fully substituted
/// and the index is known, the result canonicalizes to the selected
/// expansion; otherwise a dependent node is uniqued in a folding set.
QualType ASTContext::getPackIndexingType(QualType Pattern, Expr *IndexExpr,
                                         bool FullySubstituted,
                                         ArrayRef<QualType> Expansions,
                                         UnsignedOrNone Index) const {
  QualType Canonical;
  if (FullySubstituted && Index) {
    // The index selects a concrete expansion; canonicalize to it.
    Canonical = getCanonicalType(T: Expansions[*Index]);
  } else {
    // Unique a dependent node on the canonical pattern, the index
    // expression, and the expansions.
    llvm::FoldingSetNodeID ID;
    PackIndexingType::Profile(ID, Context: *this, Pattern: Pattern.getCanonicalType(), E: IndexExpr,
                              FullySubstituted, Expansions);
    void *InsertPos = nullptr;
    PackIndexingType *Canon =
        DependentPackIndexingTypes.FindNodeOrInsertPos(ID, InsertPos);
    if (!Canon) {
      // Expansions are tail-allocated after the node.
      void *Mem = Allocate(
          Size: PackIndexingType::totalSizeToAlloc<QualType>(Counts: Expansions.size()),
          Align: TypeAlignment);
      // A null canonical QualType marks this node itself as canonical.
      Canon =
          new (Mem) PackIndexingType(QualType(), Pattern.getCanonicalType(),
                                     IndexExpr, FullySubstituted, Expansions);
      DependentPackIndexingTypes.InsertNode(N: Canon, InsertPos);
    }
    Canonical = QualType(Canon, 0);
  }

  // Build the (possibly sugared) node for the given pattern.
  void *Mem =
      Allocate(Size: PackIndexingType::totalSizeToAlloc<QualType>(Counts: Expansions.size()),
               Align: TypeAlignment);
  auto *T = new (Mem) PackIndexingType(Canonical, Pattern, IndexExpr,
                                       FullySubstituted, Expansions);
  Types.push_back(Elt: T);
  return QualType(T, 0);
}
6754
/// getUnaryTransformType - Return a uniqued reference to the type produced
/// by applying the given unary type transformation (UTTKind) to BaseType.
/// (Note: despite older comments, these nodes ARE uniqued via a folding
/// set.) Dependent uses canonicalize on the canonical base type with a null
/// underlying type.
QualType
ASTContext::getUnaryTransformType(QualType BaseType, QualType UnderlyingType,
                                  UnaryTransformType::UTTKind Kind) const {

  llvm::FoldingSetNodeID ID;
  UnaryTransformType::Profile(ID, BaseType, UnderlyingType, UKind: Kind);

  void *InsertPos = nullptr;
  if (UnaryTransformType *UT =
          UnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(UT, 0);

  QualType CanonType;
  if (!BaseType->isDependentType()) {
    // Non-dependent: the transform has already been computed, so the node
    // canonicalizes to the underlying type.
    CanonType = UnderlyingType.getCanonicalType();
  } else {
    // Dependent: no underlying type exists yet; canonicalize through the
    // canonical base type instead.
    assert(UnderlyingType.isNull() || BaseType == UnderlyingType);
    UnderlyingType = QualType();
    if (QualType CanonBase = BaseType.getCanonicalType();
        BaseType != CanonBase) {
      CanonType = getUnaryTransformType(BaseType: CanonBase, UnderlyingType: QualType(), Kind);
      assert(CanonType.isCanonical());

      // Find the insertion position again; the recursive call may have
      // grown the folding set.
      [[maybe_unused]] UnaryTransformType *UT =
          UnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!UT && "broken canonicalization");
    }
  }

  auto *UT = new (*this, alignof(UnaryTransformType))
      UnaryTransformType(BaseType, UnderlyingType, Kind, CanonType);
  UnaryTransformTypes.InsertNode(N: UT, InsertPos);
  Types.push_back(Elt: UT);
  return QualType(UT, 0);
}
6793
/// getAutoType - Return the uniqued reference to the 'auto' type which has been
/// deduced to the given type, or to the canonical undeduced 'auto' type, or the
/// canonical deduced-but-dependent 'auto' type.
QualType
ASTContext::getAutoType(DeducedKind DK, QualType DeducedAsType,
                        AutoTypeKeyword Keyword,
                        TemplateDecl *TypeConstraintConcept,
                        ArrayRef<TemplateArgument> TypeConstraintArgs) const {
  // The plain undeduced, unconstrained 'auto' is a lazily-built singleton.
  if (DK == DeducedKind::Undeduced && Keyword == AutoTypeKeyword::Auto &&
      !TypeConstraintConcept) {
    assert(DeducedAsType.isNull() && "");
    assert(TypeConstraintArgs.empty() && "");
    return getAutoDeductType();
  }

  // Look in the folding set for an existing type.
  llvm::FoldingSetNodeID ID;
  AutoType::Profile(ID, Context: *this, DK, Deduced: DeducedAsType, Keyword,
                    CD: TypeConstraintConcept, Arguments: TypeConstraintArgs);
  if (auto const AT_iter = AutoTypes.find(Val: ID); AT_iter != AutoTypes.end())
    return QualType(AT_iter->getSecond(), 0);

  if (DK == DeducedKind::Deduced) {
    assert(!DeducedAsType.isNull() && "deduced type must be provided");
  } else {
    assert(DeducedAsType.isNull() && "deduced type must not be provided");
    if (TypeConstraintConcept) {
      // Canonicalize the constraint. If the concept or any argument is
      // non-canonical, build the canonical constrained 'auto' first.
      // NOTE(review): the canonical node is threaded through DeducedAsType
      // here — presumably consumed as the canonical type by the AutoType
      // constructor; confirm against AutoType's ctor.
      bool AnyNonCanonArgs = false;
      auto *CanonicalConcept =
          cast<TemplateDecl>(Val: TypeConstraintConcept->getCanonicalDecl());
      auto CanonicalConceptArgs = ::getCanonicalTemplateArguments(
          C: *this, Args: TypeConstraintArgs, AnyNonCanonArgs);
      if (TypeConstraintConcept != CanonicalConcept || AnyNonCanonArgs)
        DeducedAsType = getAutoType(DK, DeducedAsType: QualType(), Keyword, TypeConstraintConcept: CanonicalConcept,
                                    TypeConstraintArgs: CanonicalConceptArgs);
    }
  }

  // Constraint arguments are allocated inline after the AutoType.
  void *Mem = Allocate(Size: sizeof(AutoType) +
                           sizeof(TemplateArgument) * TypeConstraintArgs.size(),
                       Align: alignof(AutoType));
  auto *AT = new (Mem) AutoType(DK, DeducedAsType, Keyword,
                                TypeConstraintConcept, TypeConstraintArgs);
#ifndef NDEBUG
  // Sanity-check that the constructed node profiles to the same ID we
  // looked up, so the cache stays coherent.
  llvm::FoldingSetNodeID InsertedID;
  AT->Profile(InsertedID, *this);
  assert(InsertedID == ID && "ID does not match");
#endif
  Types.push_back(Elt: AT);
  AutoTypes.try_emplace(Key: ID, Args&: AT);
  return QualType(AT, 0);
}
6846
6847QualType ASTContext::getUnconstrainedType(QualType T) const {
6848 QualType CanonT = T.getNonPackExpansionType().getCanonicalType();
6849
6850 // Remove a type-constraint from a top-level auto or decltype(auto).
6851 if (auto *AT = CanonT->getAs<AutoType>()) {
6852 if (!AT->isConstrained())
6853 return T;
6854 return getQualifiedType(
6855 T: getAutoType(DK: AT->getDeducedKind(), DeducedAsType: QualType(), Keyword: AT->getKeyword()),
6856 Qs: T.getQualifiers());
6857 }
6858
6859 // FIXME: We only support constrained auto at the top level in the type of a
6860 // non-type template parameter at the moment. Once we lift that restriction,
6861 // we'll need to recursively build types containing auto here.
6862 assert(!CanonT->getContainedAutoType() ||
6863 !CanonT->getContainedAutoType()->isConstrained());
6864 return T;
6865}
6866
/// Return the uniqued reference to the deduced template specialization type
/// which has been deduced to the given type, or to the canonical undeduced
/// such type, or the canonical deduced-but-dependent such type.
QualType ASTContext::getDeducedTemplateSpecializationType(
    DeducedKind DK, QualType DeducedAsType, ElaboratedTypeKeyword Keyword,
    TemplateName Template) const {
  // DeducedAsPack only ever occurs for lambda init-capture pack, which always
  // use AutoType.
  assert(DK != DeducedKind::DeducedAsPack &&
         "unexpected DeducedAsPack for DeducedTemplateSpecializationType");

  // Look in the folding set for an existing type.
  void *InsertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  DeducedTemplateSpecializationType::Profile(ID, DK, Deduced: DeducedAsType, Keyword,
                                             Template);
  if (DeducedTemplateSpecializationType *DTST =
          DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(DTST, 0);

  if (DK == DeducedKind::Deduced) {
    assert(!DeducedAsType.isNull() && "deduced type must be provided");
  } else {
    assert(DeducedAsType.isNull() && "deduced type must not be provided");
    // Undeduced: canonicalize on the keyword-less form with the canonical
    // template name, threading the canonical node through DeducedAsType.
    TemplateName CanonTemplateName = getCanonicalTemplateName(Name: Template);
    // FIXME: Can this be formed from a DependentTemplateName, such that the
    // keyword should be part of the canonical type?
    if (Keyword != ElaboratedTypeKeyword::None ||
        Template != CanonTemplateName) {
      DeducedAsType = getDeducedTemplateSpecializationType(
          DK, DeducedAsType: QualType(), Keyword: ElaboratedTypeKeyword::None, Template: CanonTemplateName);
      // Find the insertion position again; the recursive call may have
      // grown the folding set.
      [[maybe_unused]] DeducedTemplateSpecializationType *DTST =
          DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!DTST && "broken canonicalization");
    }
  }

  auto *DTST = new (*this, alignof(DeducedTemplateSpecializationType))
      DeducedTemplateSpecializationType(DK, DeducedAsType, Keyword, Template);

#ifndef NDEBUG
  // Sanity-check that the constructed node profiles to the ID we looked up.
  llvm::FoldingSetNodeID TempID;
  DTST->Profile(TempID);
  assert(ID == TempID && "ID does not match");
#endif
  Types.push_back(Elt: DTST);
  DeducedTemplateSpecializationTypes.InsertNode(N: DTST, InsertPos);
  return QualType(DTST, 0);
}
6917
6918/// getAtomicType - Return the uniqued reference to the atomic type for
6919/// the given value type.
6920QualType ASTContext::getAtomicType(QualType T) const {
6921 // Unique pointers, to guarantee there is only one pointer of a particular
6922 // structure.
6923 llvm::FoldingSetNodeID ID;
6924 AtomicType::Profile(ID, T);
6925
6926 void *InsertPos = nullptr;
6927 if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos))
6928 return QualType(AT, 0);
6929
6930 // If the atomic value type isn't canonical, this won't be a canonical type
6931 // either, so fill in the canonical type field.
6932 QualType Canonical;
6933 if (!T.isCanonical()) {
6934 Canonical = getAtomicType(T: getCanonicalType(T));
6935
6936 // Get the new insert position for the node we care about.
6937 AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos);
6938 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
6939 }
6940 auto *New = new (*this, alignof(AtomicType)) AtomicType(T, Canonical);
6941 Types.push_back(Elt: New);
6942 AtomicTypes.InsertNode(N: New, InsertPos);
6943 return QualType(New, 0);
6944}
6945
6946/// getAutoDeductType - Get type pattern for deducing against 'auto'.
6947QualType ASTContext::getAutoDeductType() const {
6948 if (AutoDeductTy.isNull())
6949 AutoDeductTy = QualType(new (*this, alignof(AutoType))
6950 AutoType(DeducedKind::Undeduced, QualType(),
6951 AutoTypeKeyword::Auto,
6952 /*TypeConstraintConcept=*/nullptr,
6953 /*TypeConstraintArgs=*/{}),
6954 0);
6955 return AutoDeductTy;
6956}
6957
/// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'.
QualType ASTContext::getAutoRRefDeductType() const {
  // Lazily built as an rvalue reference to the 'auto' deduction pattern.
  if (AutoRRefDeductTy.isNull())
    AutoRRefDeductTy = getRValueReferenceType(T: getAutoDeductType());
  assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern");
  return AutoRRefDeductTy;
}
6965
/// getSizeType - Return the unique type for "size_t" (C99 7.17), the result
/// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and
/// needs to agree with the definition in <stddef.h>.
QualType ASTContext::getSizeType() const {
  return getPredefinedSugarType(KD: PredefinedSugarType::Kind::SizeT);
}

/// Return the canonical (unsugared) integer type the target uses for
/// "size_t".
CanQualType ASTContext::getCanonicalSizeType() const {
  return getFromTargetType(Type: Target->getSizeType());
}

/// Return the unique signed counterpart of the integer type
/// corresponding to size_t.
QualType ASTContext::getSignedSizeType() const {
  return getPredefinedSugarType(KD: PredefinedSugarType::Kind::SignedSizeT);
}

/// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17)
/// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9).
QualType ASTContext::getPointerDiffType() const {
  return getPredefinedSugarType(KD: PredefinedSugarType::Kind::PtrdiffT);
}

/// Return the unique unsigned counterpart of "ptrdiff_t"
/// integer type. The standard (C11 7.21.6.1p7) refers to this type
/// in the definition of the %tu format specifier.
QualType ASTContext::getUnsignedPointerDiffType() const {
  return getFromTargetType(Type: Target->getUnsignedPtrDiffType(AddrSpace: LangAS::Default));
}
6995
/// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5).
CanQualType ASTContext::getIntMaxType() const {
  return getFromTargetType(Type: Target->getIntMaxType());
}

/// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5).
CanQualType ASTContext::getUIntMaxType() const {
  return getFromTargetType(Type: Target->getUIntMaxType());
}

/// getSignedWCharType - Return the type of "signed wchar_t".
/// Used when in C++, as a GCC extension.
QualType ASTContext::getSignedWCharType() const {
  // FIXME: derive from "Target" ?
  return WCharTy;
}

/// getUnsignedWCharType - Return the type of "unsigned wchar_t".
/// Used when in C++, as a GCC extension.
QualType ASTContext::getUnsignedWCharType() const {
  // FIXME: derive from "Target" ?
  // Note: returns 'unsigned int' unconditionally, regardless of the
  // target's actual wchar_t width (see FIXME above).
  return UnsignedIntTy;
}
7019
/// Return the unique type for "intptr_t", as defined by the target.
QualType ASTContext::getIntPtrType() const {
  return getFromTargetType(Type: Target->getIntPtrType());
}

/// Return the unique type for "uintptr_t": the unsigned counterpart of
/// the target's intptr_t.
QualType ASTContext::getUIntPtrType() const {
  return getCorrespondingUnsignedType(T: getIntPtrType());
}

/// Return the unique type for "pid_t" defined in
/// <sys/types.h>. We need this to compute the correct type for vfork().
QualType ASTContext::getProcessIDType() const {
  return getFromTargetType(Type: Target->getProcessIDType());
}
7033
7034//===----------------------------------------------------------------------===//
7035// Type Operators
7036//===----------------------------------------------------------------------===//
7037
7038CanQualType ASTContext::getCanonicalParamType(QualType T) const {
7039 // Push qualifiers into arrays, and then discard any remaining
7040 // qualifiers.
7041 T = getCanonicalType(T);
7042 T = getVariableArrayDecayedType(type: T);
7043 const Type *Ty = T.getTypePtr();
7044 QualType Result;
7045 if (getLangOpts().HLSL && isa<ConstantArrayType>(Val: Ty)) {
7046 Result = getArrayParameterType(Ty: QualType(Ty, 0));
7047 } else if (isa<ArrayType>(Val: Ty)) {
7048 Result = getArrayDecayedType(T: QualType(Ty,0));
7049 } else if (isa<FunctionType>(Val: Ty)) {
7050 Result = getPointerType(T: QualType(Ty, 0));
7051 } else {
7052 Result = QualType(Ty, 0);
7053 }
7054
7055 return CanQualType::CreateUnsafe(Other: Result);
7056}
7057
/// Strip qualifiers from \p type, looking through array types: qualifiers
/// found at every array level are accumulated into \p quals, and the
/// unqualified (possibly rebuilt) type is returned.
QualType ASTContext::getUnqualifiedArrayType(QualType type,
                                             Qualifiers &quals) const {
  SplitQualType splitType = type.getSplitUnqualifiedType();

  // FIXME: getSplitUnqualifiedType() actually walks all the way to
  // the unqualified desugared type and then drops it on the floor.
  // We then have to strip that sugar back off with
  // getUnqualifiedDesugaredType(), which is silly.
  const auto *AT =
      dyn_cast<ArrayType>(Val: splitType.Ty->getUnqualifiedDesugaredType());

  // If we don't have an array, just use the results in splitType.
  if (!AT) {
    quals = splitType.Quals;
    return QualType(splitType.Ty, 0);
  }

  // Otherwise, recurse on the array's element type.
  QualType elementType = AT->getElementType();
  QualType unqualElementType = getUnqualifiedArrayType(type: elementType, quals);

  // If that didn't change the element type, AT has no qualifiers, so we
  // can just use the results in splitType.
  if (elementType == unqualElementType) {
    assert(quals.empty()); // from the recursive call
    quals = splitType.Quals;
    return QualType(splitType.Ty, 0);
  }

  // Otherwise, add in the qualifiers from the outermost type, then
  // build the type back up.
  quals.addConsistentQualifiers(qs: splitType.Quals);

  // Rebuild the array type around the unqualified element type, preserving
  // the array's size/modifier but dropping index-type CVR qualifiers where
  // the original code did (constant/incomplete/dependent arrays pass 0).
  if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: AT)) {
    return getConstantArrayType(EltTy: unqualElementType, ArySizeIn: CAT->getSize(),
                                SizeExpr: CAT->getSizeExpr(), ASM: CAT->getSizeModifier(), IndexTypeQuals: 0);
  }

  if (const auto *IAT = dyn_cast<IncompleteArrayType>(Val: AT)) {
    return getIncompleteArrayType(elementType: unqualElementType, ASM: IAT->getSizeModifier(), elementTypeQuals: 0);
  }

  if (const auto *VAT = dyn_cast<VariableArrayType>(Val: AT)) {
    return getVariableArrayType(EltTy: unqualElementType, NumElts: VAT->getSizeExpr(),
                                ASM: VAT->getSizeModifier(),
                                IndexTypeQuals: VAT->getIndexTypeCVRQualifiers());
  }

  const auto *DSAT = cast<DependentSizedArrayType>(Val: AT);
  return getDependentSizedArrayType(elementType: unqualElementType, numElements: DSAT->getSizeExpr(),
                                    ASM: DSAT->getSizeModifier(), elementTypeQuals: 0);
}
7110
/// Attempt to unwrap two types that may both be array types with the same bound
/// (or both be array types of unknown bound) for the purpose of comparing the
/// cv-decomposition of two types per C++ [conv.qual].
///
/// On return, T1 and T2 are replaced by their innermost compatible element
/// types; they are left unchanged at the first level that cannot be
/// unwrapped.
///
/// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in
///        C++20 [conv.qual], if permitted by the current language mode.
void ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2,
                                         bool AllowPiMismatch) const {
  while (true) {
    // Stop as soon as either side is not an array.
    auto *AT1 = getAsArrayType(T: T1);
    if (!AT1)
      return;

    auto *AT2 = getAsArrayType(T: T2);
    if (!AT2)
      return;

    // If we don't have two array types with the same constant bound nor two
    // incomplete array types, we've unwrapped everything we can.
    // C++20 also permits one type to be a constant array type and the other
    // to be an incomplete array type.
    // FIXME: Consider also unwrapping array of unknown bound and VLA.
    if (auto *CAT1 = dyn_cast<ConstantArrayType>(Val: AT1)) {
      auto *CAT2 = dyn_cast<ConstantArrayType>(Val: AT2);
      if (!((CAT2 && CAT1->getSize() == CAT2->getSize()) ||
            (AllowPiMismatch && getLangOpts().CPlusPlus20 &&
             isa<IncompleteArrayType>(Val: AT2))))
        return;
    } else if (isa<IncompleteArrayType>(Val: AT1)) {
      if (!(isa<IncompleteArrayType>(Val: AT2) ||
            (AllowPiMismatch && getLangOpts().CPlusPlus20 &&
             isa<ConstantArrayType>(Val: AT2))))
        return;
    } else {
      return;
    }

    // This level matched: descend into the element types and try again.
    T1 = AT1->getElementType();
    T2 = AT2->getElementType();
  }
}
7152
/// Attempt to unwrap two types that may be similar (C++ [conv.qual]).
///
/// If T1 and T2 are both pointer types of the same kind, or both array types
/// with the same bound, unwraps layers from T1 and T2 until a pointer type is
/// unwrapped. Top-level qualifiers on T1 and T2 are ignored.
///
/// This function will typically be called in a loop that successively
/// "unwraps" pointer and pointer-to-member types to compare them at each
/// level.
///
/// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in
///        C++20 [conv.qual], if permitted by the current language mode.
///
/// \return \c true if a pointer type was unwrapped, \c false if we reached a
///         pair of types that can't be unwrapped further.
bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2,
                                    bool AllowPiMismatch) const {
  // First strip matching array layers off both sides.
  UnwrapSimilarArrayTypes(T1, T2, AllowPiMismatch);

  // Ordinary pointers: unwrap to the pointee types.
  const auto *T1PtrType = T1->getAs<PointerType>();
  const auto *T2PtrType = T2->getAs<PointerType>();
  if (T1PtrType && T2PtrType) {
    T1 = T1PtrType->getPointeeType();
    T2 = T2PtrType->getPointeeType();
    return true;
  }

  // Member pointers: only unwrap when the class and the qualifier agree
  // (compared via their canonical forms).
  if (const auto *T1MPType = T1->getAs<MemberPointerType>(),
                 *T2MPType = T2->getAs<MemberPointerType>();
      T1MPType && T2MPType) {
    if (auto *RD1 = T1MPType->getMostRecentCXXRecordDecl(),
             *RD2 = T2MPType->getMostRecentCXXRecordDecl();
        RD1 != RD2 && RD1->getCanonicalDecl() != RD2->getCanonicalDecl())
      return false;
    if (T1MPType->getQualifier().getCanonical() !=
        T2MPType->getQualifier().getCanonical())
      return false;
    T1 = T1MPType->getPointeeType();
    T2 = T2MPType->getPointeeType();
    return true;
  }

  // Objective-C object pointers, only when ObjC is enabled.
  if (getLangOpts().ObjC) {
    const auto *T1OPType = T1->getAs<ObjCObjectPointerType>();
    const auto *T2OPType = T2->getAs<ObjCObjectPointerType>();
    if (T1OPType && T2OPType) {
      T1 = T1OPType->getPointeeType();
      T2 = T2OPType->getPointeeType();
      return true;
    }
  }

  // FIXME: Block pointers, too?

  return false;
}
7209
7210bool ASTContext::hasSimilarType(QualType T1, QualType T2) const {
7211 while (true) {
7212 Qualifiers Quals;
7213 T1 = getUnqualifiedArrayType(type: T1, quals&: Quals);
7214 T2 = getUnqualifiedArrayType(type: T2, quals&: Quals);
7215 if (hasSameType(T1, T2))
7216 return true;
7217 if (!UnwrapSimilarTypes(T1, T2))
7218 return false;
7219 }
7220}
7221
7222bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) {
7223 while (true) {
7224 Qualifiers Quals1, Quals2;
7225 T1 = getUnqualifiedArrayType(type: T1, quals&: Quals1);
7226 T2 = getUnqualifiedArrayType(type: T2, quals&: Quals2);
7227
7228 Quals1.removeCVRQualifiers();
7229 Quals2.removeCVRQualifiers();
7230 if (Quals1 != Quals2)
7231 return false;
7232
7233 if (hasSameType(T1, T2))
7234 return true;
7235
7236 if (!UnwrapSimilarTypes(T1, T2, /*AllowPiMismatch*/ false))
7237 return false;
7238 }
7239}
7240
/// Compute the DeclarationNameInfo naming the given template, located at
/// \p NameLoc, for each flavor of TemplateName.
DeclarationNameInfo
ASTContext::getNameForTemplate(TemplateName Name,
                               SourceLocation NameLoc) const {
  switch (Name.getKind()) {
  case TemplateName::QualifiedTemplate:
  case TemplateName::Template:
    // DNInfo work in progress: CHECKME: what about DNLoc?
    return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(),
                               NameLoc);

  case TemplateName::OverloadedTemplate: {
    // Use the name of the first declaration in the overload set.
    OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate();
    // DNInfo work in progress: CHECKME: what about DNLoc?
    return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc);
  }

  case TemplateName::AssumedTemplate: {
    AssumedTemplateStorage *Storage = Name.getAsAssumedTemplateName();
    return DeclarationNameInfo(Storage->getDeclName(), NameLoc);
  }

  case TemplateName::DependentTemplate: {
    // A dependent template names either an identifier or an overloaded
    // operator; build the corresponding DeclarationName.
    DependentTemplateName *DTN = Name.getAsDependentTemplateName();
    IdentifierOrOverloadedOperator TN = DTN->getName();
    DeclarationName DName;
    if (const IdentifierInfo *II = TN.getIdentifier()) {
      DName = DeclarationNames.getIdentifier(ID: II);
      return DeclarationNameInfo(DName, NameLoc);
    } else {
      DName = DeclarationNames.getCXXOperatorName(Op: TN.getOperator());
      // DNInfo work in progress: FIXME: source locations?
      DeclarationNameLoc DNLoc =
          DeclarationNameLoc::makeCXXOperatorNameLoc(Range: SourceRange());
      return DeclarationNameInfo(DName, NameLoc, DNLoc);
    }
  }

  case TemplateName::SubstTemplateTemplateParm: {
    // Name the substituted-for template template parameter.
    SubstTemplateTemplateParmStorage *subst
      = Name.getAsSubstTemplateTemplateParm();
    return DeclarationNameInfo(subst->getParameter()->getDeclName(),
                               NameLoc);
  }

  case TemplateName::SubstTemplateTemplateParmPack: {
    // Name the substituted-for template template parameter pack.
    SubstTemplateTemplateParmPackStorage *subst
      = Name.getAsSubstTemplateTemplateParmPack();
    return DeclarationNameInfo(subst->getParameterPack()->getDeclName(),
                               NameLoc);
  }
  case TemplateName::UsingTemplate:
    return DeclarationNameInfo(Name.getAsUsingShadowDecl()->getDeclName(),
                               NameLoc);
  case TemplateName::DeducedTemplate: {
    // Deduced templates are named after their underlying template.
    DeducedTemplateStorage *DTS = Name.getAsDeducedTemplateName();
    return getNameForTemplate(Name: DTS->getUnderlying(), NameLoc);
  }
  }

  llvm_unreachable("bad template name kind!");
}
7302
7303static const TemplateArgument *
7304getDefaultTemplateArgumentOrNone(const NamedDecl *P) {
7305 auto handleParam = [](auto *TP) -> const TemplateArgument * {
7306 if (!TP->hasDefaultArgument())
7307 return nullptr;
7308 return &TP->getDefaultArgument().getArgument();
7309 };
7310 switch (P->getKind()) {
7311 case NamedDecl::TemplateTypeParm:
7312 return handleParam(cast<TemplateTypeParmDecl>(Val: P));
7313 case NamedDecl::NonTypeTemplateParm:
7314 return handleParam(cast<NonTypeTemplateParmDecl>(Val: P));
7315 case NamedDecl::TemplateTemplateParm:
7316 return handleParam(cast<TemplateTemplateParmDecl>(Val: P));
7317 default:
7318 llvm_unreachable("Unexpected template parameter kind");
7319 }
7320}
7321
/// Produce the canonical form of \p Name, such that two spellings that refer
/// to the same template compare equal.
TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name,
                                                  bool IgnoreDeduced) const {
  // First strip off all sugar nodes (qualified/using names, and deduced
  // template names when IgnoreDeduced is set).
  while (std::optional<TemplateName> UnderlyingOrNone =
             Name.desugar(IgnoreDeduced))
    Name = *UnderlyingOrNone;

  switch (Name.getKind()) {
  case TemplateName::Template: {
    TemplateDecl *Template = Name.getAsTemplateDecl();
    if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Val: Template))
      Template = getCanonicalTemplateTemplateParmDecl(TTP);

    // The canonical template name is the canonical template declaration.
    return TemplateName(cast<TemplateDecl>(Val: Template->getCanonicalDecl()));
  }

  case TemplateName::AssumedTemplate:
    // An assumed template is just a name, so it is already canonical.
    return Name;

  case TemplateName::OverloadedTemplate:
    llvm_unreachable("cannot canonicalize overloaded template");

  case TemplateName::DependentTemplate: {
    DependentTemplateName *DTN = Name.getAsDependentTemplateName();
    assert(DTN && "Non-dependent template names must refer to template decls.");
    NestedNameSpecifier Qualifier = DTN->getQualifier();
    NestedNameSpecifier CanonQualifier = Qualifier.getCanonical();
    // The canonical form carries a canonical qualifier and always uses the
    // 'template' keyword.
    if (Qualifier != CanonQualifier || !DTN->hasTemplateKeyword())
      return getDependentTemplateName(Name: {CanonQualifier, DTN->getName(),
                                      /*HasTemplateKeyword=*/true});
    return Name;
  }

  case TemplateName::SubstTemplateTemplateParmPack: {
    SubstTemplateTemplateParmPackStorage *subst =
        Name.getAsSubstTemplateTemplateParmPack();
    // Canonicalize both the substituted argument pack and the associated
    // declaration.
    TemplateArgument canonArgPack =
        getCanonicalTemplateArgument(Arg: subst->getArgumentPack());
    return getSubstTemplateTemplateParmPack(
        ArgPack: canonArgPack, AssociatedDecl: subst->getAssociatedDecl()->getCanonicalDecl(),
        Index: subst->getIndex(), Final: subst->getFinal());
  }
  case TemplateName::DeducedTemplate: {
    // Deduced names are only reached when IgnoreDeduced is false; otherwise
    // the desugar loop above already replaced them with their underlying name.
    assert(IgnoreDeduced == false);
    DeducedTemplateStorage *DTS = Name.getAsDeducedTemplateName();
    DefaultArguments DefArgs = DTS->getDefaultArguments();
    TemplateName Underlying = DTS->getUnderlying();

    TemplateName CanonUnderlying =
        getCanonicalTemplateName(Name: Underlying, /*IgnoreDeduced=*/true);
    // NonCanonical tracks whether anything changed relative to the input Name.
    bool NonCanonical = CanonUnderlying != Underlying;
    auto CanonArgs =
        getCanonicalTemplateArguments(C: *this, Args: DefArgs.Args, AnyNonCanonArgs&: NonCanonical);

    ArrayRef<NamedDecl *> Params =
        CanonUnderlying.getAsTemplateDecl()->getTemplateParameters()->asArray();
    assert(CanonArgs.size() <= Params.size());
    // A deduced template name which deduces the same default arguments already
    // declared in the underlying template is the same template as the
    // underlying template. We need to note any arguments which differ from
    // the corresponding declaration. If any argument differs, we must build a
    // deduced template name.
    for (int I = CanonArgs.size() - 1; I >= 0; --I) {
      const TemplateArgument *A = getDefaultTemplateArgumentOrNone(P: Params[I]);
      if (!A)
        break;
      auto CanonParamDefArg = getCanonicalTemplateArgument(Arg: *A);
      TemplateArgument &CanonDefArg = CanonArgs[I];
      if (CanonDefArg.structurallyEquals(Other: CanonParamDefArg))
        continue;
      // Keep popping from the back any default arguments which are the same.
      if (I == int(CanonArgs.size() - 1))
        CanonArgs.pop_back();
      NonCanonical = true;
    }
    // If nothing changed, the input name was already canonical.
    return NonCanonical ? getDeducedTemplateName(
                              Underlying: CanonUnderlying,
                              /*DefaultArgs=*/{.StartPos: DefArgs.StartPos, .Args: CanonArgs})
                        : Name;
  }
  case TemplateName::UsingTemplate:
  case TemplateName::QualifiedTemplate:
  case TemplateName::SubstTemplateTemplateParm:
    // These are handled by the desugar loop above and can never be the
    // resulting kind here.
    llvm_unreachable("always sugar node");
  }

  llvm_unreachable("bad template name!");
}
7411
7412bool ASTContext::hasSameTemplateName(const TemplateName &X,
7413 const TemplateName &Y,
7414 bool IgnoreDeduced) const {
7415 return getCanonicalTemplateName(Name: X, IgnoreDeduced) ==
7416 getCanonicalTemplateName(Name: Y, IgnoreDeduced);
7417}
7418
7419bool ASTContext::isSameAssociatedConstraint(
7420 const AssociatedConstraint &ACX, const AssociatedConstraint &ACY) const {
7421 if (ACX.ArgPackSubstIndex != ACY.ArgPackSubstIndex)
7422 return false;
7423 if (!isSameConstraintExpr(XCE: ACX.ConstraintExpr, YCE: ACY.ConstraintExpr))
7424 return false;
7425 return true;
7426}
7427
7428bool ASTContext::isSameConstraintExpr(const Expr *XCE, const Expr *YCE) const {
7429 if (!XCE != !YCE)
7430 return false;
7431
7432 if (!XCE)
7433 return true;
7434
7435 llvm::FoldingSetNodeID XCEID, YCEID;
7436 XCE->Profile(ID&: XCEID, Context: *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true);
7437 YCE->Profile(ID&: YCEID, Context: *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true);
7438 return XCEID == YCEID;
7439}
7440
7441bool ASTContext::isSameTypeConstraint(const TypeConstraint *XTC,
7442 const TypeConstraint *YTC) const {
7443 if (!XTC != !YTC)
7444 return false;
7445
7446 if (!XTC)
7447 return true;
7448
7449 auto *NCX = XTC->getNamedConcept();
7450 auto *NCY = YTC->getNamedConcept();
7451 if (!NCX || !NCY || !isSameEntity(X: NCX, Y: NCY))
7452 return false;
7453 if (XTC->getConceptReference()->hasExplicitTemplateArgs() !=
7454 YTC->getConceptReference()->hasExplicitTemplateArgs())
7455 return false;
7456 if (XTC->getConceptReference()->hasExplicitTemplateArgs())
7457 if (XTC->getConceptReference()
7458 ->getTemplateArgsAsWritten()
7459 ->NumTemplateArgs !=
7460 YTC->getConceptReference()->getTemplateArgsAsWritten()->NumTemplateArgs)
7461 return false;
7462
7463 // Compare slowly by profiling.
7464 //
7465 // We couldn't compare the profiling result for the template
7466 // args here. Consider the following example in different modules:
7467 //
7468 // template <__integer_like _Tp, C<_Tp> Sentinel>
7469 // constexpr _Tp operator()(_Tp &&__t, Sentinel &&last) const {
7470 // return __t;
7471 // }
7472 //
7473 // When we compare the profiling result for `C<_Tp>` in different
7474 // modules, it will compare the type of `_Tp` in different modules.
7475 // However, the type of `_Tp` in different modules refer to different
7476 // types here naturally. So we couldn't compare the profiling result
7477 // for the template args directly.
7478 return isSameConstraintExpr(XCE: XTC->getImmediatelyDeclaredConstraint(),
7479 YCE: YTC->getImmediatelyDeclaredConstraint());
7480}
7481
7482bool ASTContext::isSameTemplateParameter(const NamedDecl *X,
7483 const NamedDecl *Y) const {
7484 if (X->getKind() != Y->getKind())
7485 return false;
7486
7487 if (auto *TX = dyn_cast<TemplateTypeParmDecl>(Val: X)) {
7488 auto *TY = cast<TemplateTypeParmDecl>(Val: Y);
7489 if (TX->isParameterPack() != TY->isParameterPack())
7490 return false;
7491 if (TX->hasTypeConstraint() != TY->hasTypeConstraint())
7492 return false;
7493 return isSameTypeConstraint(XTC: TX->getTypeConstraint(),
7494 YTC: TY->getTypeConstraint());
7495 }
7496
7497 if (auto *TX = dyn_cast<NonTypeTemplateParmDecl>(Val: X)) {
7498 auto *TY = cast<NonTypeTemplateParmDecl>(Val: Y);
7499 return TX->isParameterPack() == TY->isParameterPack() &&
7500 TX->getASTContext().hasSameType(T1: TX->getType(), T2: TY->getType()) &&
7501 isSameConstraintExpr(XCE: TX->getPlaceholderTypeConstraint(),
7502 YCE: TY->getPlaceholderTypeConstraint());
7503 }
7504
7505 auto *TX = cast<TemplateTemplateParmDecl>(Val: X);
7506 auto *TY = cast<TemplateTemplateParmDecl>(Val: Y);
7507 return TX->isParameterPack() == TY->isParameterPack() &&
7508 isSameTemplateParameterList(X: TX->getTemplateParameters(),
7509 Y: TY->getTemplateParameters());
7510}
7511
7512bool ASTContext::isSameTemplateParameterList(
7513 const TemplateParameterList *X, const TemplateParameterList *Y) const {
7514 if (X->size() != Y->size())
7515 return false;
7516
7517 for (unsigned I = 0, N = X->size(); I != N; ++I)
7518 if (!isSameTemplateParameter(X: X->getParam(Idx: I), Y: Y->getParam(Idx: I)))
7519 return false;
7520
7521 return isSameConstraintExpr(XCE: X->getRequiresClause(), YCE: Y->getRequiresClause());
7522}
7523
7524bool ASTContext::isSameDefaultTemplateArgument(const NamedDecl *X,
7525 const NamedDecl *Y) const {
7526 // If the type parameter isn't the same already, we don't need to check the
7527 // default argument further.
7528 if (!isSameTemplateParameter(X, Y))
7529 return false;
7530
7531 if (auto *TTPX = dyn_cast<TemplateTypeParmDecl>(Val: X)) {
7532 auto *TTPY = cast<TemplateTypeParmDecl>(Val: Y);
7533 if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument())
7534 return false;
7535
7536 return hasSameType(T1: TTPX->getDefaultArgument().getArgument().getAsType(),
7537 T2: TTPY->getDefaultArgument().getArgument().getAsType());
7538 }
7539
7540 if (auto *NTTPX = dyn_cast<NonTypeTemplateParmDecl>(Val: X)) {
7541 auto *NTTPY = cast<NonTypeTemplateParmDecl>(Val: Y);
7542 if (!NTTPX->hasDefaultArgument() || !NTTPY->hasDefaultArgument())
7543 return false;
7544
7545 Expr *DefaultArgumentX =
7546 NTTPX->getDefaultArgument().getArgument().getAsExpr()->IgnoreImpCasts();
7547 Expr *DefaultArgumentY =
7548 NTTPY->getDefaultArgument().getArgument().getAsExpr()->IgnoreImpCasts();
7549 llvm::FoldingSetNodeID XID, YID;
7550 DefaultArgumentX->Profile(ID&: XID, Context: *this, /*Canonical=*/true);
7551 DefaultArgumentY->Profile(ID&: YID, Context: *this, /*Canonical=*/true);
7552 return XID == YID;
7553 }
7554
7555 auto *TTPX = cast<TemplateTemplateParmDecl>(Val: X);
7556 auto *TTPY = cast<TemplateTemplateParmDecl>(Val: Y);
7557
7558 if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument())
7559 return false;
7560
7561 const TemplateArgument &TAX = TTPX->getDefaultArgument().getArgument();
7562 const TemplateArgument &TAY = TTPY->getDefaultArgument().getArgument();
7563 return hasSameTemplateName(X: TAX.getAsTemplate(), Y: TAY.getAsTemplate());
7564}
7565
/// Determine whether two nested-name-specifiers denote the same sequence of
/// components, comparing each component by the entity it names.
static bool isSameQualifier(const NestedNameSpecifier X,
                            const NestedNameSpecifier Y) {
  if (X == Y)
    return true;
  // One empty, the other not: different.
  if (!X || !Y)
    return false;

  auto Kind = X.getKind();
  if (Kind != Y.getKind())
    return false;

  // FIXME: For namespaces and types, we're permitted to check that the entity
  // is named via the same tokens. We should probably do so.
  switch (Kind) {
  case NestedNameSpecifier::Kind::Namespace: {
    // Namespace components match when they name the same namespace; then
    // compare the prefixes recursively.
    auto [NamespaceX, PrefixX] = X.getAsNamespaceAndPrefix();
    auto [NamespaceY, PrefixY] = Y.getAsNamespaceAndPrefix();
    if (!declaresSameEntity(D1: NamespaceX->getNamespace(),
                            D2: NamespaceY->getNamespace()))
      return false;
    return isSameQualifier(X: PrefixX, Y: PrefixY);
  }
  case NestedNameSpecifier::Kind::Type: {
    // Type components match when their canonical types are identical; then
    // compare the prefixes recursively.
    const auto *TX = X.getAsType(), *TY = Y.getAsType();
    if (TX->getCanonicalTypeInternal() != TY->getCanonicalTypeInternal())
      return false;
    return isSameQualifier(X: TX->getPrefix(), Y: TY->getPrefix());
  }
  case NestedNameSpecifier::Kind::Null:
  case NestedNameSpecifier::Kind::Global:
  case NestedNameSpecifier::Kind::MicrosoftSuper:
    // These kinds carry no further data, so matching kinds suffice.
    return true;
  }
  llvm_unreachable("unhandled qualifier kind");
}
7601
7602static bool hasSameCudaAttrs(const FunctionDecl *A, const FunctionDecl *B) {
7603 if (!A->getASTContext().getLangOpts().CUDA)
7604 return true; // Target attributes are overloadable in CUDA compilation only.
7605 if (A->hasAttr<CUDADeviceAttr>() != B->hasAttr<CUDADeviceAttr>())
7606 return false;
7607 if (A->hasAttr<CUDADeviceAttr>() && B->hasAttr<CUDADeviceAttr>())
7608 return A->hasAttr<CUDAHostAttr>() == B->hasAttr<CUDAHostAttr>();
7609 return true; // unattributed and __host__ functions are the same.
7610}
7611
/// Determine whether the attributes we can overload on are identical for A and
/// B. Will ignore any overloadable attrs represented in the type of A and B.
static bool hasSameOverloadableAttrs(const FunctionDecl *A,
                                     const FunctionDecl *B) {
  // Note that pass_object_size attributes are represented in the function's
  // ExtParameterInfo, so we don't need to check them here.

  llvm::FoldingSetNodeID Cand1ID, Cand2ID;
  auto AEnableIfAttrs = A->specific_attrs<EnableIfAttr>();
  auto BEnableIfAttrs = B->specific_attrs<EnableIfAttr>();

  // Walk both enable_if lists in lockstep; zip_longest yields std::nullopt
  // for the shorter list once it is exhausted, which detects differing
  // attribute counts.
  for (auto Pair : zip_longest(t&: AEnableIfAttrs, u&: BEnableIfAttrs)) {
    std::optional<EnableIfAttr *> Cand1A = std::get<0>(t&: Pair);
    std::optional<EnableIfAttr *> Cand2A = std::get<1>(t&: Pair);

    // Return false if the number of enable_if attributes is different.
    if (!Cand1A || !Cand2A)
      return false;

    // Reuse the FoldingSetNodeIDs across iterations.
    Cand1ID.clear();
    Cand2ID.clear();

    // Profile the condition expressions canonically so spelling differences
    // don't matter.
    (*Cand1A)->getCond()->Profile(ID&: Cand1ID, Context: A->getASTContext(), Canonical: true);
    (*Cand2A)->getCond()->Profile(ID&: Cand2ID, Context: B->getASTContext(), Canonical: true);

    // Return false if any of the enable_if expressions of A and B are
    // different.
    if (Cand1ID != Cand2ID)
      return false;
  }
  // In CUDA compilation, the target attributes must also agree.
  return hasSameCudaAttrs(A, B);
}
7644
/// Determine whether declarations X and Y (from possibly different modules)
/// declare the same entity, for the purpose of merging during deserialization.
bool ASTContext::isSameEntity(const NamedDecl *X, const NamedDecl *Y) const {
  // Caution: this function is called by the AST reader during deserialization,
  // so it cannot rely on AST invariants being met. Non-trivial accessors
  // should be avoided, along with any traversal of redeclaration chains.

  if (X == Y)
    return true;

  if (X->getDeclName() != Y->getDeclName())
    return false;

  // Must be in the same context.
  //
  // Note that we can't use DeclContext::Equals here, because the DeclContexts
  // could be two different declarations of the same function. (We will fix the
  // semantic DC to refer to the primary definition after merging.)
  if (!declaresSameEntity(D1: cast<Decl>(Val: X->getDeclContext()->getRedeclContext()),
                          D2: cast<Decl>(Val: Y->getDeclContext()->getRedeclContext())))
    return false;

  // If either X or Y are local to the owning module, they are only possible to
  // be the same entity if they are in the same module.
  if (X->isModuleLocal() || Y->isModuleLocal())
    if (!isInSameModule(M1: X->getOwningModule(), M2: Y->getOwningModule()))
      return false;

  // Two typedefs refer to the same entity if they have the same underlying
  // type.
  if (const auto *TypedefX = dyn_cast<TypedefNameDecl>(Val: X))
    if (const auto *TypedefY = dyn_cast<TypedefNameDecl>(Val: Y))
      return hasSameType(T1: TypedefX->getUnderlyingType(),
                         T2: TypedefY->getUnderlyingType());

  // Must have the same kind.
  if (X->getKind() != Y->getKind())
    return false;

  // Objective-C classes and protocols with the same name always match.
  if (isa<ObjCInterfaceDecl>(Val: X) || isa<ObjCProtocolDecl>(Val: X))
    return true;

  if (isa<ClassTemplateSpecializationDecl>(Val: X)) {
    // No need to handle these here: we merge them when adding them to the
    // template.
    return false;
  }

  // Compatible tags match. struct/class/interface are interchangeable with
  // each other; enum and union must match exactly.
  if (const auto *TagX = dyn_cast<TagDecl>(Val: X)) {
    const auto *TagY = cast<TagDecl>(Val: Y);
    return (TagX->getTagKind() == TagY->getTagKind()) ||
           ((TagX->getTagKind() == TagTypeKind::Struct ||
             TagX->getTagKind() == TagTypeKind::Class ||
             TagX->getTagKind() == TagTypeKind::Interface) &&
            (TagY->getTagKind() == TagTypeKind::Struct ||
             TagY->getTagKind() == TagTypeKind::Class ||
             TagY->getTagKind() == TagTypeKind::Interface));
  }

  // Functions with the same type and linkage match.
  // FIXME: This needs to cope with merging of prototyped/non-prototyped
  // functions, etc.
  if (const auto *FuncX = dyn_cast<FunctionDecl>(Val: X)) {
    const auto *FuncY = cast<FunctionDecl>(Val: Y);
    if (const auto *CtorX = dyn_cast<CXXConstructorDecl>(Val: X)) {
      const auto *CtorY = cast<CXXConstructorDecl>(Val: Y);
      // Inheriting constructors must inherit the same constructor.
      if (CtorX->getInheritedConstructor() &&
          !isSameEntity(X: CtorX->getInheritedConstructor().getConstructor(),
                        Y: CtorY->getInheritedConstructor().getConstructor()))
        return false;
    }

    if (FuncX->isMultiVersion() != FuncY->isMultiVersion())
      return false;

    // Multiversioned functions with different feature strings are represented
    // as separate declarations.
    if (FuncX->isMultiVersion()) {
      const auto *TAX = FuncX->getAttr<TargetAttr>();
      const auto *TAY = FuncY->getAttr<TargetAttr>();
      assert(TAX && TAY && "Multiversion Function without target attribute");

      if (TAX->getFeaturesStr() != TAY->getFeaturesStr())
        return false;
    }

    // Per C++20 [temp.over.link]/4, friends in different classes are sometimes
    // not the same entity if they are constrained.
    if ((FuncX->isMemberLikeConstrainedFriend() ||
         FuncY->isMemberLikeConstrainedFriend()) &&
        !FuncX->getLexicalDeclContext()->Equals(
            DC: FuncY->getLexicalDeclContext())) {
      return false;
    }

    if (!isSameAssociatedConstraint(ACX: FuncX->getTrailingRequiresClause(),
                                    ACY: FuncY->getTrailingRequiresClause()))
      return false;

    auto GetTypeAsWritten = [](const FunctionDecl *FD) {
      // Map to the first declaration that we've already merged into this one.
      // The TSI of redeclarations might not match (due to calling conventions
      // being inherited onto the type but not the TSI), but the TSI type of
      // the first declaration of the function should match across modules.
      FD = FD->getCanonicalDecl();
      return FD->getTypeSourceInfo() ? FD->getTypeSourceInfo()->getType()
                                     : FD->getType();
    };
    QualType XT = GetTypeAsWritten(FuncX), YT = GetTypeAsWritten(FuncY);
    if (!hasSameType(T1: XT, T2: YT)) {
      // We can get functions with different types on the redecl chain in C++17
      // if they have differing exception specifications and at least one of
      // the exception specs is unresolved.
      auto *XFPT = XT->getAs<FunctionProtoType>();
      auto *YFPT = YT->getAs<FunctionProtoType>();
      if (getLangOpts().CPlusPlus17 && XFPT && YFPT &&
          (isUnresolvedExceptionSpec(ESpecType: XFPT->getExceptionSpecType()) ||
           isUnresolvedExceptionSpec(ESpecType: YFPT->getExceptionSpecType())) &&
          hasSameFunctionTypeIgnoringExceptionSpec(T: XT, U: YT))
        return true;
      return false;
    }

    return FuncX->getLinkageInternal() == FuncY->getLinkageInternal() &&
           hasSameOverloadableAttrs(A: FuncX, B: FuncY);
  }

  // Variables with the same type and linkage match.
  if (const auto *VarX = dyn_cast<VarDecl>(Val: X)) {
    const auto *VarY = cast<VarDecl>(Val: Y);
    if (VarX->getLinkageInternal() == VarY->getLinkageInternal()) {
      // During deserialization, we might compare variables before we load
      // their types. Assume the types will end up being the same.
      if (VarX->getType().isNull() || VarY->getType().isNull())
        return true;

      if (hasSameType(T1: VarX->getType(), T2: VarY->getType()))
        return true;

      // We can get decls with different types on the redecl chain. Eg.
      // template <typename T> struct S { static T Var[]; }; // #1
      // template <typename T> T S<T>::Var[sizeof(T)]; // #2
      // This seems to happen only when completing an incomplete array type;
      // in this case when comparing #1 and #2 we should go through their
      // element type.
      const ArrayType *VarXTy = getAsArrayType(T: VarX->getType());
      const ArrayType *VarYTy = getAsArrayType(T: VarY->getType());
      if (!VarXTy || !VarYTy)
        return false;
      if (VarXTy->isIncompleteArrayType() || VarYTy->isIncompleteArrayType())
        return hasSameType(T1: VarXTy->getElementType(), T2: VarYTy->getElementType());
    }
    return false;
  }

  // Namespaces with the same name and inlinedness match.
  if (const auto *NamespaceX = dyn_cast<NamespaceDecl>(Val: X)) {
    const auto *NamespaceY = cast<NamespaceDecl>(Val: Y);
    return NamespaceX->isInline() == NamespaceY->isInline();
  }

  // Identical template names and kinds match if their template parameter lists
  // and patterns match.
  if (const auto *TemplateX = dyn_cast<TemplateDecl>(Val: X)) {
    const auto *TemplateY = cast<TemplateDecl>(Val: Y);

    // ConceptDecl wouldn't be the same if their constraint expression differs.
    if (const auto *ConceptX = dyn_cast<ConceptDecl>(Val: X)) {
      const auto *ConceptY = cast<ConceptDecl>(Val: Y);
      if (!isSameConstraintExpr(XCE: ConceptX->getConstraintExpr(),
                                YCE: ConceptY->getConstraintExpr()))
        return false;
    }

    return isSameEntity(X: TemplateX->getTemplatedDecl(),
                        Y: TemplateY->getTemplatedDecl()) &&
           isSameTemplateParameterList(X: TemplateX->getTemplateParameters(),
                                       Y: TemplateY->getTemplateParameters());
  }

  // Fields with the same name and the same type match.
  if (const auto *FDX = dyn_cast<FieldDecl>(Val: X)) {
    const auto *FDY = cast<FieldDecl>(Val: Y);
    // FIXME: Also check the bitwidth is odr-equivalent, if any.
    return hasSameType(T1: FDX->getType(), T2: FDY->getType());
  }

  // Indirect fields with the same target field match.
  if (const auto *IFDX = dyn_cast<IndirectFieldDecl>(Val: X)) {
    const auto *IFDY = cast<IndirectFieldDecl>(Val: Y);
    return IFDX->getAnonField()->getCanonicalDecl() ==
           IFDY->getAnonField()->getCanonicalDecl();
  }

  // Enumerators with the same name match.
  if (isa<EnumConstantDecl>(Val: X))
    // FIXME: Also check the value is odr-equivalent.
    return true;

  // Using shadow declarations with the same target match.
  if (const auto *USX = dyn_cast<UsingShadowDecl>(Val: X)) {
    const auto *USY = cast<UsingShadowDecl>(Val: Y);
    return declaresSameEntity(D1: USX->getTargetDecl(), D2: USY->getTargetDecl());
  }

  // Using declarations with the same qualifier match. (We already know that
  // the name matches.)
  if (const auto *UX = dyn_cast<UsingDecl>(Val: X)) {
    const auto *UY = cast<UsingDecl>(Val: Y);
    return isSameQualifier(X: UX->getQualifier(), Y: UY->getQualifier()) &&
           UX->hasTypename() == UY->hasTypename() &&
           UX->isAccessDeclaration() == UY->isAccessDeclaration();
  }
  if (const auto *UX = dyn_cast<UnresolvedUsingValueDecl>(Val: X)) {
    const auto *UY = cast<UnresolvedUsingValueDecl>(Val: Y);
    return isSameQualifier(X: UX->getQualifier(), Y: UY->getQualifier()) &&
           UX->isAccessDeclaration() == UY->isAccessDeclaration();
  }
  if (const auto *UX = dyn_cast<UnresolvedUsingTypenameDecl>(Val: X)) {
    return isSameQualifier(
        X: UX->getQualifier(),
        Y: cast<UnresolvedUsingTypenameDecl>(Val: Y)->getQualifier());
  }

  // Using-pack declarations are only created by instantiation, and match if
  // they're instantiated from matching UnresolvedUsing...Decls.
  if (const auto *UX = dyn_cast<UsingPackDecl>(Val: X)) {
    return declaresSameEntity(
        D1: UX->getInstantiatedFromUsingDecl(),
        D2: cast<UsingPackDecl>(Val: Y)->getInstantiatedFromUsingDecl());
  }

  // Namespace alias definitions with the same target match.
  if (const auto *NAX = dyn_cast<NamespaceAliasDecl>(Val: X)) {
    const auto *NAY = cast<NamespaceAliasDecl>(Val: Y);
    return NAX->getNamespace()->Equals(DC: NAY->getNamespace());
  }

  // Any other kind of declaration is not mergeable here.
  return false;
}
7884
/// Produce the canonical form of the given template argument, so that
/// equivalent arguments compare identical.
TemplateArgument
ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
  switch (Arg.getKind()) {
  case TemplateArgument::Null:
    return Arg;

  case TemplateArgument::Expression:
    // Rebuild the argument with the expression marked as canonical.
    return TemplateArgument(Arg.getAsExpr(), /*IsCanonical=*/true,
                            Arg.getIsDefaulted());

  case TemplateArgument::Declaration: {
    // Canonicalize both the referenced declaration and the parameter type.
    auto *D = cast<ValueDecl>(Val: Arg.getAsDecl()->getCanonicalDecl());
    return TemplateArgument(D, getCanonicalType(T: Arg.getParamTypeForDecl()),
                            Arg.getIsDefaulted());
  }

  case TemplateArgument::NullPtr:
    return TemplateArgument(getCanonicalType(T: Arg.getNullPtrType()),
                            /*isNullPtr*/ true, Arg.getIsDefaulted());

  case TemplateArgument::Template:
    return TemplateArgument(getCanonicalTemplateName(Name: Arg.getAsTemplate()),
                            Arg.getIsDefaulted());

  case TemplateArgument::TemplateExpansion:
    // Canonicalize the pattern, preserving the expansion count.
    return TemplateArgument(
        getCanonicalTemplateName(Name: Arg.getAsTemplateOrTemplatePattern()),
        Arg.getNumTemplateExpansions(), Arg.getIsDefaulted());

  case TemplateArgument::Integral:
    // Keep the value; canonicalize only the integral type.
    return TemplateArgument(Arg, getCanonicalType(T: Arg.getIntegralType()));

  case TemplateArgument::StructuralValue:
    return TemplateArgument(*this,
                            getCanonicalType(T: Arg.getStructuralValueType()),
                            Arg.getAsStructuralValue(), Arg.getIsDefaulted());

  case TemplateArgument::Type:
    return TemplateArgument(getCanonicalType(T: Arg.getAsType()),
                            /*isNullPtr*/ false, Arg.getIsDefaulted());

  case TemplateArgument::Pack: {
    // Canonicalize every element; only rebuild the pack if something changed.
    bool AnyNonCanonArgs = false;
    auto CanonArgs = ::getCanonicalTemplateArguments(
        C: *this, Args: Arg.pack_elements(), AnyNonCanonArgs);
    if (!AnyNonCanonArgs)
      return Arg;
    auto NewArg = TemplateArgument::CreatePackCopy(
        Context&: const_cast<ASTContext &>(*this), Args: CanonArgs);
    NewArg.setIsDefaulted(Arg.getIsDefaulted());
    return NewArg;
  }
  }

  // Silence GCC warning
  llvm_unreachable("Unhandled template argument kind");
}
7942
/// Determine whether two template arguments are equivalent.
bool ASTContext::isSameTemplateArgument(const TemplateArgument &Arg1,
                                        const TemplateArgument &Arg2) const {
  if (Arg1.getKind() != Arg2.getKind())
    return false;

  switch (Arg1.getKind()) {
  case TemplateArgument::Null:
    llvm_unreachable("Comparing NULL template argument");

  case TemplateArgument::Type:
    return hasSameType(T1: Arg1.getAsType(), T2: Arg2.getAsType());

  case TemplateArgument::Declaration:
    // Compare the canonical declarations of the underlying entities.
    return Arg1.getAsDecl()->getUnderlyingDecl()->getCanonicalDecl() ==
           Arg2.getAsDecl()->getUnderlyingDecl()->getCanonicalDecl();

  case TemplateArgument::NullPtr:
    return hasSameType(T1: Arg1.getNullPtrType(), T2: Arg2.getNullPtrType());

  case TemplateArgument::Template:
  case TemplateArgument::TemplateExpansion:
    // Template names match when their canonical forms are identical.
    return getCanonicalTemplateName(Name: Arg1.getAsTemplateOrTemplatePattern()) ==
           getCanonicalTemplateName(Name: Arg2.getAsTemplateOrTemplatePattern());

  case TemplateArgument::Integral:
    // Compare numeric values (isSameValue rather than operator==, which has
    // stricter requirements on the operands).
    return llvm::APSInt::isSameValue(I1: Arg1.getAsIntegral(),
                                     I2: Arg2.getAsIntegral());

  case TemplateArgument::StructuralValue:
    return Arg1.structurallyEquals(Other: Arg2);

  case TemplateArgument::Expression: {
    // Compare the expressions by their canonical profiles.
    llvm::FoldingSetNodeID ID1, ID2;
    Arg1.getAsExpr()->Profile(ID&: ID1, Context: *this, /*Canonical=*/true);
    Arg2.getAsExpr()->Profile(ID&: ID2, Context: *this, /*Canonical=*/true);
    return ID1 == ID2;
  }

  case TemplateArgument::Pack:
    // Packs match when they are element-wise equivalent.
    return llvm::equal(
        LRange: Arg1.getPackAsArray(), RRange: Arg2.getPackAsArray(),
        P: [&](const TemplateArgument &Arg1, const TemplateArgument &Arg2) {
          return isSameTemplateArgument(Arg1, Arg2);
        });
  }

  llvm_unreachable("Unhandled template argument kind");
}
7991
/// Return \p T as an ArrayType if its canonical type is an array, pushing any
/// top-level qualifiers down into the element type (C99 6.7.3p8); otherwise
/// return null.
const ArrayType *ASTContext::getAsArrayType(QualType T) const {
  // Handle the non-qualified case efficiently.
  if (!T.hasLocalQualifiers()) {
    // Handle the common positive case fast.
    if (const auto *AT = dyn_cast<ArrayType>(Val&: T))
      return AT;
  }

  // Handle the common negative case fast.
  if (!isa<ArrayType>(Val: T.getCanonicalType()))
    return nullptr;

  // Apply any qualifiers from the array type to the element type. This
  // implements C99 6.7.3p8: "If the specification of an array type includes
  // any type qualifiers, the element type is so qualified, not the array type."

  // If we get here, we either have type qualifiers on the type, or we have
  // sugar such as a typedef in the way. If we have type qualifiers on the type
  // we must propagate them down into the element type.

  SplitQualType split = T.getSplitDesugaredType();
  Qualifiers qs = split.Quals;

  // If we have a simple case, just return now.
  const auto *ATy = dyn_cast<ArrayType>(Val: split.Ty);
  if (!ATy || qs.empty())
    return ATy;

  // Otherwise, we have an array and we have qualifiers on it. Push the
  // qualifiers into the array element type and return a new array type.
  QualType NewEltTy = getQualifiedType(T: ATy->getElementType(), Qs: qs);

  // Rebuild the array with the newly-qualified element type, preserving the
  // specific array flavor (constant, incomplete, dependent-sized, variable).
  if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: ATy))
    return cast<ArrayType>(Val: getConstantArrayType(EltTy: NewEltTy, ArySizeIn: CAT->getSize(),
                                                SizeExpr: CAT->getSizeExpr(),
                                                ASM: CAT->getSizeModifier(),
                                                IndexTypeQuals: CAT->getIndexTypeCVRQualifiers()));
  if (const auto *IAT = dyn_cast<IncompleteArrayType>(Val: ATy))
    return cast<ArrayType>(Val: getIncompleteArrayType(elementType: NewEltTy,
                                                  ASM: IAT->getSizeModifier(),
                                                  elementTypeQuals: IAT->getIndexTypeCVRQualifiers()));

  if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(Val: ATy))
    return cast<ArrayType>(Val: getDependentSizedArrayType(
        elementType: NewEltTy, numElements: DSAT->getSizeExpr(), ASM: DSAT->getSizeModifier(),
        elementTypeQuals: DSAT->getIndexTypeCVRQualifiers()));

  // The only remaining flavor is a variable-length array.
  const auto *VAT = cast<VariableArrayType>(Val: ATy);
  return cast<ArrayType>(
      Val: getVariableArrayType(EltTy: NewEltTy, NumElts: VAT->getSizeExpr(), ASM: VAT->getSizeModifier(),
                            IndexTypeQuals: VAT->getIndexTypeCVRQualifiers()));
}
8044
8045QualType ASTContext::getAdjustedParameterType(QualType T) const {
8046 if (getLangOpts().HLSL && T.getAddressSpace() == LangAS::hlsl_groupshared)
8047 return getLValueReferenceType(T);
8048 if (getLangOpts().HLSL && T->isConstantArrayType())
8049 return getArrayParameterType(Ty: T);
8050 if (T->isArrayType() || T->isFunctionType())
8051 return getDecayedType(T);
8052 return T;
8053}
8054
8055QualType ASTContext::getSignatureParameterType(QualType T) const {
8056 T = getVariableArrayDecayedType(type: T);
8057 T = getAdjustedParameterType(T);
8058 return T.getUnqualifiedType();
8059}
8060
8061QualType ASTContext::getExceptionObjectType(QualType T) const {
8062 // C++ [except.throw]p3:
8063 // A throw-expression initializes a temporary object, called the exception
8064 // object, the type of which is determined by removing any top-level
8065 // cv-qualifiers from the static type of the operand of throw and adjusting
8066 // the type from "array of T" or "function returning T" to "pointer to T"
8067 // or "pointer to function returning T", [...]
8068 T = getVariableArrayDecayedType(type: T);
8069 if (T->isArrayType() || T->isFunctionType())
8070 T = getDecayedType(T);
8071 return T.getUnqualifiedType();
8072}
8073
8074/// getArrayDecayedType - Return the properly qualified result of decaying the
8075/// specified array type to a pointer. This operation is non-trivial when
8076/// handling typedefs etc. The canonical type of "T" must be an array type,
8077/// this returns a pointer to a properly qualified element of the array.
8078///
8079/// See C99 6.7.5.3p7 and C99 6.3.2.1p3.
8080QualType ASTContext::getArrayDecayedType(QualType Ty) const {
8081 // Get the element type with 'getAsArrayType' so that we don't lose any
8082 // typedefs in the element type of the array. This also handles propagation
8083 // of type qualifiers from the array type into the element type if present
8084 // (C99 6.7.3p8).
8085 const ArrayType *PrettyArrayType = getAsArrayType(T: Ty);
8086 assert(PrettyArrayType && "Not an array type!");
8087
8088 QualType PtrTy = getPointerType(T: PrettyArrayType->getElementType());
8089
8090 // int x[restrict 4] -> int *restrict
8091 QualType Result = getQualifiedType(T: PtrTy,
8092 Qs: PrettyArrayType->getIndexTypeQualifiers());
8093
8094 // int x[_Nullable] -> int * _Nullable
8095 if (auto Nullability = Ty->getNullability()) {
8096 Result = const_cast<ASTContext *>(this)->getAttributedType(nullability: *Nullability,
8097 modifiedType: Result, equivalentType: Result);
8098 }
8099 return Result;
8100}
8101
8102QualType ASTContext::getBaseElementType(const ArrayType *array) const {
8103 return getBaseElementType(QT: array->getElementType());
8104}
8105
8106QualType ASTContext::getBaseElementType(QualType type) const {
8107 Qualifiers qs;
8108 while (true) {
8109 SplitQualType split = type.getSplitDesugaredType();
8110 const ArrayType *array = split.Ty->getAsArrayTypeUnsafe();
8111 if (!array) break;
8112
8113 type = array->getElementType();
8114 qs.addConsistentQualifiers(qs: split.Quals);
8115 }
8116
8117 return getQualifiedType(T: type, Qs: qs);
8118}
8119
8120/// getConstantArrayElementCount - Returns number of constant array elements.
8121uint64_t
8122ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const {
8123 uint64_t ElementCount = 1;
8124 do {
8125 ElementCount *= CA->getZExtSize();
8126 CA = dyn_cast_or_null<ConstantArrayType>(
8127 Val: CA->getElementType()->getAsArrayTypeUnsafe());
8128 } while (CA);
8129 return ElementCount;
8130}
8131
8132uint64_t ASTContext::getArrayInitLoopExprElementCount(
8133 const ArrayInitLoopExpr *AILE) const {
8134 if (!AILE)
8135 return 0;
8136
8137 uint64_t ElementCount = 1;
8138
8139 do {
8140 ElementCount *= AILE->getArraySize().getZExtValue();
8141 AILE = dyn_cast<ArrayInitLoopExpr>(Val: AILE->getSubExpr());
8142 } while (AILE);
8143
8144 return ElementCount;
8145}
8146
8147/// getFloatingRank - Return a relative rank for floating point types.
8148/// This routine will assert if passed a built-in type that isn't a float.
8149static FloatingRank getFloatingRank(QualType T) {
8150 if (const auto *CT = T->getAs<ComplexType>())
8151 return getFloatingRank(T: CT->getElementType());
8152
8153 switch (T->castAs<BuiltinType>()->getKind()) {
8154 default: llvm_unreachable("getFloatingRank(): not a floating type");
8155 case BuiltinType::Float16: return Float16Rank;
8156 case BuiltinType::Half: return HalfRank;
8157 case BuiltinType::Float: return FloatRank;
8158 case BuiltinType::Double: return DoubleRank;
8159 case BuiltinType::LongDouble: return LongDoubleRank;
8160 case BuiltinType::Float128: return Float128Rank;
8161 case BuiltinType::BFloat16: return BFloat16Rank;
8162 case BuiltinType::Ibm128: return Ibm128Rank;
8163 }
8164}
8165
8166/// getFloatingTypeOrder - Compare the rank of the two specified floating
8167/// point types, ignoring the domain of the type (i.e. 'double' ==
8168/// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If
8169/// LHS < RHS, return -1.
8170int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const {
8171 FloatingRank LHSR = getFloatingRank(T: LHS);
8172 FloatingRank RHSR = getFloatingRank(T: RHS);
8173
8174 if (LHSR == RHSR)
8175 return 0;
8176 if (LHSR > RHSR)
8177 return 1;
8178 return -1;
8179}
8180
8181int ASTContext::getFloatingTypeSemanticOrder(QualType LHS, QualType RHS) const {
8182 if (&getFloatTypeSemantics(T: LHS) == &getFloatTypeSemantics(T: RHS))
8183 return 0;
8184 return getFloatingTypeOrder(LHS, RHS);
8185}
8186
/// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This
/// routine will assert if passed a built-in type that isn't an integer or enum,
/// or if it is not canonicalized.
///
/// The rank encodes the type's bit-width in the high bits (width << 3) plus a
/// small per-kind tiebreak (0..7) in the low bits, so a wider type always
/// outranks a narrower one, and among equal-width types the conventional C
/// ordering (bool < char < short < int < long < long long < int128) holds.
unsigned ASTContext::getIntegerRank(const Type *T) const {
  assert(T->isCanonicalUnqualified() && "T should be canonicalized");

  // Results in this 'losing' to any type of the same size, but winning if
  // larger.
  if (const auto *EIT = dyn_cast<BitIntType>(Val: T))
    return 0 + (EIT->getNumBits() << 3);

  // An overflow-behavior-qualified type ranks the same as its underlying type.
  if (const auto *OBT = dyn_cast<OverflowBehaviorType>(Val: T))
    return getIntegerRank(T: OBT->getUnderlyingType().getTypePtr());

  switch (cast<BuiltinType>(Val: T)->getKind()) {
  default: llvm_unreachable("getIntegerRank(): not a built-in integer");
  case BuiltinType::Bool:
    return 1 + (getIntWidth(T: BoolTy) << 3);
  case BuiltinType::Char_S:
  case BuiltinType::Char_U:
  case BuiltinType::SChar:
  case BuiltinType::UChar:
    return 2 + (getIntWidth(T: CharTy) << 3);
  case BuiltinType::Short:
  case BuiltinType::UShort:
    return 3 + (getIntWidth(T: ShortTy) << 3);
  case BuiltinType::Int:
  case BuiltinType::UInt:
    return 4 + (getIntWidth(T: IntTy) << 3);
  case BuiltinType::Long:
  case BuiltinType::ULong:
    return 5 + (getIntWidth(T: LongTy) << 3);
  case BuiltinType::LongLong:
  case BuiltinType::ULongLong:
    return 6 + (getIntWidth(T: LongLongTy) << 3);
  case BuiltinType::Int128:
  case BuiltinType::UInt128:
    return 7 + (getIntWidth(T: Int128Ty) << 3);

  // "The ranks of char8_t, char16_t, char32_t, and wchar_t equal the ranks of
  // their underlying types" [c++20 conv.rank]
  case BuiltinType::Char8:
    return getIntegerRank(T: UnsignedCharTy.getTypePtr());
  case BuiltinType::Char16:
    return getIntegerRank(
        T: getFromTargetType(Type: Target->getChar16Type()).getTypePtr());
  case BuiltinType::Char32:
    return getIntegerRank(
        T: getFromTargetType(Type: Target->getChar32Type()).getTypePtr());
  case BuiltinType::WChar_S:
  case BuiltinType::WChar_U:
    return getIntegerRank(
        T: getFromTargetType(Type: Target->getWCharType()).getTypePtr());
  }
}
8242
8243/// Whether this is a promotable bitfield reference according
8244/// to C99 6.3.1.1p2, bullet 2 (and GCC extensions).
8245///
8246/// \returns the type this bit-field will promote to, or NULL if no
8247/// promotion occurs.
8248QualType ASTContext::isPromotableBitField(Expr *E) const {
8249 if (E->isTypeDependent() || E->isValueDependent())
8250 return {};
8251
8252 // C++ [conv.prom]p5:
8253 // If the bit-field has an enumerated type, it is treated as any other
8254 // value of that type for promotion purposes.
8255 if (getLangOpts().CPlusPlus && E->getType()->isEnumeralType())
8256 return {};
8257
8258 // FIXME: We should not do this unless E->refersToBitField() is true. This
8259 // matters in C where getSourceBitField() will find bit-fields for various
8260 // cases where the source expression is not a bit-field designator.
8261
8262 FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields?
8263 if (!Field)
8264 return {};
8265
8266 QualType FT = Field->getType();
8267
8268 uint64_t BitWidth = Field->getBitWidthValue();
8269 uint64_t IntSize = getTypeSize(T: IntTy);
8270 // C++ [conv.prom]p5:
8271 // A prvalue for an integral bit-field can be converted to a prvalue of type
8272 // int if int can represent all the values of the bit-field; otherwise, it
8273 // can be converted to unsigned int if unsigned int can represent all the
8274 // values of the bit-field. If the bit-field is larger yet, no integral
8275 // promotion applies to it.
8276 // C11 6.3.1.1/2:
8277 // [For a bit-field of type _Bool, int, signed int, or unsigned int:]
8278 // If an int can represent all values of the original type (as restricted by
8279 // the width, for a bit-field), the value is converted to an int; otherwise,
8280 // it is converted to an unsigned int.
8281 //
8282 // FIXME: C does not permit promotion of a 'long : 3' bitfield to int.
8283 // We perform that promotion here to match GCC and C++.
8284 // FIXME: C does not permit promotion of an enum bit-field whose rank is
8285 // greater than that of 'int'. We perform that promotion to match GCC.
8286 //
8287 // C23 6.3.1.1p2:
8288 // The value from a bit-field of a bit-precise integer type is converted to
8289 // the corresponding bit-precise integer type. (The rest is the same as in
8290 // C11.)
8291 if (QualType QT = Field->getType(); QT->isBitIntType())
8292 return QT;
8293
8294 if (BitWidth < IntSize)
8295 return IntTy;
8296
8297 if (BitWidth == IntSize)
8298 return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy;
8299
8300 // Bit-fields wider than int are not subject to promotions, and therefore act
8301 // like the base type. GCC has some weird bugs in this area that we
8302 // deliberately do not follow (GCC follows a pre-standard resolution to
8303 // C's DR315 which treats bit-width as being part of the type, and this leaks
8304 // into their semantics in some cases).
8305 return {};
8306}
8307
/// getPromotedIntegerType - Returns the type that Promotable will
/// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable
/// integer type.
/// \pre \p Promotable is non-null and satisfies isPromotableIntegerType().
QualType ASTContext::getPromotedIntegerType(QualType Promotable) const {
  assert(!Promotable.isNull());
  assert(isPromotableIntegerType(Promotable));
  // Enums promote to their declared promotion type.
  if (const auto *ED = Promotable->getAsEnumDecl())
    return ED->getPromotionType();

  // OverflowBehaviorTypes promote their underlying type and preserve OBT
  // qualifier.
  if (const auto *OBT = Promotable->getAs<OverflowBehaviorType>()) {
    QualType PromotedUnderlying =
        getPromotedIntegerType(Promotable: OBT->getUnderlyingType());
    return getOverflowBehaviorType(Kind: OBT->getBehaviorKind(), Underlying: PromotedUnderlying);
  }

  if (const auto *BT = Promotable->getAs<BuiltinType>()) {
    // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t
    // (3.9.1) can be converted to a prvalue of the first of the following
    // types that can represent all the values of its underlying type:
    // int, unsigned int, long int, unsigned long int, long long int, or
    // unsigned long long int [...]
    // FIXME: Is there some better way to compute this?
    if (BT->getKind() == BuiltinType::WChar_S ||
        BT->getKind() == BuiltinType::WChar_U ||
        BT->getKind() == BuiltinType::Char8 ||
        BT->getKind() == BuiltinType::Char16 ||
        BT->getKind() == BuiltinType::Char32) {
      bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S;
      uint64_t FromSize = getTypeSize(T: BT);
      QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy,
                                  LongLongTy, UnsignedLongLongTy };
      // Take the first candidate strictly wider than the source, or the
      // equal-width candidate whose signedness matches (so all source values
      // are representable).
      for (const auto &PT : PromoteTypes) {
        uint64_t ToSize = getTypeSize(T: PT);
        if (FromSize < ToSize ||
            (FromSize == ToSize && FromIsSigned == PT->isSignedIntegerType()))
          return PT;
      }
      llvm_unreachable("char type should fit into long long");
    }
  }

  // At this point, we should have a signed or unsigned integer type.
  if (Promotable->isSignedIntegerType())
    return IntTy;
  uint64_t PromotableSize = getIntWidth(T: Promotable);
  uint64_t IntSize = getIntWidth(T: IntTy);
  assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize);
  // An unsigned type narrower than int promotes to int; one exactly as wide
  // as int promotes to unsigned int (C99 6.3.1.1p2).
  return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy;
}
8359
8360/// Recurses in pointer/array types until it finds an objc retainable
8361/// type and returns its ownership.
8362Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const {
8363 while (!T.isNull()) {
8364 if (T.getObjCLifetime() != Qualifiers::OCL_None)
8365 return T.getObjCLifetime();
8366 if (T->isArrayType())
8367 T = getBaseElementType(type: T);
8368 else if (const auto *PT = T->getAs<PointerType>())
8369 T = PT->getPointeeType();
8370 else if (const auto *RT = T->getAs<ReferenceType>())
8371 T = RT->getPointeeType();
8372 else
8373 break;
8374 }
8375
8376 return Qualifiers::OCL_None;
8377}
8378
8379static const Type *getIntegerTypeForEnum(const EnumType *ET) {
8380 // Incomplete enum types are not treated as integer types.
8381 // FIXME: In C++, enum types are never integer types.
8382 const EnumDecl *ED = ET->getDecl()->getDefinitionOrSelf();
8383 if (ED->isComplete() && !ED->isScoped())
8384 return ED->getIntegerType().getTypePtr();
8385 return nullptr;
8386}
8387
/// getIntegerTypeOrder - Returns the highest ranked integer type:
/// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If
/// LHS < RHS, return -1.
int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const {
  const Type *LHSC = getCanonicalType(T: LHS).getTypePtr();
  const Type *RHSC = getCanonicalType(T: RHS).getTypePtr();

  // Unwrap enums to their underlying type.
  // NOTE(review): getIntegerTypeForEnum returns null for incomplete or
  // scoped enums; callers are presumably expected to pass only integer-ranked
  // types here — confirm against call sites.
  if (const auto *ET = dyn_cast<EnumType>(Val: LHSC))
    LHSC = getIntegerTypeForEnum(ET);
  if (const auto *ET = dyn_cast<EnumType>(Val: RHSC))
    RHSC = getIntegerTypeForEnum(ET);

  if (LHSC == RHSC) return 0;

  bool LHSUnsigned = LHSC->isUnsignedIntegerType();
  bool RHSUnsigned = RHSC->isUnsignedIntegerType();

  unsigned LHSRank = getIntegerRank(T: LHSC);
  unsigned RHSRank = getIntegerRank(T: RHSC);

  if (LHSUnsigned == RHSUnsigned) {  // Both signed or both unsigned.
    if (LHSRank == RHSRank) return 0;
    return LHSRank > RHSRank ? 1 : -1;
  }

  // Otherwise, the LHS is signed and the RHS is unsigned or visa versa.
  if (LHSUnsigned) {
    // If the unsigned [LHS] type is larger, return it.
    if (LHSRank >= RHSRank)
      return 1;

    // If the signed type can represent all values of the unsigned type, it
    // wins.  Because we are dealing with 2's complement and types that are
    // powers of two larger than each other, this is always safe.
    return -1;
  }

  // If the unsigned [RHS] type is larger, return it.
  if (RHSRank >= LHSRank)
    return -1;

  // If the signed type can represent all values of the unsigned type, it
  // wins.  Because we are dealing with 2's complement and types that are
  // powers of two larger than each other, this is always safe.
  return 1;
}
8435
8436TypedefDecl *ASTContext::getCFConstantStringDecl() const {
8437 if (CFConstantStringTypeDecl)
8438 return CFConstantStringTypeDecl;
8439
8440 assert(!CFConstantStringTagDecl &&
8441 "tag and typedef should be initialized together");
8442 CFConstantStringTagDecl = buildImplicitRecord(Name: "__NSConstantString_tag");
8443 CFConstantStringTagDecl->startDefinition();
8444
8445 struct {
8446 QualType Type;
8447 const char *Name;
8448 } Fields[5];
8449 unsigned Count = 0;
8450
8451 /// Objective-C ABI
8452 ///
8453 /// typedef struct __NSConstantString_tag {
8454 /// const int *isa;
8455 /// int flags;
8456 /// const char *str;
8457 /// long length;
8458 /// } __NSConstantString;
8459 ///
8460 /// Swift ABI (4.1, 4.2)
8461 ///
8462 /// typedef struct __NSConstantString_tag {
8463 /// uintptr_t _cfisa;
8464 /// uintptr_t _swift_rc;
8465 /// _Atomic(uint64_t) _cfinfoa;
8466 /// const char *_ptr;
8467 /// uint32_t _length;
8468 /// } __NSConstantString;
8469 ///
8470 /// Swift ABI (5.0)
8471 ///
8472 /// typedef struct __NSConstantString_tag {
8473 /// uintptr_t _cfisa;
8474 /// uintptr_t _swift_rc;
8475 /// _Atomic(uint64_t) _cfinfoa;
8476 /// const char *_ptr;
8477 /// uintptr_t _length;
8478 /// } __NSConstantString;
8479
8480 const auto CFRuntime = getLangOpts().CFRuntime;
8481 if (static_cast<unsigned>(CFRuntime) <
8482 static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift)) {
8483 Fields[Count++] = { .Type: getPointerType(T: IntTy.withConst()), .Name: "isa" };
8484 Fields[Count++] = { .Type: IntTy, .Name: "flags" };
8485 Fields[Count++] = { .Type: getPointerType(T: CharTy.withConst()), .Name: "str" };
8486 Fields[Count++] = { .Type: LongTy, .Name: "length" };
8487 } else {
8488 Fields[Count++] = { .Type: getUIntPtrType(), .Name: "_cfisa" };
8489 Fields[Count++] = { .Type: getUIntPtrType(), .Name: "_swift_rc" };
8490 Fields[Count++] = { .Type: getFromTargetType(Type: Target->getUInt64Type()), .Name: "_swift_rc" };
8491 Fields[Count++] = { .Type: getPointerType(T: CharTy.withConst()), .Name: "_ptr" };
8492 if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 ||
8493 CFRuntime == LangOptions::CoreFoundationABI::Swift4_2)
8494 Fields[Count++] = { .Type: IntTy, .Name: "_ptr" };
8495 else
8496 Fields[Count++] = { .Type: getUIntPtrType(), .Name: "_ptr" };
8497 }
8498
8499 // Create fields
8500 for (unsigned i = 0; i < Count; ++i) {
8501 FieldDecl *Field =
8502 FieldDecl::Create(C: *this, DC: CFConstantStringTagDecl, StartLoc: SourceLocation(),
8503 IdLoc: SourceLocation(), Id: &Idents.get(Name: Fields[i].Name),
8504 T: Fields[i].Type, /*TInfo=*/nullptr,
8505 /*BitWidth=*/BW: nullptr, /*Mutable=*/false, InitStyle: ICIS_NoInit);
8506 Field->setAccess(AS_public);
8507 CFConstantStringTagDecl->addDecl(D: Field);
8508 }
8509
8510 CFConstantStringTagDecl->completeDefinition();
8511 // This type is designed to be compatible with NSConstantString, but cannot
8512 // use the same name, since NSConstantString is an interface.
8513 CanQualType tagType = getCanonicalTagType(TD: CFConstantStringTagDecl);
8514 CFConstantStringTypeDecl =
8515 buildImplicitTypedef(T: tagType, Name: "__NSConstantString");
8516
8517 return CFConstantStringTypeDecl;
8518}
8519
8520RecordDecl *ASTContext::getCFConstantStringTagDecl() const {
8521 if (!CFConstantStringTagDecl)
8522 getCFConstantStringDecl(); // Build the tag and the typedef.
8523 return CFConstantStringTagDecl;
8524}
8525
8526// getCFConstantStringType - Return the type used for constant CFStrings.
8527QualType ASTContext::getCFConstantStringType() const {
8528 return getTypedefType(Keyword: ElaboratedTypeKeyword::None, /*Qualifier=*/std::nullopt,
8529 Decl: getCFConstantStringDecl());
8530}
8531
8532QualType ASTContext::getObjCSuperType() const {
8533 if (ObjCSuperType.isNull()) {
8534 RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord(Name: "objc_super");
8535 getTranslationUnitDecl()->addDecl(D: ObjCSuperTypeDecl);
8536 ObjCSuperType = getCanonicalTagType(TD: ObjCSuperTypeDecl);
8537 }
8538 return ObjCSuperType;
8539}
8540
/// Install an externally-provided CFConstantString type. \p T must be a
/// typedef of a record type; both the typedef decl and its underlying tag
/// are cached so they stay in sync.
void ASTContext::setCFConstantStringType(QualType T) {
  const auto *TT = T->castAs<TypedefType>();
  CFConstantStringTypeDecl = cast<TypedefDecl>(Val: TT->getDecl());
  CFConstantStringTagDecl = TT->castAsRecordDecl();
}
8546
8547QualType ASTContext::getBlockDescriptorType() const {
8548 if (BlockDescriptorType)
8549 return getCanonicalTagType(TD: BlockDescriptorType);
8550
8551 RecordDecl *RD;
8552 // FIXME: Needs the FlagAppleBlock bit.
8553 RD = buildImplicitRecord(Name: "__block_descriptor");
8554 RD->startDefinition();
8555
8556 QualType FieldTypes[] = {
8557 UnsignedLongTy,
8558 UnsignedLongTy,
8559 };
8560
8561 static const char *const FieldNames[] = {
8562 "reserved",
8563 "Size"
8564 };
8565
8566 for (size_t i = 0; i < 2; ++i) {
8567 FieldDecl *Field = FieldDecl::Create(
8568 C: *this, DC: RD, StartLoc: SourceLocation(), IdLoc: SourceLocation(),
8569 Id: &Idents.get(Name: FieldNames[i]), T: FieldTypes[i], /*TInfo=*/nullptr,
8570 /*BitWidth=*/BW: nullptr, /*Mutable=*/false, InitStyle: ICIS_NoInit);
8571 Field->setAccess(AS_public);
8572 RD->addDecl(D: Field);
8573 }
8574
8575 RD->completeDefinition();
8576
8577 BlockDescriptorType = RD;
8578
8579 return getCanonicalTagType(TD: BlockDescriptorType);
8580}
8581
8582QualType ASTContext::getBlockDescriptorExtendedType() const {
8583 if (BlockDescriptorExtendedType)
8584 return getCanonicalTagType(TD: BlockDescriptorExtendedType);
8585
8586 RecordDecl *RD;
8587 // FIXME: Needs the FlagAppleBlock bit.
8588 RD = buildImplicitRecord(Name: "__block_descriptor_withcopydispose");
8589 RD->startDefinition();
8590
8591 QualType FieldTypes[] = {
8592 UnsignedLongTy,
8593 UnsignedLongTy,
8594 getPointerType(T: VoidPtrTy),
8595 getPointerType(T: VoidPtrTy)
8596 };
8597
8598 static const char *const FieldNames[] = {
8599 "reserved",
8600 "Size",
8601 "CopyFuncPtr",
8602 "DestroyFuncPtr"
8603 };
8604
8605 for (size_t i = 0; i < 4; ++i) {
8606 FieldDecl *Field = FieldDecl::Create(
8607 C: *this, DC: RD, StartLoc: SourceLocation(), IdLoc: SourceLocation(),
8608 Id: &Idents.get(Name: FieldNames[i]), T: FieldTypes[i], /*TInfo=*/nullptr,
8609 /*BitWidth=*/BW: nullptr,
8610 /*Mutable=*/false, InitStyle: ICIS_NoInit);
8611 Field->setAccess(AS_public);
8612 RD->addDecl(D: Field);
8613 }
8614
8615 RD->completeDefinition();
8616
8617 BlockDescriptorExtendedType = RD;
8618 return getCanonicalTagType(TD: BlockDescriptorExtendedType);
8619}
8620
/// Classify \p T into an OpenCL type kind: pipes, images, and the builtin
/// OpenCL object types each get their own kind; everything else is
/// OCLTK_Default.
OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const {
  const auto *BT = dyn_cast<BuiltinType>(Val: T);

  if (!BT) {
    // Only pipes are distinguished among non-builtin types.
    if (isa<PipeType>(Val: T))
      return OCLTK_Pipe;

    return OCLTK_Default;
  }

  switch (BT->getKind()) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
  case BuiltinType::Id: \
    return OCLTK_Image;
#include "clang/Basic/OpenCLImageTypes.def"

  case BuiltinType::OCLClkEvent:
    return OCLTK_ClkEvent;

  case BuiltinType::OCLEvent:
    return OCLTK_Event;

  case BuiltinType::OCLQueue:
    return OCLTK_Queue;

  case BuiltinType::OCLReserveID:
    return OCLTK_ReserveID;

  case BuiltinType::OCLSampler:
    return OCLTK_Sampler;

  default:
    return OCLTK_Default;
  }
}
8656
/// Map \p T to the target-specific address space for its OpenCL type kind.
LangAS ASTContext::getOpenCLTypeAddrSpace(const Type *T) const {
  return Target->getOpenCLTypeAddrSpace(TK: getOpenCLTypeKind(T));
}
8660
/// BlockRequiresCopying - Returns true if byref variable "D" of type "Ty"
/// requires copy/dispose. Note that this must match the logic
/// in buildByrefHelpers.
bool ASTContext::BlockRequiresCopying(QualType Ty,
                                      const VarDecl *D) {
  // C++ records need helpers unless they have neither a copy expression nor
  // a nontrivial destructor.
  if (const CXXRecordDecl *record = Ty->getAsCXXRecordDecl()) {
    const Expr *copyExpr = getBlockVarCopyInit(VD: D).getCopyExpr();
    if (!copyExpr && record->hasTrivialDestructor()) return false;

    return true;
  }

  // Address-discriminated pointer-auth values need nontrivial copies
  // (presumably re-signing at the new address — see buildByrefHelpers).
  if (Ty.hasAddressDiscriminatedPointerAuth())
    return true;

  // The block needs copy/destroy helpers if Ty is non-trivial to destructively
  // move or destroy.
  if (Ty.isNonTrivialToPrimitiveDestructiveMove() || Ty.isDestructedType())
    return true;

  if (!Ty->isObjCRetainableType()) return false;

  Qualifiers qs = Ty.getQualifiers();

  // If we have lifetime, that dominates.
  if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) {
    switch (lifetime) {
      case Qualifiers::OCL_None: llvm_unreachable("impossible");

      // These are just bits as far as the runtime is concerned.
      case Qualifiers::OCL_ExplicitNone:
      case Qualifiers::OCL_Autoreleasing:
        return false;

      // These cases should have been taken care of when checking the type's
      // non-triviality.
      case Qualifiers::OCL_Weak:
      case Qualifiers::OCL_Strong:
        llvm_unreachable("impossible");
    }
    llvm_unreachable("fell out of lifetime switch!");
  }
  // No lifetime qualifier: retainable pointers (blocks, NSObject typedefs,
  // ObjC object pointers) still require helpers.
  return (Ty->isBlockPointerType() || isObjCNSObjectType(Ty) ||
          Ty->isObjCObjectPointerType());
}
8706
/// Compute the ObjC lifetime and layout requirements for a __block variable
/// of type \p Ty.
///
/// \param LifeTime [out] lifetime to use for the byref slot.
/// \param HasByrefExtendedLayout [out] set to true for record types, which
///        need the extended byref layout.
/// \returns false (outputs other than HasByrefExtendedLayout untouched)
///          unless compiling Objective-C without garbage collection.
bool ASTContext::getByrefLifetime(QualType Ty,
                                  Qualifiers::ObjCLifetime &LifeTime,
                                  bool &HasByrefExtendedLayout) const {
  if (!getLangOpts().ObjC ||
      getLangOpts().getGC() != LangOptions::NonGC)
    return false;

  HasByrefExtendedLayout = false;
  if (Ty->isRecordType()) {
    HasByrefExtendedLayout = true;
    LifeTime = Qualifiers::OCL_None;
  } else if ((LifeTime = Ty.getObjCLifetime())) {
    // Honor the ARC qualifiers.
  } else if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) {
    // The MRR rule.
    LifeTime = Qualifiers::OCL_ExplicitNone;
  } else {
    LifeTime = Qualifiers::OCL_None;
  }
  return true;
}
8728
8729CanQualType ASTContext::getNSUIntegerType() const {
8730 assert(Target && "Expected target to be initialized");
8731 const llvm::Triple &T = Target->getTriple();
8732 // Windows is LLP64 rather than LP64
8733 if (T.isOSWindows() && T.isArch64Bit())
8734 return UnsignedLongLongTy;
8735 return UnsignedLongTy;
8736}
8737
8738CanQualType ASTContext::getNSIntegerType() const {
8739 assert(Target && "Expected target to be initialized");
8740 const llvm::Triple &T = Target->getTriple();
8741 // Windows is LLP64 rather than LP64
8742 if (T.isOSWindows() && T.isArch64Bit())
8743 return LongLongTy;
8744 return LongTy;
8745}
8746
8747TypedefDecl *ASTContext::getObjCInstanceTypeDecl() {
8748 if (!ObjCInstanceTypeDecl)
8749 ObjCInstanceTypeDecl =
8750 buildImplicitTypedef(T: getObjCIdType(), Name: "instancetype");
8751 return ObjCInstanceTypeDecl;
8752}
8753
8754// This returns true if a type has been typedefed to BOOL:
8755// typedef <type> BOOL;
8756static bool isTypeTypedefedAsBOOL(QualType T) {
8757 if (const auto *TT = dyn_cast<TypedefType>(Val&: T))
8758 if (IdentifierInfo *II = TT->getDecl()->getIdentifier())
8759 return II->isStr(Str: "BOOL");
8760
8761 return false;
8762}
8763
8764/// getObjCEncodingTypeSize returns size of type for objective-c encoding
8765/// purpose.
8766CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const {
8767 if (!type->isIncompleteArrayType() && type->isIncompleteType())
8768 return CharUnits::Zero();
8769
8770 CharUnits sz = getTypeSizeInChars(T: type);
8771
8772 // Make all integer and enum types at least as large as an int
8773 if (sz.isPositive() && type->isIntegralOrEnumerationType())
8774 sz = std::max(a: sz, b: getTypeSizeInChars(T: IntTy));
8775 // Treat arrays as pointers, since that's how they're passed in.
8776 else if (type->isArrayType())
8777 sz = getTypeSizeInChars(T: VoidPtrTy);
8778 return sz;
8779}
8780
8781bool ASTContext::isMSStaticDataMemberInlineDefinition(const VarDecl *VD) const {
8782 return getTargetInfo().getCXXABI().isMicrosoft() &&
8783 VD->isStaticDataMember() &&
8784 VD->getType()->isIntegralOrEnumerationType() &&
8785 !VD->getFirstDecl()->isOutOfLine() && VD->getFirstDecl()->hasInit();
8786}
8787
/// Classify how the inline variable \p VD is defined in this translation
/// unit: None (not inline), Weak (discardable), Strong (non-discardable),
/// or WeakUnknown (no deciding redeclaration seen yet).
ASTContext::InlineVariableDefinitionKind
ASTContext::getInlineVariableDefinitionKind(const VarDecl *VD) const {
  if (!VD->isInline())
    return InlineVariableDefinitionKind::None;

  // In almost all cases, it's a weak definition.
  auto *First = VD->getFirstDecl();
  if (First->isInlineSpecified() || !First->isStaticDataMember())
    return InlineVariableDefinitionKind::Weak;

  // If there's a file-context declaration in this translation unit, it's a
  // non-discardable definition.
  for (auto *D : VD->redecls())
    if (D->getLexicalDeclContext()->isFileContext() &&
        !D->isInlineSpecified() && (D->isConstexpr() || First->isConstexpr()))
      return InlineVariableDefinitionKind::Strong;

  // If we've not seen one yet, we don't know.
  return InlineVariableDefinitionKind::WeakUnknown;
}
8808
/// Render a CharUnits quantity as a decimal string; used below to embed byte
/// offsets and sizes in Objective-C type-encoding strings.
static std::string charUnitsToString(const CharUnits &CU) {
  return llvm::itostr(X: CU.getQuantity());
}
8812
/// getObjCEncodingForBlock - Return the encoded type for this block
/// declaration.
///
/// The encoding is: <return-type> <total-frame-size> "@?0" followed by each
/// parameter's type encoding and its byte offset in the argument frame; the
/// block pointer itself occupies the first (pointer-sized) slot, hence "@?0".
std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const {
  std::string S;

  const BlockDecl *Decl = Expr->getBlockDecl();
  QualType BlockTy =
      Expr->getType()->castAs<BlockPointerType>()->getPointeeType();
  QualType BlockReturnTy = BlockTy->castAs<FunctionType>()->getReturnType();
  // Encode result type.
  if (getLangOpts().EncodeExtendedBlockSig)
    getObjCEncodingForMethodParameter(QT: Decl::OBJC_TQ_None, T: BlockReturnTy, S,
                                      Extended: true /*Extended*/);
  else
    getObjCEncodingForType(T: BlockReturnTy, S);
  // Compute size of all parameters.
  // Start with computing size of a pointer in number of bytes.
  // FIXME: There might(should) be a better way of doing this computation!
  CharUnits PtrSize = getTypeSizeInChars(T: VoidPtrTy);
  CharUnits ParmOffset = PtrSize;
  // First pass: total the frame size (zero-sized params are skipped).
  for (auto *PI : Decl->parameters()) {
    QualType PType = PI->getType();
    CharUnits sz = getObjCEncodingTypeSize(type: PType);
    if (sz.isZero())
      continue;
    assert(sz.isPositive() && "BlockExpr - Incomplete param type");
    ParmOffset += sz;
  }
  // Size of the argument frame
  S += charUnitsToString(CU: ParmOffset);
  // Block pointer and offset.
  S += "@?0";

  // Argument types.
  // Second pass: emit each parameter's encoding followed by its offset.
  ParmOffset = PtrSize;
  for (auto *PVDecl : Decl->parameters()) {
    QualType PType = PVDecl->getOriginalType();
    if (const auto *AT =
            dyn_cast<ArrayType>(Val: PType->getCanonicalTypeInternal())) {
      // Use array's original type only if it has known number of
      // elements.
      if (!isa<ConstantArrayType>(Val: AT))
        PType = PVDecl->getType();
    } else if (PType->isFunctionType())
      PType = PVDecl->getType();
    if (getLangOpts().EncodeExtendedBlockSig)
      getObjCEncodingForMethodParameter(QT: Decl::OBJC_TQ_None, T: PType,
                                        S, Extended: true /*Extended*/);
    else
      getObjCEncodingForType(T: PType, S);
    S += charUnitsToString(CU: ParmOffset);
    ParmOffset += getObjCEncodingTypeSize(type: PType);
  }

  return S;
}
8869
/// Return the Objective-C type encoding for a function declaration:
/// <return-type> <total-param-size> then each parameter's encoding followed
/// by its byte offset (starting at 0 — no implicit self/_cmd slots here).
std::string
ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const {
  std::string S;
  // Encode result type.
  getObjCEncodingForType(T: Decl->getReturnType(), S);
  CharUnits ParmOffset;
  // Compute size of all parameters.
  for (auto *PI : Decl->parameters()) {
    QualType PType = PI->getType();
    CharUnits sz = getObjCEncodingTypeSize(type: PType);
    if (sz.isZero())
      continue;

    assert(sz.isPositive() &&
           "getObjCEncodingForFunctionDecl - Incomplete param type");
    ParmOffset += sz;
  }
  S += charUnitsToString(CU: ParmOffset);
  ParmOffset = CharUnits::Zero();

  // Argument types.
  for (auto *PVDecl : Decl->parameters()) {
    QualType PType = PVDecl->getOriginalType();
    if (const auto *AT =
            dyn_cast<ArrayType>(Val: PType->getCanonicalTypeInternal())) {
      // Use array's original type only if it has known number of
      // elements.
      if (!isa<ConstantArrayType>(Val: AT))
        PType = PVDecl->getType();
    } else if (PType->isFunctionType())
      PType = PVDecl->getType();
    getObjCEncodingForType(T: PType, S);
    S += charUnitsToString(CU: ParmOffset);
    ParmOffset += getObjCEncodingTypeSize(type: PType);
  }

  return S;
}
8908
8909/// getObjCEncodingForMethodParameter - Return the encoded type for a single
8910/// method parameter or return type. If Extended, include class names and
8911/// block object types.
8912void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT,
8913 QualType T, std::string& S,
8914 bool Extended) const {
8915 // Encode type qualifier, 'in', 'inout', etc. for the parameter.
8916 getObjCEncodingForTypeQualifier(QT, S);
8917 // Encode parameter type.
8918 ObjCEncOptions Options = ObjCEncOptions()
8919 .setExpandPointedToStructures()
8920 .setExpandStructures()
8921 .setIsOutermostType();
8922 if (Extended)
8923 Options.setEncodeBlockParameters().setEncodeClassNames();
8924 getObjCEncodingForTypeImpl(t: T, S, Options, /*Field=*/nullptr);
8925}
8926
/// getObjCEncodingForMethodDecl - Return the encoded type for this method
/// declaration.
///
/// Layout: <return-type> <total-frame-size> "@0:" <ptr-size> then each
/// declared parameter's encoding followed by its byte offset. The "@0:"
/// prefix covers the two implicit pointer arguments (self at offset 0 and
/// _cmd at offset <ptr-size>).
std::string ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl,
                                                     bool Extended) const {
  // FIXME: This is not very efficient.
  // Encode return type.
  std::string S;
  getObjCEncodingForMethodParameter(QT: Decl->getObjCDeclQualifier(),
                                    T: Decl->getReturnType(), S, Extended);
  // Compute size of all parameters.
  // Start with computing size of a pointer in number of bytes.
  // FIXME: There might(should) be a better way of doing this computation!
  CharUnits PtrSize = getTypeSizeInChars(T: VoidPtrTy);
  // The first two arguments (self and _cmd) are pointers; account for
  // their size.
  CharUnits ParmOffset = 2 * PtrSize;
  for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(),
       E = Decl->sel_param_end(); PI != E; ++PI) {
    QualType PType = (*PI)->getType();
    CharUnits sz = getObjCEncodingTypeSize(type: PType);
    if (sz.isZero())
      continue;

    assert(sz.isPositive() &&
           "getObjCEncodingForMethodDecl - Incomplete param type");
    ParmOffset += sz;
  }
  S += charUnitsToString(CU: ParmOffset);
  S += "@0:";
  S += charUnitsToString(CU: PtrSize);

  // Argument types.
  ParmOffset = 2 * PtrSize;
  for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(),
       E = Decl->sel_param_end(); PI != E; ++PI) {
    const ParmVarDecl *PVDecl = *PI;
    QualType PType = PVDecl->getOriginalType();
    if (const auto *AT =
            dyn_cast<ArrayType>(Val: PType->getCanonicalTypeInternal())) {
      // Use array's original type only if it has known number of
      // elements.
      if (!isa<ConstantArrayType>(Val: AT))
        PType = PVDecl->getType();
    } else if (PType->isFunctionType())
      PType = PVDecl->getType();
    getObjCEncodingForMethodParameter(QT: PVDecl->getObjCDeclQualifier(),
                                      T: PType, S, Extended);
    S += charUnitsToString(CU: ParmOffset);
    ParmOffset += getObjCEncodingTypeSize(type: PType);
  }

  return S;
}
8980
8981ObjCPropertyImplDecl *
8982ASTContext::getObjCPropertyImplDeclForPropertyDecl(
8983 const ObjCPropertyDecl *PD,
8984 const Decl *Container) const {
8985 if (!Container)
8986 return nullptr;
8987 if (const auto *CID = dyn_cast<ObjCCategoryImplDecl>(Val: Container)) {
8988 for (auto *PID : CID->property_impls())
8989 if (PID->getPropertyDecl() == PD)
8990 return PID;
8991 } else {
8992 const auto *OID = cast<ObjCImplementationDecl>(Val: Container);
8993 for (auto *PID : OID->property_impls())
8994 if (PID->getPropertyDecl() == PD)
8995 return PID;
8996 }
8997 return nullptr;
8998}
8999
9000/// getObjCEncodingForPropertyDecl - Return the encoded type for this
9001/// property declaration. If non-NULL, Container must be either an
9002/// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be
9003/// NULL when getting encodings for protocol properties.
9004/// Property attributes are stored as a comma-delimited C string. The simple
9005/// attributes readonly and bycopy are encoded as single characters. The
9006/// parametrized attributes, getter=name, setter=name, and ivar=name, are
9007/// encoded as single characters, followed by an identifier. Property types
9008/// are also encoded as a parametrized attribute. The characters used to encode
9009/// these attributes are defined by the following enumeration:
9010/// @code
9011/// enum PropertyAttributes {
9012/// kPropertyReadOnly = 'R', // property is read-only.
9013/// kPropertyBycopy = 'C', // property is a copy of the value last assigned
9014/// kPropertyByref = '&', // property is a reference to the value last assigned
9015/// kPropertyDynamic = 'D', // property is dynamic
9016/// kPropertyGetter = 'G', // followed by getter selector name
9017/// kPropertySetter = 'S', // followed by setter selector name
9018/// kPropertyInstanceVariable = 'V' // followed by instance variable name
9019/// kPropertyType = 'T' // followed by old-style type encoding.
9020/// kPropertyWeak = 'W' // 'weak' property
9021/// kPropertyStrong = 'P' // property GC'able
9022/// kPropertyNonAtomic = 'N' // property non-atomic
9023/// kPropertyOptional = '?' // property optional
9024/// };
9025/// @endcode
std::string
ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
                                           const Decl *Container) const {
  // Collect information from the property implementation decl(s).
  bool Dynamic = false;
  ObjCPropertyImplDecl *SynthesizePID = nullptr;

  // A @dynamic implementation adds ',D'; a @synthesize implementation
  // contributes the backing ivar name via ',V<name>' at the end.
  if (ObjCPropertyImplDecl *PropertyImpDecl =
      getObjCPropertyImplDeclForPropertyDecl(PD, Container)) {
    if (PropertyImpDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
      Dynamic = true;
    else
      SynthesizePID = PropertyImpDecl;
  }

  // FIXME: This is not very efficient.
  std::string S = "T"; // 'T' introduces the property's type encoding.

  // Encode result type.
  // GCC has some special rules regarding encoding of properties which
  // closely resembles encoding of ivars.
  getObjCEncodingForPropertyType(T: PD->getType(), S);

  if (PD->isOptional())
    S += ",?";

  if (PD->isReadOnly()) {
    S += ",R";
    if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_copy)
      S += ",C";
    if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_retain)
      S += ",&";
    if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak)
      S += ",W";
  } else {
    // For writable properties the setter semantics determine the
    // ownership attribute character.
    switch (PD->getSetterKind()) {
    case ObjCPropertyDecl::Assign: break;
    case ObjCPropertyDecl::Copy: S += ",C"; break;
    case ObjCPropertyDecl::Retain: S += ",&"; break;
    case ObjCPropertyDecl::Weak: S += ",W"; break;
    }
  }

  // It really isn't clear at all what this means, since properties
  // are "dynamic by default".
  if (Dynamic)
    S += ",D";

  if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_nonatomic)
    S += ",N";

  // Custom getter/setter selectors are encoded by name.
  if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_getter) {
    S += ",G";
    S += PD->getGetterName().getAsString();
  }

  if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_setter) {
    S += ",S";
    S += PD->getSetterName().getAsString();
  }

  if (SynthesizePID) {
    const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl();
    S += ",V";
    S += OID->getNameAsString();
  }

  // FIXME: OBJCGC: weak & strong
  return S;
}
9096
9097/// getLegacyIntegralTypeEncoding -
9098/// Another legacy compatibility encoding: 32-bit longs are encoded as
9099/// 'l' or 'L' , but not always. For typedefs, we need to use
9100/// 'i' or 'I' instead if encoding a struct field, or a pointer!
9101void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const {
9102 if (PointeeTy->getAs<TypedefType>()) {
9103 if (const auto *BT = PointeeTy->getAs<BuiltinType>()) {
9104 if (BT->getKind() == BuiltinType::ULong && getIntWidth(T: PointeeTy) == 32)
9105 PointeeTy = UnsignedIntTy;
9106 else
9107 if (BT->getKind() == BuiltinType::Long && getIntWidth(T: PointeeTy) == 32)
9108 PointeeTy = IntTy;
9109 }
9110 }
9111}
9112
9113void ASTContext::getObjCEncodingForType(QualType T, std::string& S,
9114 const FieldDecl *Field,
9115 QualType *NotEncodedT) const {
9116 // We follow the behavior of gcc, expanding structures which are
9117 // directly pointed to, and expanding embedded structures. Note that
9118 // these rules are sufficient to prevent recursive encoding of the
9119 // same type.
9120 getObjCEncodingForTypeImpl(t: T, S,
9121 Options: ObjCEncOptions()
9122 .setExpandPointedToStructures()
9123 .setExpandStructures()
9124 .setIsOutermostType(),
9125 Field, NotEncodedT);
9126}
9127
9128void ASTContext::getObjCEncodingForPropertyType(QualType T,
9129 std::string& S) const {
9130 // Encode result type.
9131 // GCC has some special rules regarding encoding of properties which
9132 // closely resembles encoding of ivars.
9133 getObjCEncodingForTypeImpl(t: T, S,
9134 Options: ObjCEncOptions()
9135 .setExpandPointedToStructures()
9136 .setExpandStructures()
9137 .setIsOutermostType()
9138 .setEncodingProperty(),
9139 /*Field=*/nullptr);
9140}
9141
// Map a builtin type to its single-character @encode letter. Types with
// no defined encoding yield ' ' (some after emitting a diagnostic);
// kinds that can never reach @encode are unreachable.
static char getObjCEncodingForPrimitiveType(const ASTContext *C,
                                            const BuiltinType *BT) {
  BuiltinType::Kind kind = BT->getKind();
  switch (kind) {
  case BuiltinType::Void: return 'v';
  case BuiltinType::Bool: return 'B';
  case BuiltinType::Char8:
  case BuiltinType::Char_U:
  case BuiltinType::UChar: return 'C';
  case BuiltinType::Char16:
  case BuiltinType::UShort: return 'S';
  case BuiltinType::Char32:
  case BuiltinType::UInt: return 'I';
  case BuiltinType::ULong:
    // 'L'/'l' encode 32-bit longs; 64-bit longs use the long-long codes.
    return C->getTargetInfo().getLongWidth() == 32 ? 'L' : 'Q';
  case BuiltinType::UInt128: return 'T';
  case BuiltinType::ULongLong: return 'Q';
  case BuiltinType::Char_S:
  case BuiltinType::SChar: return 'c';
  case BuiltinType::Short: return 's';
  case BuiltinType::WChar_S:
  case BuiltinType::WChar_U:
  case BuiltinType::Int: return 'i';
  case BuiltinType::Long:
    return C->getTargetInfo().getLongWidth() == 32 ? 'l' : 'q';
  case BuiltinType::LongLong: return 'q';
  case BuiltinType::Int128: return 't';
  case BuiltinType::Float: return 'f';
  case BuiltinType::Double: return 'd';
  case BuiltinType::LongDouble: return 'D';
  case BuiltinType::NullPtr: return '*'; // like char*

  case BuiltinType::BFloat16:
  case BuiltinType::Float16:
  case BuiltinType::Float128:
  case BuiltinType::Ibm128:
  case BuiltinType::Half:
  case BuiltinType::ShortAccum:
  case BuiltinType::Accum:
  case BuiltinType::LongAccum:
  case BuiltinType::UShortAccum:
  case BuiltinType::UAccum:
  case BuiltinType::ULongAccum:
  case BuiltinType::ShortFract:
  case BuiltinType::Fract:
  case BuiltinType::LongFract:
  case BuiltinType::UShortFract:
  case BuiltinType::UFract:
  case BuiltinType::ULongFract:
  case BuiltinType::SatShortAccum:
  case BuiltinType::SatAccum:
  case BuiltinType::SatLongAccum:
  case BuiltinType::SatUShortAccum:
  case BuiltinType::SatUAccum:
  case BuiltinType::SatULongAccum:
  case BuiltinType::SatShortFract:
  case BuiltinType::SatFract:
  case BuiltinType::SatLongFract:
  case BuiltinType::SatUShortFract:
  case BuiltinType::SatUFract:
  case BuiltinType::SatULongFract:
    // FIXME: potentially need @encodes for these!
    return ' ';

  // Target-specific vector/reference builtins cannot be encoded; report
  // an error instead of silently emitting something wrong.
#define SVE_TYPE(Name, Id, SingletonId) \
  case BuiltinType::Id:
#include "clang/Basic/AArch64ACLETypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) case BuiltinType::Id:
#include "clang/Basic/AMDGPUTypes.def"
  {
    DiagnosticsEngine &Diags = C->getDiagnostics();
    Diags.Report(DiagID: diag::err_unsupported_objc_primitive_encoding)
        << QualType(BT, 0);
    return ' ';
  }

  case BuiltinType::ObjCId:
  case BuiltinType::ObjCClass:
  case BuiltinType::ObjCSel:
    llvm_unreachable("@encoding ObjC primitive type");

  // OpenCL and placeholder types don't need @encodings.
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
  case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
  case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
  case BuiltinType::OCLEvent:
  case BuiltinType::OCLClkEvent:
  case BuiltinType::OCLQueue:
  case BuiltinType::OCLReserveID:
  case BuiltinType::OCLSampler:
  case BuiltinType::Dependent:
#define PPC_VECTOR_TYPE(Name, Id, Size) \
  case BuiltinType::Id:
#include "clang/Basic/PPCTypes.def"
#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/HLSLIntangibleTypes.def"
#define BUILTIN_TYPE(KIND, ID)
#define PLACEHOLDER_TYPE(KIND, ID) \
  case BuiltinType::KIND:
#include "clang/AST/BuiltinTypes.def"
    llvm_unreachable("invalid builtin type for @encode");
  }
  llvm_unreachable("invalid BuiltinType::Kind value");
}
9253
9254static char ObjCEncodingForEnumDecl(const ASTContext *C, const EnumDecl *ED) {
9255 EnumDecl *Enum = ED->getDefinitionOrSelf();
9256
9257 // The encoding of an non-fixed enum type is always 'i', regardless of size.
9258 if (!Enum->isFixed())
9259 return 'i';
9260
9261 // The encoding of a fixed enum type matches its fixed underlying type.
9262 const auto *BT = Enum->getIntegerType()->castAs<BuiltinType>();
9263 return getObjCEncodingForPrimitiveType(C, BT);
9264}
9265
// Append the @encode string for bit-field FD (declared with type T) to S.
// The exact format depends on which Objective-C runtime is targeted.
static void EncodeBitField(const ASTContext *Ctx, std::string& S,
                           QualType T, const FieldDecl *FD) {
  assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl");
  S += 'b';
  // The NeXT runtime encodes bit fields as b followed by the number of bits.
  // The GNU runtime requires more information; bitfields are encoded as b,
  // then the offset (in bits) of the first element, then the type of the
  // bitfield, then the size in bits. For example, in this structure:
  //
  // struct
  // {
  //    int integer;
  //    int flags:2;
  // };
  // On a 32-bit system, the encoding for flags would be b2 for the NeXT
  // runtime, but b32i2 for the GNU runtime. The reason for this extra
  // information is not especially sensible, but we're stuck with it for
  // compatibility with GCC, although providing it breaks anything that
  // actually uses runtime introspection and wants to work on both runtimes...
  if (Ctx->getLangOpts().ObjCRuntime.isGNUFamily()) {
    uint64_t Offset;

    // The bit offset comes from the interface layout for ivars, and from
    // the record layout for ordinary struct fields.
    if (const auto *IVD = dyn_cast<ObjCIvarDecl>(Val: FD)) {
      Offset = Ctx->lookupFieldBitOffset(OID: IVD->getContainingInterface(), Ivar: IVD);
    } else {
      const RecordDecl *RD = FD->getParent();
      const ASTRecordLayout &RL = Ctx->getASTRecordLayout(D: RD);
      Offset = RL.getFieldOffset(FieldNo: FD->getFieldIndex());
    }

    S += llvm::utostr(X: Offset);

    // Then the underlying type: an enum encodes via its decl, anything
    // else must be a builtin type.
    if (const auto *ET = T->getAsCanonical<EnumType>())
      S += ObjCEncodingForEnumDecl(C: Ctx, ED: ET->getDecl());
    else {
      const auto *BT = T->castAs<BuiltinType>();
      S += getObjCEncodingForPrimitiveType(C: Ctx, BT);
    }
  }
  // Both runtimes end with the width in bits.
  S += llvm::utostr(X: FD->getBitWidthValue());
}
9307
9308// Helper function for determining whether the encoded type string would include
9309// a template specialization type.
9310static bool hasTemplateSpecializationInEncodedString(const Type *T,
9311 bool VisitBasesAndFields) {
9312 T = T->getBaseElementTypeUnsafe();
9313
9314 if (auto *PT = T->getAs<PointerType>())
9315 return hasTemplateSpecializationInEncodedString(
9316 T: PT->getPointeeType().getTypePtr(), VisitBasesAndFields: false);
9317
9318 auto *CXXRD = T->getAsCXXRecordDecl();
9319
9320 if (!CXXRD)
9321 return false;
9322
9323 if (isa<ClassTemplateSpecializationDecl>(Val: CXXRD))
9324 return true;
9325
9326 if (!CXXRD->hasDefinition() || !VisitBasesAndFields)
9327 return false;
9328
9329 for (const auto &B : CXXRD->bases())
9330 if (hasTemplateSpecializationInEncodedString(T: B.getType().getTypePtr(),
9331 VisitBasesAndFields: true))
9332 return true;
9333
9334 for (auto *FD : CXXRD->fields())
9335 if (hasTemplateSpecializationInEncodedString(T: FD->getType().getTypePtr(),
9336 VisitBasesAndFields: true))
9337 return true;
9338
9339 return false;
9340}
9341
9342// FIXME: Use SmallString for accumulating string.
void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
                                            const ObjCEncOptions Options,
                                            const FieldDecl *FD,
                                            QualType *NotEncodedT) const {
  // Dispatch on the canonical type; the possibly-sugared T is still
  // consulted below where sugar matters (typedef'd pointers, BOOL).
  CanQualType CT = getCanonicalType(T);
  switch (CT->getTypeClass()) {
  case Type::Builtin:
  case Type::Enum:
    // Bit-fields have their own 'b...' encoding.
    if (FD && FD->isBitField())
      return EncodeBitField(Ctx: this, S, T, FD);
    if (const auto *BT = dyn_cast<BuiltinType>(Val&: CT))
      S += getObjCEncodingForPrimitiveType(C: this, BT);
    else
      S += ObjCEncodingForEnumDecl(C: this, ED: cast<EnumType>(Val&: CT)->getDecl());
    return;

  case Type::Complex:
    // 'j' prefix, then the element type.
    S += 'j';
    getObjCEncodingForTypeImpl(T: T->castAs<ComplexType>()->getElementType(), S,
                               Options: ObjCEncOptions(),
                               /*Field=*/FD: nullptr);
    return;

  case Type::Atomic:
    // 'A' prefix, then the value type.
    S += 'A';
    getObjCEncodingForTypeImpl(T: T->castAs<AtomicType>()->getValueType(), S,
                               Options: ObjCEncOptions(),
                               /*Field=*/FD: nullptr);
    return;

  // encoding for pointer or reference types.
  case Type::Pointer:
  case Type::LValueReference:
  case Type::RValueReference: {
    QualType PointeeTy;
    if (isa<PointerType>(Val: CT)) {
      const auto *PT = T->castAs<PointerType>();
      if (PT->isObjCSelType()) {
        S += ':';
        return;
      }
      PointeeTy = PT->getPointeeType();
    } else {
      // References encode exactly like pointers to the same pointee.
      PointeeTy = T->castAs<ReferenceType>()->getPointeeType();
    }

    bool isReadOnly = false;
    // For historical/compatibility reasons, the read-only qualifier of the
    // pointee gets emitted _before_ the '^'. The read-only qualifier of
    // the pointer itself gets ignored, _unless_ we are looking at a typedef!
    // Also, do not emit the 'r' for anything but the outermost type!
    if (T->getAs<TypedefType>()) {
      if (Options.IsOutermostType() && T.isConstQualified()) {
        isReadOnly = true;
        S += 'r';
      }
    } else if (Options.IsOutermostType()) {
      QualType P = PointeeTy;
      while (auto PT = P->getAs<PointerType>())
        P = PT->getPointeeType();
      if (P.isConstQualified()) {
        isReadOnly = true;
        S += 'r';
      }
    }
    if (isReadOnly) {
      // Another legacy compatibility encoding. Some ObjC qualifier and type
      // combinations need to be rearranged.
      // Rewrite "in const" from "nr" to "rn"
      if (StringRef(S).ends_with(Suffix: "nr"))
        S.replace(i1: S.end()-2, i2: S.end(), s: "rn");
    }

    if (PointeeTy->isCharType()) {
      // char pointer types should be encoded as '*' unless it is a
      // type that has been typedef'd to 'BOOL'.
      if (!isTypeTypedefedAsBOOL(T: PointeeTy)) {
        S += '*';
        return;
      }
    } else if (const auto *RTy = PointeeTy->getAsCanonical<RecordType>()) {
      const IdentifierInfo *II = RTy->getDecl()->getIdentifier();
      // GCC binary compat: Need to convert "struct objc_class *" to "#".
      if (II == &Idents.get(Name: "objc_class")) {
        S += '#';
        return;
      }
      // GCC binary compat: Need to convert "struct objc_object *" to "@".
      if (II == &Idents.get(Name: "objc_object")) {
        S += '@';
        return;
      }
      // If the encoded string for the class includes template names, just emit
      // "^v" for pointers to the class.
      if (getLangOpts().CPlusPlus &&
          (!getLangOpts().EncodeCXXClassTemplateSpec &&
           hasTemplateSpecializationInEncodedString(
               T: RTy, VisitBasesAndFields: Options.ExpandPointedToStructures()))) {
        S += "^v";
        return;
      }
      // fall through...
    }
    S += '^';
    getLegacyIntegralTypeEncoding(PointeeTy);

    // Expand the pointee's structure only when the caller requested
    // expansion of pointed-to structures.
    ObjCEncOptions NewOptions;
    if (Options.ExpandPointedToStructures())
      NewOptions.setExpandStructures();
    getObjCEncodingForTypeImpl(T: PointeeTy, S, Options: NewOptions,
                               /*Field=*/FD: nullptr, NotEncodedT);
    return;
  }

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray: {
    const auto *AT = cast<ArrayType>(Val&: CT);

    if (isa<IncompleteArrayType>(Val: AT) && !Options.IsStructField()) {
      // Incomplete arrays are encoded as a pointer to the array element.
      S += '^';

      getObjCEncodingForTypeImpl(
          T: AT->getElementType(), S,
          Options: Options.keepingOnly(Mask: ObjCEncOptions().setExpandStructures()), FD);
    } else {
      S += '[';

      if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: AT))
        S += llvm::utostr(X: CAT->getZExtSize());
      else {
        //Variable length arrays are encoded as a regular array with 0 elements.
        assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) &&
               "Unknown array type!");
        S += '0';
      }

      getObjCEncodingForTypeImpl(
          T: AT->getElementType(), S,
          Options: Options.keepingOnly(Mask: ObjCEncOptions().setExpandStructures()), FD,
          NotEncodedT);
      S += ']';
    }
    return;
  }

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // Function types are opaque to the runtime.
    S += '?';
    return;

  case Type::Record: {
    RecordDecl *RDecl = cast<RecordType>(Val&: CT)->getDecl();
    S += RDecl->isUnion() ? '(' : '{';
    // Anonymous structures print as '?'
    if (const IdentifierInfo *II = RDecl->getIdentifier()) {
      S += II->getName();
      if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(Val: RDecl)) {
        const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
        llvm::raw_string_ostream OS(S);
        printTemplateArgumentList(OS, Args: TemplateArgs.asArray(),
                                  Policy: getPrintingPolicy());
      }
    } else {
      S += '?';
    }
    if (Options.ExpandStructures()) {
      S += '=';
      if (!RDecl->isUnion()) {
        getObjCEncodingForStructureImpl(RD: RDecl, S, Field: FD, includeVBases: true, NotEncodedT);
      } else {
        // Unions are expanded inline: field names (only when encoding an
        // ivar, i.e. FD != nullptr) followed by each member's encoding.
        for (const auto *Field : RDecl->fields()) {
          if (FD) {
            S += '"';
            S += Field->getNameAsString();
            S += '"';
          }

          // Special case bit-fields.
          if (Field->isBitField()) {
            getObjCEncodingForTypeImpl(T: Field->getType(), S,
                                       Options: ObjCEncOptions().setExpandStructures(),
                                       FD: Field);
          } else {
            QualType qt = Field->getType();
            getLegacyIntegralTypeEncoding(PointeeTy&: qt);
            getObjCEncodingForTypeImpl(
                T: qt, S,
                Options: ObjCEncOptions().setExpandStructures().setIsStructField(), FD,
                NotEncodedT);
          }
        }
      }
    }
    S += RDecl->isUnion() ? ')' : '}';
    return;
  }

  case Type::BlockPointer: {
    const auto *BT = T->castAs<BlockPointerType>();
    S += "@?"; // Unlike a pointer-to-function, which is "^?".
    if (Options.EncodeBlockParameters()) {
      const auto *FT = BT->getPointeeType()->castAs<FunctionType>();

      S += '<';
      // Block return type
      getObjCEncodingForTypeImpl(T: FT->getReturnType(), S,
                                 Options: Options.forComponentType(), FD, NotEncodedT);
      // Block self
      S += "@?";
      // Block parameters
      if (const auto *FPT = dyn_cast<FunctionProtoType>(Val: FT)) {
        for (const auto &I : FPT->param_types())
          getObjCEncodingForTypeImpl(T: I, S, Options: Options.forComponentType(), FD,
                                     NotEncodedT);
      }
      S += '>';
    }
    return;
  }

  case Type::ObjCObject: {
    // hack to match legacy encoding of *id and *Class
    QualType Ty = getObjCObjectPointerType(ObjectT: CT);
    if (Ty->isObjCIdType()) {
      S += "{objc_object=}";
      return;
    }
    else if (Ty->isObjCClassType()) {
      S += "{objc_class=}";
      return;
    }
    // TODO: Double check to make sure this intentionally falls through.
    [[fallthrough]];
  }

  case Type::ObjCInterface: {
    // Ignore protocol qualifiers when mangling at this level.
    // @encode(class_name)
    ObjCInterfaceDecl *OI = T->castAs<ObjCObjectType>()->getInterface();
    S += '{';
    S += OI->getObjCRuntimeNameAsString();
    if (Options.ExpandStructures()) {
      S += '=';
      // Walk every ivar, including those of superclasses.
      SmallVector<const ObjCIvarDecl*, 32> Ivars;
      DeepCollectObjCIvars(OI, leafClass: true, Ivars);
      for (unsigned i = 0, e = Ivars.size(); i != e; ++i) {
        const FieldDecl *Field = Ivars[i];
        if (Field->isBitField())
          getObjCEncodingForTypeImpl(T: Field->getType(), S,
                                     Options: ObjCEncOptions().setExpandStructures(),
                                     FD: Field);
        else
          getObjCEncodingForTypeImpl(T: Field->getType(), S,
                                     Options: ObjCEncOptions().setExpandStructures(), FD,
                                     NotEncodedT);
      }
    }
    S += '}';
    return;
  }

  case Type::ObjCObjectPointer: {
    const auto *OPT = T->castAs<ObjCObjectPointerType>();
    if (OPT->isObjCIdType()) {
      S += '@';
      return;
    }

    if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) {
      // FIXME: Consider if we need to output qualifiers for 'Class<p>'.
      // Since this is a binary compatibility issue, need to consult with
      // runtime folks. Fortunately, this is a *very* obscure construct.
      S += '#';
      return;
    }

    if (OPT->isObjCQualifiedIdType()) {
      getObjCEncodingForTypeImpl(
          T: getObjCIdType(), S,
          Options: Options.keepingOnly(Mask: ObjCEncOptions()
                                  .setExpandPointedToStructures()
                                  .setExpandStructures()),
          FD);
      if (FD || Options.EncodingProperty() || Options.EncodeClassNames()) {
        // Note that we do extended encoding of protocol qualifier list
        // Only when doing ivar or property encoding.
        S += '"';
        for (const auto *I : OPT->quals()) {
          S += '<';
          S += I->getObjCRuntimeNameAsString();
          S += '>';
        }
        S += '"';
      }
      return;
    }

    S += '@';
    // Class name (with protocol qualifiers) is only emitted for ivar or
    // property encodings, matching the qualified-id case above.
    if (OPT->getInterfaceDecl() &&
        (FD || Options.EncodingProperty() || Options.EncodeClassNames())) {
      S += '"';
      S += OPT->getInterfaceDecl()->getObjCRuntimeNameAsString();
      for (const auto *I : OPT->quals()) {
        S += '<';
        S += I->getObjCRuntimeNameAsString();
        S += '>';
      }
      S += '"';
    }
    return;
  }

  // gcc just blithely ignores member pointers.
  // FIXME: we should do better than that.  'M' is available.
  case Type::MemberPointer:
  // This matches gcc's encoding, even though technically it is insufficient.
  //FIXME. We should do a better job than gcc.
  case Type::Vector:
  case Type::ExtVector:
    // Until we have a coherent encoding of these three types, issue warning.
    if (NotEncodedT)
      *NotEncodedT = T;
    return;

  case Type::ConstantMatrix:
    if (NotEncodedT)
      *NotEncodedT = T;
    return;

  case Type::BitInt:
    if (NotEncodedT)
      *NotEncodedT = T;
    return;

  // We could see an undeduced auto type here during error recovery.
  // Just ignore it.
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    return;

  case Type::HLSLAttributedResource:
  case Type::HLSLInlineSpirv:
  case Type::OverflowBehavior:
    llvm_unreachable("unexpected type");

  case Type::ArrayParameter:
  case Type::Pipe:
#define ABSTRACT_TYPE(KIND, BASE)
#define TYPE(KIND, BASE)
#define DEPENDENT_TYPE(KIND, BASE) \
  case Type::KIND:
#define NON_CANONICAL_TYPE(KIND, BASE) \
  case Type::KIND:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(KIND, BASE) \
  case Type::KIND:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("@encode for dependent type!");
  }
  llvm_unreachable("bad type kind!");
}
9705
/// Append the @encode string for the fields (and, for C++, bases) of
/// \p RDecl to \p S, walking members in layout order. \p includeVBases
/// is false when a base class is being expanded inside a derived class's
/// encoding, since the derived class owns the virtual bases.
void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl,
                                                 std::string &S,
                                                 const FieldDecl *FD,
                                                 bool includeVBases,
                                                 QualType *NotEncodedT) const {
  assert(RDecl && "Expected non-null RecordDecl");
  assert(!RDecl->isUnion() && "Should not be called for unions");
  // Nothing to emit for a forward declaration or an invalid definition.
  if (!RDecl->getDefinition() || RDecl->getDefinition()->isInvalidDecl())
    return;

  const auto *CXXRec = dyn_cast<CXXRecordDecl>(Val: RDecl);
  // Bases and fields keyed (and iterated) by bit offset within the layout.
  std::multimap<uint64_t, NamedDecl *> FieldOrBaseOffsets;
  const ASTRecordLayout &layout = getASTRecordLayout(D: RDecl);

  // Non-virtual, non-empty bases participate at their base-class offsets.
  if (CXXRec) {
    for (const auto &BI : CXXRec->bases()) {
      if (!BI.isVirtual()) {
        CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl();
        if (base->isEmpty())
          continue;
        uint64_t offs = toBits(CharSize: layout.getBaseClassOffset(Base: base));
        FieldOrBaseOffsets.insert(position: FieldOrBaseOffsets.upper_bound(x: offs),
                                  x: std::make_pair(x&: offs, y&: base));
      }
    }
  }

  for (FieldDecl *Field : RDecl->fields()) {
    // Skip zero-size fields, but keep zero-length bit-fields.
    if (!Field->isZeroLengthBitField() && Field->isZeroSize(Ctx: *this))
      continue;
    uint64_t offs = layout.getFieldOffset(FieldNo: Field->getFieldIndex());
    FieldOrBaseOffsets.insert(position: FieldOrBaseOffsets.upper_bound(x: offs),
                              x: std::make_pair(x&: offs, y&: Field));
  }

  if (CXXRec && includeVBases) {
    for (const auto &BI : CXXRec->vbases()) {
      CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl();
      if (base->isEmpty())
        continue;
      uint64_t offs = toBits(CharSize: layout.getVBaseClassOffset(VBase: base));
      // Only add the vbase if it lands past the non-virtual part and no
      // entry already exists at that offset.
      if (offs >= uint64_t(toBits(CharSize: layout.getNonVirtualSize())) &&
          FieldOrBaseOffsets.find(x: offs) == FieldOrBaseOffsets.end())
        FieldOrBaseOffsets.insert(position: FieldOrBaseOffsets.end(),
                                  x: std::make_pair(x&: offs, y&: base));
    }
  }

  CharUnits size;
  if (CXXRec) {
    size = includeVBases ? layout.getSize() : layout.getNonVirtualSize();
  } else {
    size = layout.getSize();
  }

#ifndef NDEBUG
  uint64_t CurOffs = 0;
#endif
  std::multimap<uint64_t, NamedDecl *>::iterator
    CurLayObj = FieldOrBaseOffsets.begin();

  // Emit a vtable-pointer entry for dynamic classes whose layout does not
  // begin with a base at offset 0 (which would carry the vptr itself).
  if (CXXRec && CXXRec->isDynamicClass() &&
      (CurLayObj == FieldOrBaseOffsets.end() || CurLayObj->first != 0)) {
    if (FD) {
      S += "\"_vptr$";
      std::string recname = CXXRec->getNameAsString();
      if (recname.empty()) recname = "?";
      S += recname;
      S += '"';
    }
    S += "^^?";
#ifndef NDEBUG
    CurOffs += getTypeSize(VoidPtrTy);
#endif
  }

  if (!RDecl->hasFlexibleArrayMember()) {
    // Mark the end of the structure.
    uint64_t offs = toBits(CharSize: size);
    FieldOrBaseOffsets.insert(position: FieldOrBaseOffsets.upper_bound(x: offs),
                              x: std::make_pair(x&: offs, y: nullptr));
  }

  for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) {
#ifndef NDEBUG
    assert(CurOffs <= CurLayObj->first);
    if (CurOffs < CurLayObj->first) {
      uint64_t padding = CurLayObj->first - CurOffs;
      // FIXME: There doesn't seem to be a way to indicate in the encoding that
      // packing/alignment of members is different that normal, in which case
      // the encoding will be out-of-sync with the real layout.
      // If the runtime switches to just consider the size of types without
      // taking into account alignment, we could make padding explicit in the
      // encoding (e.g. using arrays of chars). The encoding strings would be
      // longer then though.
      CurOffs += padding;
    }
#endif

    NamedDecl *dcl = CurLayObj->second;
    if (!dcl)
      break; // reached end of structure.

    if (auto *base = dyn_cast<CXXRecordDecl>(Val: dcl)) {
      // We expand the bases without their virtual bases since those are going
      // in the initial structure. Note that this differs from gcc which
      // expands virtual bases each time one is encountered in the hierarchy,
      // making the encoding type bigger than it really is.
      getObjCEncodingForStructureImpl(RDecl: base, S, FD, /*includeVBases*/false,
                                      NotEncodedT);
      assert(!base->isEmpty());
#ifndef NDEBUG
      CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize());
#endif
    } else {
      const auto *field = cast<FieldDecl>(Val: dcl);
      // Field names are only emitted when encoding an ivar (FD != nullptr).
      if (FD) {
        S += '"';
        S += field->getNameAsString();
        S += '"';
      }

      if (field->isBitField()) {
        EncodeBitField(Ctx: this, S, T: field->getType(), FD: field);
#ifndef NDEBUG
        CurOffs += field->getBitWidthValue();
#endif
      } else {
        QualType qt = field->getType();
        getLegacyIntegralTypeEncoding(PointeeTy&: qt);
        getObjCEncodingForTypeImpl(
            T: qt, S, Options: ObjCEncOptions().setExpandStructures().setIsStructField(),
            FD, NotEncodedT);
#ifndef NDEBUG
        CurOffs += getTypeSize(field->getType());
#endif
      }
    }
  }
}
9846
9847void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT,
9848 std::string& S) const {
9849 if (QT & Decl::OBJC_TQ_In)
9850 S += 'n';
9851 if (QT & Decl::OBJC_TQ_Inout)
9852 S += 'N';
9853 if (QT & Decl::OBJC_TQ_Out)
9854 S += 'o';
9855 if (QT & Decl::OBJC_TQ_Bycopy)
9856 S += 'O';
9857 if (QT & Decl::OBJC_TQ_Byref)
9858 S += 'R';
9859 if (QT & Decl::OBJC_TQ_Oneway)
9860 S += 'V';
9861}
9862
9863TypedefDecl *ASTContext::getObjCIdDecl() const {
9864 if (!ObjCIdDecl) {
9865 QualType T = getObjCObjectType(BaseType: ObjCBuiltinIdTy, Protocols: {}, NumProtocols: {});
9866 T = getObjCObjectPointerType(ObjectT: T);
9867 ObjCIdDecl = buildImplicitTypedef(T, Name: "id");
9868 }
9869 return ObjCIdDecl;
9870}
9871
9872TypedefDecl *ASTContext::getObjCSelDecl() const {
9873 if (!ObjCSelDecl) {
9874 QualType T = getPointerType(T: ObjCBuiltinSelTy);
9875 ObjCSelDecl = buildImplicitTypedef(T, Name: "SEL");
9876 }
9877 return ObjCSelDecl;
9878}
9879
9880TypedefDecl *ASTContext::getObjCClassDecl() const {
9881 if (!ObjCClassDecl) {
9882 QualType T = getObjCObjectType(BaseType: ObjCBuiltinClassTy, Protocols: {}, NumProtocols: {});
9883 T = getObjCObjectPointerType(ObjectT: T);
9884 ObjCClassDecl = buildImplicitTypedef(T, Name: "Class");
9885 }
9886 return ObjCClassDecl;
9887}
9888
ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const {
  // Lazily create the implicit, internal declaration of the 'Protocol'
  // class at translation-unit scope; it backs @protocol expressions.
  if (!ObjCProtocolClassDecl) {
    ObjCProtocolClassDecl
      = ObjCInterfaceDecl::Create(C: *this, DC: getTranslationUnitDecl(),
                                  atLoc: SourceLocation(),
                                  Id: &Idents.get(Name: "Protocol"),
                                  /*typeParamList=*/nullptr,
                                  /*PrevDecl=*/nullptr,
                                  ClassLoc: SourceLocation(), isInternal: true);
  }

  return ObjCProtocolClassDecl;
}
9902
/// Return the pointer-auth qualifier applied to SEL members of ObjC
/// interfaces, or a null qualifier when the feature is disabled.
PointerAuthQualifier ASTContext::getObjCMemberSelTypePtrAuth() {
  if (!getLangOpts().PointerAuthObjcInterfaceSel)
    return PointerAuthQualifier();
  return PointerAuthQualifier::Create(
      Key: getLangOpts().PointerAuthObjcInterfaceSelKey,
      /*isAddressDiscriminated=*/IsAddressDiscriminated: true, ExtraDiscriminator: SelPointerConstantDiscriminator,
      AuthenticationMode: PointerAuthenticationMode::SignAndAuth,
      /*isIsaPointer=*/IsIsaPointer: false,
      /*authenticatesNullValues=*/AuthenticatesNullValues: false);
}
9913
9914//===----------------------------------------------------------------------===//
9915// __builtin_va_list Construction Functions
9916//===----------------------------------------------------------------------===//
9917
9918static TypedefDecl *CreateCharPtrNamedVaListDecl(const ASTContext *Context,
9919 StringRef Name) {
9920 // typedef char* __builtin[_ms]_va_list;
9921 QualType T = Context->getPointerType(T: Context->CharTy);
9922 return Context->buildImplicitTypedef(T, Name);
9923}
9924
9925static TypedefDecl *CreateMSVaListDecl(const ASTContext *Context) {
9926 return CreateCharPtrNamedVaListDecl(Context, Name: "__builtin_ms_va_list");
9927}
9928
9929static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) {
9930 return CreateCharPtrNamedVaListDecl(Context, Name: "__builtin_va_list");
9931}
9932
9933static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) {
9934 // typedef void* __builtin_va_list;
9935 QualType T = Context->getPointerType(T: Context->VoidTy);
9936 return Context->buildImplicitTypedef(T, Name: "__builtin_va_list");
9937}
9938
/// Build the AArch64 AAPCS64 __builtin_va_list: a 5-field struct tracking
/// the stack area plus the GP/FP register save areas and their offsets.
/// In C++ the struct is placed in an implicit namespace 'std' to match the
/// mangling required by the AAPCS64 C++ ABI.
static TypedefDecl *
CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) {
  // struct __va_list
  RecordDecl *VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list");
  if (Context->getLangOpts().CPlusPlus) {
    // namespace std { struct __va_list {
    auto *NS = NamespaceDecl::Create(
        C&: const_cast<ASTContext &>(*Context), DC: Context->getTranslationUnitDecl(),
        /*Inline=*/false, StartLoc: SourceLocation(), IdLoc: SourceLocation(),
        Id: &Context->Idents.get(Name: "std"),
        /*PrevDecl=*/nullptr, /*Nested=*/false);
    NS->setImplicit();
    VaListTagDecl->setDeclContext(NS);
  }

  VaListTagDecl->startDefinition();

  const size_t NumFields = 5;
  QualType FieldTypes[NumFields];
  const char *FieldNames[NumFields];

  // void *__stack;
  FieldTypes[0] = Context->getPointerType(T: Context->VoidTy);
  FieldNames[0] = "__stack";

  // void *__gr_top;
  FieldTypes[1] = Context->getPointerType(T: Context->VoidTy);
  FieldNames[1] = "__gr_top";

  // void *__vr_top;
  FieldTypes[2] = Context->getPointerType(T: Context->VoidTy);
  FieldNames[2] = "__vr_top";

  // int __gr_offs;
  FieldTypes[3] = Context->IntTy;
  FieldNames[3] = "__gr_offs";

  // int __vr_offs;
  FieldTypes[4] = Context->IntTy;
  FieldNames[4] = "__vr_offs";

  // Create fields
  for (unsigned i = 0; i < NumFields; ++i) {
    FieldDecl *Field = FieldDecl::Create(C: const_cast<ASTContext &>(*Context),
                                         DC: VaListTagDecl,
                                         StartLoc: SourceLocation(),
                                         IdLoc: SourceLocation(),
                                         Id: &Context->Idents.get(Name: FieldNames[i]),
                                         T: FieldTypes[i], /*TInfo=*/nullptr,
                                         /*BitWidth=*/BW: nullptr,
                                         /*Mutable=*/false,
                                         InitStyle: ICIS_NoInit);
    Field->setAccess(AS_public);
    VaListTagDecl->addDecl(D: Field);
  }
  VaListTagDecl->completeDefinition();
  // Record the tag so getVaListTagDecl() can find it later.
  Context->VaListTagDecl = VaListTagDecl;
  CanQualType VaListTagType = Context->getCanonicalTagType(TD: VaListTagDecl);

  // } __builtin_va_list;
  return Context->buildImplicitTypedef(T: VaListTagType, Name: "__builtin_va_list");
}
10001
/// Build the 32-bit PowerPC SVR4 __builtin_va_list: a one-element array of
/// a 5-field register-save-area descriptor struct.
static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) {
  // typedef struct __va_list_tag {
  RecordDecl *VaListTagDecl;

  VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list_tag");
  VaListTagDecl->startDefinition();

  const size_t NumFields = 5;
  QualType FieldTypes[NumFields];
  const char *FieldNames[NumFields];

  // unsigned char gpr;
  FieldTypes[0] = Context->UnsignedCharTy;
  FieldNames[0] = "gpr";

  // unsigned char fpr;
  FieldTypes[1] = Context->UnsignedCharTy;
  FieldNames[1] = "fpr";

  // unsigned short reserved;
  FieldTypes[2] = Context->UnsignedShortTy;
  FieldNames[2] = "reserved";

  // void* overflow_arg_area;
  FieldTypes[3] = Context->getPointerType(T: Context->VoidTy);
  FieldNames[3] = "overflow_arg_area";

  // void* reg_save_area;
  FieldTypes[4] = Context->getPointerType(T: Context->VoidTy);
  FieldNames[4] = "reg_save_area";

  // Create fields
  for (unsigned i = 0; i < NumFields; ++i) {
    FieldDecl *Field = FieldDecl::Create(C: *Context, DC: VaListTagDecl,
                                         StartLoc: SourceLocation(),
                                         IdLoc: SourceLocation(),
                                         Id: &Context->Idents.get(Name: FieldNames[i]),
                                         T: FieldTypes[i], /*TInfo=*/nullptr,
                                         /*BitWidth=*/BW: nullptr,
                                         /*Mutable=*/false,
                                         InitStyle: ICIS_NoInit);
    Field->setAccess(AS_public);
    VaListTagDecl->addDecl(D: Field);
  }
  VaListTagDecl->completeDefinition();
  // Record the tag so getVaListTagDecl() can find it later.
  Context->VaListTagDecl = VaListTagDecl;
  CanQualType VaListTagType = Context->getCanonicalTagType(TD: VaListTagDecl);

  // } __va_list_tag;
  TypedefDecl *VaListTagTypedefDecl =
      Context->buildImplicitTypedef(T: VaListTagType, Name: "__va_list_tag");

  QualType VaListTagTypedefType =
      Context->getTypedefType(Keyword: ElaboratedTypeKeyword::None,
                              /*Qualifier=*/std::nullopt, Decl: VaListTagTypedefDecl);

  // typedef __va_list_tag __builtin_va_list[1];
  llvm::APInt Size(Context->getTypeSize(T: Context->getSizeType()), 1);
  QualType VaListTagArrayType = Context->getConstantArrayType(
      EltTy: VaListTagTypedefType, ArySizeIn: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
  return Context->buildImplicitTypedef(T: VaListTagArrayType, Name: "__builtin_va_list");
}
10064
/// Build the x86-64 System V __builtin_va_list: a one-element array of a
/// 4-field struct (gp/fp offsets plus overflow and register save areas).
static TypedefDecl *
CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) {
  // struct __va_list_tag {
  RecordDecl *VaListTagDecl;
  VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list_tag");
  VaListTagDecl->startDefinition();

  const size_t NumFields = 4;
  QualType FieldTypes[NumFields];
  const char *FieldNames[NumFields];

  // unsigned gp_offset;
  FieldTypes[0] = Context->UnsignedIntTy;
  FieldNames[0] = "gp_offset";

  // unsigned fp_offset;
  FieldTypes[1] = Context->UnsignedIntTy;
  FieldNames[1] = "fp_offset";

  // void* overflow_arg_area;
  FieldTypes[2] = Context->getPointerType(T: Context->VoidTy);
  FieldNames[2] = "overflow_arg_area";

  // void* reg_save_area;
  FieldTypes[3] = Context->getPointerType(T: Context->VoidTy);
  FieldNames[3] = "reg_save_area";

  // Create fields
  for (unsigned i = 0; i < NumFields; ++i) {
    FieldDecl *Field = FieldDecl::Create(C: const_cast<ASTContext &>(*Context),
                                         DC: VaListTagDecl,
                                         StartLoc: SourceLocation(),
                                         IdLoc: SourceLocation(),
                                         Id: &Context->Idents.get(Name: FieldNames[i]),
                                         T: FieldTypes[i], /*TInfo=*/nullptr,
                                         /*BitWidth=*/BW: nullptr,
                                         /*Mutable=*/false,
                                         InitStyle: ICIS_NoInit);
    Field->setAccess(AS_public);
    VaListTagDecl->addDecl(D: Field);
  }
  VaListTagDecl->completeDefinition();
  // Record the tag so getVaListTagDecl() can find it later.
  Context->VaListTagDecl = VaListTagDecl;
  CanQualType VaListTagType = Context->getCanonicalTagType(TD: VaListTagDecl);

  // };

  // typedef struct __va_list_tag __builtin_va_list[1];
  llvm::APInt Size(Context->getTypeSize(T: Context->getSizeType()), 1);
  QualType VaListTagArrayType = Context->getConstantArrayType(
      EltTy: VaListTagType, ArySizeIn: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
  return Context->buildImplicitTypedef(T: VaListTagArrayType, Name: "__builtin_va_list");
}
10118
/// Build the 32-bit ARM AAPCS __builtin_va_list: a single-field struct
/// wrapping one 'void *__ap' cursor. In C++ the struct lives in an
/// implicit namespace 'std' to match AAPCS C++ mangling.
static TypedefDecl *
CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) {
  // struct __va_list
  RecordDecl *VaListDecl = Context->buildImplicitRecord(Name: "__va_list");
  if (Context->getLangOpts().CPlusPlus) {
    // namespace std { struct __va_list {
    NamespaceDecl *NS;
    NS = NamespaceDecl::Create(C&: const_cast<ASTContext &>(*Context),
                               DC: Context->getTranslationUnitDecl(),
                               /*Inline=*/false, StartLoc: SourceLocation(),
                               IdLoc: SourceLocation(), Id: &Context->Idents.get(Name: "std"),
                               /*PrevDecl=*/nullptr, /*Nested=*/false);
    NS->setImplicit();
    VaListDecl->setDeclContext(NS);
  }

  VaListDecl->startDefinition();

  // void * __ap;
  FieldDecl *Field = FieldDecl::Create(C: const_cast<ASTContext &>(*Context),
                                       DC: VaListDecl,
                                       StartLoc: SourceLocation(),
                                       IdLoc: SourceLocation(),
                                       Id: &Context->Idents.get(Name: "__ap"),
                                       T: Context->getPointerType(T: Context->VoidTy),
                                       /*TInfo=*/nullptr,
                                       /*BitWidth=*/BW: nullptr,
                                       /*Mutable=*/false,
                                       InitStyle: ICIS_NoInit);
  Field->setAccess(AS_public);
  VaListDecl->addDecl(D: Field);

  // };
  VaListDecl->completeDefinition();
  // On AAPCS the struct itself serves as the va_list tag.
  Context->VaListTagDecl = VaListDecl;

  // typedef struct __va_list __builtin_va_list;
  CanQualType T = Context->getCanonicalTagType(TD: VaListDecl);
  return Context->buildImplicitTypedef(T, Name: "__builtin_va_list");
}
10159
/// Build the SystemZ __builtin_va_list: a one-element array of a 4-field
/// struct (GPR/FPR counts plus overflow and register save areas).
static TypedefDecl *
CreateSystemZBuiltinVaListDecl(const ASTContext *Context) {
  // struct __va_list_tag {
  RecordDecl *VaListTagDecl;
  VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list_tag");
  VaListTagDecl->startDefinition();

  const size_t NumFields = 4;
  QualType FieldTypes[NumFields];
  const char *FieldNames[NumFields];

  // long __gpr;
  FieldTypes[0] = Context->LongTy;
  FieldNames[0] = "__gpr";

  // long __fpr;
  FieldTypes[1] = Context->LongTy;
  FieldNames[1] = "__fpr";

  // void *__overflow_arg_area;
  FieldTypes[2] = Context->getPointerType(T: Context->VoidTy);
  FieldNames[2] = "__overflow_arg_area";

  // void *__reg_save_area;
  FieldTypes[3] = Context->getPointerType(T: Context->VoidTy);
  FieldNames[3] = "__reg_save_area";

  // Create fields
  for (unsigned i = 0; i < NumFields; ++i) {
    FieldDecl *Field = FieldDecl::Create(C: const_cast<ASTContext &>(*Context),
                                         DC: VaListTagDecl,
                                         StartLoc: SourceLocation(),
                                         IdLoc: SourceLocation(),
                                         Id: &Context->Idents.get(Name: FieldNames[i]),
                                         T: FieldTypes[i], /*TInfo=*/nullptr,
                                         /*BitWidth=*/BW: nullptr,
                                         /*Mutable=*/false,
                                         InitStyle: ICIS_NoInit);
    Field->setAccess(AS_public);
    VaListTagDecl->addDecl(D: Field);
  }
  VaListTagDecl->completeDefinition();
  // Record the tag so getVaListTagDecl() can find it later.
  Context->VaListTagDecl = VaListTagDecl;
  CanQualType VaListTagType = Context->getCanonicalTagType(TD: VaListTagDecl);

  // };

  // typedef __va_list_tag __builtin_va_list[1];
  llvm::APInt Size(Context->getTypeSize(T: Context->getSizeType()), 1);
  QualType VaListTagArrayType = Context->getConstantArrayType(
      EltTy: VaListTagType, ArySizeIn: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);

  return Context->buildImplicitTypedef(T: VaListTagArrayType, Name: "__builtin_va_list");
}
10214
/// Build the Hexagon __builtin_va_list: a one-element array of a 3-pointer
/// struct describing the saved-register area and the overflow area.
static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) {
  // typedef struct __va_list_tag {
  RecordDecl *VaListTagDecl;
  VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list_tag");
  VaListTagDecl->startDefinition();

  const size_t NumFields = 3;
  QualType FieldTypes[NumFields];
  const char *FieldNames[NumFields];

  // void *CurrentSavedRegisterArea;
  FieldTypes[0] = Context->getPointerType(T: Context->VoidTy);
  FieldNames[0] = "__current_saved_reg_area_pointer";

  // void *SavedRegAreaEnd;
  FieldTypes[1] = Context->getPointerType(T: Context->VoidTy);
  FieldNames[1] = "__saved_reg_area_end_pointer";

  // void *OverflowArea;
  FieldTypes[2] = Context->getPointerType(T: Context->VoidTy);
  FieldNames[2] = "__overflow_area_pointer";

  // Create fields
  for (unsigned i = 0; i < NumFields; ++i) {
    FieldDecl *Field = FieldDecl::Create(
        C: const_cast<ASTContext &>(*Context), DC: VaListTagDecl, StartLoc: SourceLocation(),
        IdLoc: SourceLocation(), Id: &Context->Idents.get(Name: FieldNames[i]), T: FieldTypes[i],
        /*TInfo=*/nullptr,
        /*BitWidth=*/BW: nullptr,
        /*Mutable=*/false, InitStyle: ICIS_NoInit);
    Field->setAccess(AS_public);
    VaListTagDecl->addDecl(D: Field);
  }
  VaListTagDecl->completeDefinition();
  // Record the tag so getVaListTagDecl() can find it later.
  Context->VaListTagDecl = VaListTagDecl;
  CanQualType VaListTagType = Context->getCanonicalTagType(TD: VaListTagDecl);

  // } __va_list_tag;
  TypedefDecl *VaListTagTypedefDecl =
      Context->buildImplicitTypedef(T: VaListTagType, Name: "__va_list_tag");

  QualType VaListTagTypedefType =
      Context->getTypedefType(Keyword: ElaboratedTypeKeyword::None,
                              /*Qualifier=*/std::nullopt, Decl: VaListTagTypedefDecl);

  // typedef __va_list_tag __builtin_va_list[1];
  llvm::APInt Size(Context->getTypeSize(T: Context->getSizeType()), 1);
  QualType VaListTagArrayType = Context->getConstantArrayType(
      EltTy: VaListTagTypedefType, ArySizeIn: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);

  return Context->buildImplicitTypedef(T: VaListTagArrayType, Name: "__builtin_va_list");
}
10267
/// Build the Xtensa __builtin_va_list. Unlike most struct-based targets,
/// Xtensa typedefs the struct type directly (no one-element array wrapper).
static TypedefDecl *
CreateXtensaABIBuiltinVaListDecl(const ASTContext *Context) {
  // typedef struct __va_list_tag {
  RecordDecl *VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list_tag");

  VaListTagDecl->startDefinition();

  // int* __va_stk;
  // int* __va_reg;
  // int __va_ndx;
  constexpr size_t NumFields = 3;
  QualType FieldTypes[NumFields] = {Context->getPointerType(T: Context->IntTy),
                                    Context->getPointerType(T: Context->IntTy),
                                    Context->IntTy};
  const char *FieldNames[NumFields] = {"__va_stk", "__va_reg", "__va_ndx"};

  // Create fields
  for (unsigned i = 0; i < NumFields; ++i) {
    FieldDecl *Field = FieldDecl::Create(
        C: *Context, DC: VaListTagDecl, StartLoc: SourceLocation(), IdLoc: SourceLocation(),
        Id: &Context->Idents.get(Name: FieldNames[i]), T: FieldTypes[i], /*TInfo=*/nullptr,
        /*BitWidth=*/BW: nullptr,
        /*Mutable=*/false, InitStyle: ICIS_NoInit);
    Field->setAccess(AS_public);
    VaListTagDecl->addDecl(D: Field);
  }
  VaListTagDecl->completeDefinition();
  // Record the tag so getVaListTagDecl() can find it later.
  Context->VaListTagDecl = VaListTagDecl;
  CanQualType VaListTagType = Context->getCanonicalTagType(TD: VaListTagDecl);

  // } __builtin_va_list;  (The struct is typedef'd straight to the builtin
  // name; there is no separate __va_list_tag typedef or array on Xtensa.)
  TypedefDecl *VaListTagTypedefDecl =
      Context->buildImplicitTypedef(T: VaListTagType, Name: "__builtin_va_list");

  return VaListTagTypedefDecl;
}
10304
/// Dispatch to the target-specific __builtin_va_list builder for \p Kind.
static TypedefDecl *CreateVaListDecl(const ASTContext *Context,
                                     TargetInfo::BuiltinVaListKind Kind) {
  switch (Kind) {
  case TargetInfo::CharPtrBuiltinVaList:
    return CreateCharPtrBuiltinVaListDecl(Context);
  case TargetInfo::VoidPtrBuiltinVaList:
    return CreateVoidPtrBuiltinVaListDecl(Context);
  case TargetInfo::AArch64ABIBuiltinVaList:
    return CreateAArch64ABIBuiltinVaListDecl(Context);
  case TargetInfo::PowerABIBuiltinVaList:
    return CreatePowerABIBuiltinVaListDecl(Context);
  case TargetInfo::X86_64ABIBuiltinVaList:
    return CreateX86_64ABIBuiltinVaListDecl(Context);
  case TargetInfo::AAPCSABIBuiltinVaList:
    return CreateAAPCSABIBuiltinVaListDecl(Context);
  case TargetInfo::SystemZBuiltinVaList:
    return CreateSystemZBuiltinVaListDecl(Context);
  case TargetInfo::HexagonBuiltinVaList:
    return CreateHexagonBuiltinVaListDecl(Context);
  case TargetInfo::XtensaABIBuiltinVaList:
    return CreateXtensaABIBuiltinVaListDecl(Context);
  }

  llvm_unreachable("Unhandled __builtin_va_list type kind");
}
10330
10331TypedefDecl *ASTContext::getBuiltinVaListDecl() const {
10332 if (!BuiltinVaListDecl) {
10333 BuiltinVaListDecl = CreateVaListDecl(Context: this, Kind: Target->getBuiltinVaListKind());
10334 assert(BuiltinVaListDecl->isImplicit());
10335 }
10336
10337 return BuiltinVaListDecl;
10338}
10339
10340Decl *ASTContext::getVaListTagDecl() const {
10341 // Force the creation of VaListTagDecl by building the __builtin_va_list
10342 // declaration.
10343 if (!VaListTagDecl)
10344 (void)getBuiltinVaListDecl();
10345
10346 return VaListTagDecl;
10347}
10348
10349TypedefDecl *ASTContext::getBuiltinMSVaListDecl() const {
10350 if (!BuiltinMSVaListDecl)
10351 BuiltinMSVaListDecl = CreateMSVaListDecl(Context: this);
10352
10353 return BuiltinMSVaListDecl;
10354}
10355
10356bool ASTContext::canBuiltinBeRedeclared(const FunctionDecl *FD) const {
10357 // Allow redecl custom type checking builtin for HLSL.
10358 if (LangOpts.HLSL && FD->getBuiltinID() != Builtin::NotBuiltin &&
10359 BuiltinInfo.hasCustomTypechecking(ID: FD->getBuiltinID()))
10360 return true;
10361 // Allow redecl custom type checking builtin for SPIR-V.
10362 if (getTargetInfo().getTriple().isSPIROrSPIRV() &&
10363 BuiltinInfo.isTSBuiltin(ID: FD->getBuiltinID()) &&
10364 BuiltinInfo.hasCustomTypechecking(ID: FD->getBuiltinID()))
10365 return true;
10366 return BuiltinInfo.canBeRedeclared(ID: FD->getBuiltinID());
10367}
10368
/// Record the interface type used for ObjC constant string literals
/// (e.g. NSConstantString). May be set at most once per context.
void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) {
  assert(ObjCConstantStringType.isNull() &&
         "'NSConstantString' type already set!");

  ObjCConstantStringType = getObjCInterfaceType(Decl);
}
10375
/// Retrieve the template name that corresponds to a non-empty
/// lookup.
TemplateName
ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin,
                                      UnresolvedSetIterator End) const {
  unsigned size = End - Begin;
  assert(size > 1 && "set is not overloaded!");

  // One allocation: the storage header plus a trailing array of 'size'
  // declaration pointers.
  void *memory = Allocate(Size: sizeof(OverloadedTemplateStorage) +
                          size * sizeof(FunctionTemplateDecl*));
  auto *OT = new (memory) OverloadedTemplateStorage(size);

  NamedDecl **Storage = OT->getStorage();
  for (UnresolvedSetIterator I = Begin; I != End; ++I) {
    NamedDecl *D = *I;
    // Only function templates (possibly via a using-shadow declaration) or
    // unresolved using declarations may participate.
    assert(isa<FunctionTemplateDecl>(D) ||
           isa<UnresolvedUsingValueDecl>(D) ||
           (isa<UsingShadowDecl>(D) &&
            isa<FunctionTemplateDecl>(D->getUnderlyingDecl())));
    *Storage++ = D;
  }

  return TemplateName(OT);
}
10400
10401/// Retrieve a template name representing an unqualified-id that has been
10402/// assumed to name a template for ADL purposes.
10403TemplateName ASTContext::getAssumedTemplateName(DeclarationName Name) const {
10404 auto *OT = new (*this) AssumedTemplateStorage(Name);
10405 return TemplateName(OT);
10406}
10407
/// Retrieve the template name that represents a qualified
/// template name such as \c std::vector.
TemplateName ASTContext::getQualifiedTemplateName(NestedNameSpecifier Qualifier,
                                                  bool TemplateKeyword,
                                                  TemplateName Template) const {
  assert(Template.getKind() == TemplateName::Template ||
         Template.getKind() == TemplateName::UsingTemplate);

  // Template template parameters are never qualified; return them as-is.
  if (Template.getAsTemplateDecl()->getKind() == Decl::TemplateTemplateParm) {
    assert(!Qualifier && "unexpected qualified template template parameter");
    assert(TemplateKeyword == false);
    return Template;
  }

  // FIXME: Canonicalization?
  // Unique the node in the folding set keyed on (qualifier, keyword, name).
  llvm::FoldingSetNodeID ID;
  QualifiedTemplateName::Profile(ID, NNS: Qualifier, TemplateKeyword, TN: Template);

  void *InsertPos = nullptr;
  QualifiedTemplateName *QTN =
      QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
  if (!QTN) {
    QTN = new (*this, alignof(QualifiedTemplateName))
        QualifiedTemplateName(Qualifier, TemplateKeyword, Template);
    QualifiedTemplateNames.InsertNode(N: QTN, InsertPos);
  }

  return TemplateName(QTN);
}
10437
/// Retrieve the template name that represents a dependent
/// template name such as \c MetaFun::template operator+.
TemplateName
ASTContext::getDependentTemplateName(const DependentTemplateStorage &S) const {
  // Unique dependent template names in a folding set keyed on S's profile.
  llvm::FoldingSetNodeID ID;
  S.Profile(ID);

  void *InsertPos = nullptr;
  if (DependentTemplateName *QTN =
          DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos))
    return TemplateName(QTN);

  DependentTemplateName *QTN =
      new (*this, alignof(DependentTemplateName)) DependentTemplateName(S);
  DependentTemplateNames.InsertNode(N: QTN, InsertPos);
  return TemplateName(QTN);
}
10455
/// Retrieve the uniqued template name recording the substitution of
/// \p Replacement for the template template parameter at \p Index of
/// \p AssociatedDecl.
TemplateName ASTContext::getSubstTemplateTemplateParm(TemplateName Replacement,
                                                      Decl *AssociatedDecl,
                                                      unsigned Index,
                                                      UnsignedOrNone PackIndex,
                                                      bool Final) const {
  llvm::FoldingSetNodeID ID;
  SubstTemplateTemplateParmStorage::Profile(ID, Replacement, AssociatedDecl,
                                            Index, PackIndex, Final);

  void *insertPos = nullptr;
  SubstTemplateTemplateParmStorage *subst
    = SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos&: insertPos);

  if (!subst) {
    subst = new (*this) SubstTemplateTemplateParmStorage(
        Replacement, AssociatedDecl, Index, PackIndex, Final);
    SubstTemplateTemplateParms.InsertNode(N: subst, InsertPos: insertPos);
  }

  return TemplateName(subst);
}
10477
/// Retrieve the uniqued template name recording the substitution of the
/// argument pack \p ArgPack for a template template parameter pack.
TemplateName
ASTContext::getSubstTemplateTemplateParmPack(const TemplateArgument &ArgPack,
                                             Decl *AssociatedDecl,
                                             unsigned Index, bool Final) const {
  // Profile() needs a non-const context reference.
  auto &Self = const_cast<ASTContext &>(*this);
  llvm::FoldingSetNodeID ID;
  SubstTemplateTemplateParmPackStorage::Profile(ID, Context&: Self, ArgPack,
                                                AssociatedDecl, Index, Final);

  void *InsertPos = nullptr;
  SubstTemplateTemplateParmPackStorage *Subst
    = SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos);

  if (!Subst) {
    Subst = new (*this) SubstTemplateTemplateParmPackStorage(
        ArgPack.pack_elements(), AssociatedDecl, Index, Final);
    SubstTemplateTemplateParmPacks.InsertNode(N: Subst, InsertPos);
  }

  return TemplateName(Subst);
}
10499
/// Retrieve the template name that represents a template name
/// deduced from a specialization.
TemplateName
ASTContext::getDeducedTemplateName(TemplateName Underlying,
                                   DefaultArguments DefaultArgs) const {
  // With no default arguments there is nothing to record.
  if (!DefaultArgs)
    return Underlying;

  llvm::FoldingSetNodeID ID;
  DeducedTemplateStorage::Profile(ID, Context: *this, Underlying, DefArgs: DefaultArgs);

  void *InsertPos = nullptr;
  DeducedTemplateStorage *DTS =
      DeducedTemplates.FindNodeOrInsertPos(ID, InsertPos);
  if (!DTS) {
    // One allocation: header plus a trailing array of default arguments.
    void *Mem = Allocate(Size: sizeof(DeducedTemplateStorage) +
                             sizeof(TemplateArgument) * DefaultArgs.Args.size(),
                         Align: alignof(DeducedTemplateStorage));
    DTS = new (Mem) DeducedTemplateStorage(Underlying, DefaultArgs);
    DeducedTemplates.InsertNode(N: DTS, InsertPos);
  }
  return TemplateName(DTS);
}
10523
/// getFromTargetType - Given one of the integer types provided by
/// TargetInfo, produce the corresponding type. The unsigned @p Type
/// is actually a value of type @c TargetInfo::IntType.
/// Returns a null CanQualType for TargetInfo::NoInt.
CanQualType ASTContext::getFromTargetType(unsigned Type) const {
  switch (Type) {
  case TargetInfo::NoInt: return {};
  case TargetInfo::SignedChar: return SignedCharTy;
  case TargetInfo::UnsignedChar: return UnsignedCharTy;
  case TargetInfo::SignedShort: return ShortTy;
  case TargetInfo::UnsignedShort: return UnsignedShortTy;
  case TargetInfo::SignedInt: return IntTy;
  case TargetInfo::UnsignedInt: return UnsignedIntTy;
  case TargetInfo::SignedLong: return LongTy;
  case TargetInfo::UnsignedLong: return UnsignedLongTy;
  case TargetInfo::SignedLongLong: return LongLongTy;
  case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy;
  }

  llvm_unreachable("Unhandled TargetInfo::IntType value");
}
10544
10545//===----------------------------------------------------------------------===//
10546// Type Predicates.
10547//===----------------------------------------------------------------------===//
10548
/// getObjCGCAttr - Returns one of GCNone, Weak or Strong objc's
/// garbage collection attribute.
///
Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const {
  if (getLangOpts().getGC() == LangOptions::NonGC)
    return Qualifiers::GCNone;

  assert(getLangOpts().ObjC);
  Qualifiers::GC GCAttrs = Ty.getObjCGCAttr();

  // Default behaviour under objective-C's gc is for ObjC pointers
  // (or pointers to them) be treated as though they were declared
  // as __strong.
  if (GCAttrs == Qualifiers::GCNone) {
    if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType())
      return Qualifiers::Strong;
    else if (Ty->isPointerType())
      // Recurse through C pointers to find the eventual pointee.
      return getObjCGCAttrKind(Ty: Ty->castAs<PointerType>()->getPointeeType());
  } else {
    // It's not valid to set GC attributes on anything that isn't a
    // pointer.
#ifndef NDEBUG
    // Strip arrays (of arrays...) before checking, since an array of
    // pointers may legitimately carry the attribute.
    QualType CT = Ty->getCanonicalTypeInternal();
    while (const auto *AT = dyn_cast<ArrayType>(CT))
      CT = AT->getElementType();
    assert(CT->isAnyPointerType() || CT->isBlockPointerType());
#endif
  }
  return GCAttrs;
}
10579
10580//===----------------------------------------------------------------------===//
10581// Type Compatibility Testing
10582//===----------------------------------------------------------------------===//
10583
10584/// areCompatVectorTypes - Return true if the two specified vector types are
10585/// compatible.
10586static bool areCompatVectorTypes(const VectorType *LHS,
10587 const VectorType *RHS) {
10588 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified());
10589 return LHS->getElementType() == RHS->getElementType() &&
10590 LHS->getNumElements() == RHS->getNumElements();
10591}
10592
10593/// areCompatMatrixTypes - Return true if the two specified matrix types are
10594/// compatible.
10595static bool areCompatMatrixTypes(const ConstantMatrixType *LHS,
10596 const ConstantMatrixType *RHS) {
10597 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified());
10598 return LHS->getElementType() == RHS->getElementType() &&
10599 LHS->getNumRows() == RHS->getNumRows() &&
10600 LHS->getNumColumns() == RHS->getNumColumns();
10601}
10602
/// Return true if the two vector types may be treated as the same type for
/// assignment/conversion purposes (exact match, or both equivalent to the
/// same generic GCC vector type).
bool ASTContext::areCompatibleVectorTypes(QualType FirstVec,
                                          QualType SecondVec) {
  assert(FirstVec->isVectorType() && "FirstVec should be a vector type");
  assert(SecondVec->isVectorType() && "SecondVec should be a vector type");

  if (hasSameUnqualifiedType(T1: FirstVec, T2: SecondVec))
    return true;

  // Treat Neon vector types and most AltiVec vector types as if they are the
  // equivalent GCC vector types.
  // The kinds excluded below (AltiVec pixel/bool, SVE and RVV fixed-length
  // data/predicate/mask vectors) have distinct semantics and must not be
  // conflated with generic vectors.
  const auto *First = FirstVec->castAs<VectorType>();
  const auto *Second = SecondVec->castAs<VectorType>();
  if (First->getNumElements() == Second->getNumElements() &&
      hasSameType(T1: First->getElementType(), T2: Second->getElementType()) &&
      First->getVectorKind() != VectorKind::AltiVecPixel &&
      First->getVectorKind() != VectorKind::AltiVecBool &&
      Second->getVectorKind() != VectorKind::AltiVecPixel &&
      Second->getVectorKind() != VectorKind::AltiVecBool &&
      First->getVectorKind() != VectorKind::SveFixedLengthData &&
      First->getVectorKind() != VectorKind::SveFixedLengthPredicate &&
      Second->getVectorKind() != VectorKind::SveFixedLengthData &&
      Second->getVectorKind() != VectorKind::SveFixedLengthPredicate &&
      First->getVectorKind() != VectorKind::RVVFixedLengthData &&
      Second->getVectorKind() != VectorKind::RVVFixedLengthData &&
      First->getVectorKind() != VectorKind::RVVFixedLengthMask &&
      Second->getVectorKind() != VectorKind::RVVFixedLengthMask &&
      First->getVectorKind() != VectorKind::RVVFixedLengthMask_1 &&
      Second->getVectorKind() != VectorKind::RVVFixedLengthMask_1 &&
      First->getVectorKind() != VectorKind::RVVFixedLengthMask_2 &&
      Second->getVectorKind() != VectorKind::RVVFixedLengthMask_2 &&
      First->getVectorKind() != VectorKind::RVVFixedLengthMask_4 &&
      Second->getVectorKind() != VectorKind::RVVFixedLengthMask_4)
    return true;

  // In OpenCL, treat half and _Float16 vector types as compatible.
  if (getLangOpts().OpenCL &&
      First->getNumElements() == Second->getNumElements()) {
    QualType FirstElt = First->getElementType();
    QualType SecondElt = Second->getElementType();

    if ((FirstElt->isFloat16Type() && SecondElt->isHalfType()) ||
        (FirstElt->isHalfType() && SecondElt->isFloat16Type())) {
      if (First->getVectorKind() != VectorKind::AltiVecPixel &&
          First->getVectorKind() != VectorKind::AltiVecBool &&
          Second->getVectorKind() != VectorKind::AltiVecPixel &&
          Second->getVectorKind() != VectorKind::AltiVecBool)
        return true;
    }
  }
  return false;
}
10654
10655bool ASTContext::areCompatibleOverflowBehaviorTypes(QualType LHS,
10656 QualType RHS) {
10657 auto Result = checkOBTAssignmentCompatibility(LHS, RHS);
10658 return Result != OBTAssignResult::IncompatibleKinds;
10659}
10660
10661ASTContext::OBTAssignResult
10662ASTContext::checkOBTAssignmentCompatibility(QualType LHS, QualType RHS) {
10663 const auto *LHSOBT = LHS->getAs<OverflowBehaviorType>();
10664 const auto *RHSOBT = RHS->getAs<OverflowBehaviorType>();
10665
10666 if (!LHSOBT && !RHSOBT)
10667 return OBTAssignResult::Compatible;
10668
10669 if (LHSOBT && RHSOBT) {
10670 if (LHSOBT->getBehaviorKind() != RHSOBT->getBehaviorKind())
10671 return OBTAssignResult::IncompatibleKinds;
10672 return OBTAssignResult::Compatible;
10673 }
10674
10675 QualType LHSUnderlying = LHSOBT ? LHSOBT->desugar() : LHS;
10676 QualType RHSUnderlying = RHSOBT ? RHSOBT->desugar() : RHS;
10677
10678 if (RHSOBT && !LHSOBT) {
10679 if (LHSUnderlying->isIntegerType() && RHSUnderlying->isIntegerType())
10680 return OBTAssignResult::Discards;
10681 }
10682
10683 return OBTAssignResult::NotApplicable;
10684}
10685
/// getRVVTypeSize - Return RVV vector register size.
/// Computes the fixed-length size in bits of the RVV builtin type \p Ty
/// from the target's vscale range; returns 0 when vscale is unknown.
static uint64_t getRVVTypeSize(ASTContext &Context, const BuiltinType *Ty) {
  assert(Ty->isRVVVLSBuiltinType() && "Invalid RVV Type");
  auto VScale = Context.getTargetInfo().getVScaleRange(
      LangOpts: Context.getLangOpts(), Mode: TargetInfo::ArmStreamingKind::NotStreaming);
  if (!VScale)
    return 0;

  ASTContext::BuiltinVectorTypeInfo Info = Context.getBuiltinVectorTypeInfo(Ty);

  uint64_t EltSize = Context.getTypeSize(T: Info.ElementType);
  // Mask elements occupy a single bit each, not a full bool's storage size.
  if (Info.ElementType == Context.BoolTy)
    EltSize = 1;

  uint64_t MinElts = Info.EC.getKnownMinValue();
  // Size = vscale_min * (known-min element count) * element bits.
  return VScale->first * MinElts * EltSize;
}
10703
10704bool ASTContext::areCompatibleRVVTypes(QualType FirstType,
10705 QualType SecondType) {
10706 assert(
10707 ((FirstType->isRVVSizelessBuiltinType() && SecondType->isVectorType()) ||
10708 (FirstType->isVectorType() && SecondType->isRVVSizelessBuiltinType())) &&
10709 "Expected RVV builtin type and vector type!");
10710
10711 auto IsValidCast = [this](QualType FirstType, QualType SecondType) {
10712 if (const auto *BT = FirstType->getAs<BuiltinType>()) {
10713 if (const auto *VT = SecondType->getAs<VectorType>()) {
10714 if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask) {
10715 BuiltinVectorTypeInfo Info = getBuiltinVectorTypeInfo(Ty: BT);
10716 return FirstType->isRVVVLSBuiltinType() &&
10717 Info.ElementType == BoolTy &&
10718 getTypeSize(T: SecondType) == ((getRVVTypeSize(Context&: *this, Ty: BT)));
10719 }
10720 if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask_1) {
10721 BuiltinVectorTypeInfo Info = getBuiltinVectorTypeInfo(Ty: BT);
10722 return FirstType->isRVVVLSBuiltinType() &&
10723 Info.ElementType == BoolTy &&
10724 getTypeSize(T: SecondType) == ((getRVVTypeSize(Context&: *this, Ty: BT) * 8));
10725 }
10726 if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask_2) {
10727 BuiltinVectorTypeInfo Info = getBuiltinVectorTypeInfo(Ty: BT);
10728 return FirstType->isRVVVLSBuiltinType() &&
10729 Info.ElementType == BoolTy &&
10730 getTypeSize(T: SecondType) == ((getRVVTypeSize(Context&: *this, Ty: BT)) * 4);
10731 }
10732 if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask_4) {
10733 BuiltinVectorTypeInfo Info = getBuiltinVectorTypeInfo(Ty: BT);
10734 return FirstType->isRVVVLSBuiltinType() &&
10735 Info.ElementType == BoolTy &&
10736 getTypeSize(T: SecondType) == ((getRVVTypeSize(Context&: *this, Ty: BT)) * 2);
10737 }
10738 if (VT->getVectorKind() == VectorKind::RVVFixedLengthData ||
10739 VT->getVectorKind() == VectorKind::Generic)
10740 return FirstType->isRVVVLSBuiltinType() &&
10741 getTypeSize(T: SecondType) == getRVVTypeSize(Context&: *this, Ty: BT) &&
10742 hasSameType(T1: VT->getElementType(),
10743 T2: getBuiltinVectorTypeInfo(Ty: BT).ElementType);
10744 }
10745 }
10746 return false;
10747 };
10748
10749 return IsValidCast(FirstType, SecondType) ||
10750 IsValidCast(SecondType, FirstType);
10751}
10752
/// Determine whether an RVV scalable builtin type and a generic (GCC-style)
/// fixed-length vector type may be converted under the
/// -flax-vector-conversions setting. One of FirstType/SecondType is the
/// sizeless builtin, the other the fixed-length vector; the check is applied
/// in both directions.
bool ASTContext::areLaxCompatibleRVVTypes(QualType FirstType,
                                          QualType SecondType) {
  assert(
      ((FirstType->isRVVSizelessBuiltinType() && SecondType->isVectorType()) ||
       (FirstType->isVectorType() && SecondType->isRVVSizelessBuiltinType())) &&
      "Expected RVV builtin type and vector type!");

  // Returns true when FirstType is the RVV builtin side and SecondType is a
  // generic vector the lax-conversion rules accept.
  auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) {
    const auto *BT = FirstType->getAs<BuiltinType>();
    if (!BT)
      return false;

    // Only vector-length-specific-capable RVV builtins participate.
    if (!BT->isRVVVLSBuiltinType())
      return false;

    const auto *VecTy = SecondType->getAs<VectorType>();
    if (VecTy && VecTy->getVectorKind() == VectorKind::Generic) {
      const LangOptions::LaxVectorConversionKind LVCKind =
          getLangOpts().getLaxVectorConversions();

      // If __riscv_v_fixed_vlen != N do not allow vector lax conversion.
      if (getTypeSize(T: SecondType) != getRVVTypeSize(Context&: *this, Ty: BT))
        return false;

      // If -flax-vector-conversions=all is specified, the types are
      // certainly compatible.
      if (LVCKind == LangOptions::LaxVectorConversionKind::All)
        return true;

      // If -flax-vector-conversions=integer is specified, the types are
      // compatible if the elements are integer types.
      if (LVCKind == LangOptions::LaxVectorConversionKind::Integer)
        return VecTy->getElementType().getCanonicalType()->isIntegerType() &&
               FirstType->getRVVEltType(Ctx: *this)->isIntegerType();
    }

    // LaxVectorConversionKind::None, or a non-generic vector kind.
    return false;
  };

  return IsLaxCompatible(FirstType, SecondType) ||
         IsLaxCompatible(SecondType, FirstType);
}
10795
10796bool ASTContext::hasDirectOwnershipQualifier(QualType Ty) const {
10797 while (true) {
10798 // __strong id
10799 if (const AttributedType *Attr = dyn_cast<AttributedType>(Val&: Ty)) {
10800 if (Attr->getAttrKind() == attr::ObjCOwnership)
10801 return true;
10802
10803 Ty = Attr->getModifiedType();
10804
10805 // X *__strong (...)
10806 } else if (const ParenType *Paren = dyn_cast<ParenType>(Val&: Ty)) {
10807 Ty = Paren->getInnerType();
10808
10809 // We do not want to look through typedefs, typeof(expr),
10810 // typeof(type), or any other way that the type is somehow
10811 // abstracted.
10812 } else {
10813 return false;
10814 }
10815 }
10816}
10817
10818//===----------------------------------------------------------------------===//
10819// ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's.
10820//===----------------------------------------------------------------------===//
10821
10822/// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the
10823/// inheritance hierarchy of 'rProto'.
10824bool
10825ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto,
10826 ObjCProtocolDecl *rProto) const {
10827 if (declaresSameEntity(D1: lProto, D2: rProto))
10828 return true;
10829 for (auto *PI : rProto->protocols())
10830 if (ProtocolCompatibleWithProtocol(lProto, rProto: PI))
10831 return true;
10832 return false;
10833}
10834
10835/// ObjCQualifiedClassTypesAreCompatible - compare Class<pr,...> and
10836/// Class<pr1, ...>.
10837bool ASTContext::ObjCQualifiedClassTypesAreCompatible(
10838 const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs) {
10839 for (auto *lhsProto : lhs->quals()) {
10840 bool match = false;
10841 for (auto *rhsProto : rhs->quals()) {
10842 if (ProtocolCompatibleWithProtocol(lProto: lhsProto, rProto: rhsProto)) {
10843 match = true;
10844 break;
10845 }
10846 }
10847 if (!match)
10848 return false;
10849 }
10850 return true;
10851}
10852
/// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an
/// ObjCQualifiedIDType (id<P1,...>). Returns true when the qualified-id
/// side's protocol requirements are satisfied by the other side. When
/// 'compare' is true, protocol compatibility is also accepted in the
/// reverse direction (used for symmetric comparison rather than
/// assignment).
bool ASTContext::ObjCQualifiedIdTypesAreCompatible(
    const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs,
    bool compare) {
  // Allow id<P..> and an 'id' in all cases.
  if (lhs->isObjCIdType() || rhs->isObjCIdType())
    return true;

  // Don't allow id<P..> to convert to Class or Class<P..> in either direction.
  if (lhs->isObjCClassType() || lhs->isObjCQualifiedClassType() ||
      rhs->isObjCClassType() || rhs->isObjCQualifiedClassType())
    return false;

  // Case 1: the LHS is the qualified id (id<P...> = rhs).
  if (lhs->isObjCQualifiedIdType()) {
    if (rhs->qual_empty()) {
      // If the RHS is a unqualified interface pointer "NSString*",
      // make sure we check the class hierarchy.
      if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) {
        for (auto *I : lhs->quals()) {
          // when comparing an id<P> on lhs with a static type on rhs,
          // see if static class implements all of id's protocols, directly or
          // through its super class and categories.
          if (!rhsID->ClassImplementsProtocol(lProto: I, lookupCategory: true))
            return false;
        }
      }
      // If there are no qualifiers and no interface, we have an 'id'.
      return true;
    }
    // Both the right and left sides have qualifiers. Each LHS protocol must
    // be satisfied either by an RHS qualifier or by the RHS interface.
    for (auto *lhsProto : lhs->quals()) {
      bool match = false;

      // when comparing an id<P> on lhs with a static type on rhs,
      // see if static class implements all of id's protocols, directly or
      // through its super class and categories.
      for (auto *rhsProto : rhs->quals()) {
        if (ProtocolCompatibleWithProtocol(lProto: lhsProto, rProto: rhsProto) ||
            (compare && ProtocolCompatibleWithProtocol(lProto: rhsProto, rProto: lhsProto))) {
          match = true;
          break;
        }
      }
      // If the RHS is a qualified interface pointer "NSString<P>*",
      // make sure we check the class hierarchy.
      // NOTE(review): this inner loop re-checks every LHS protocol (not just
      // lhsProto); any class-implemented protocol marks this iteration as a
      // match. Long-standing behavior — do not "fix" without tests.
      if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) {
        for (auto *I : lhs->quals()) {
          // when comparing an id<P> on lhs with a static type on rhs,
          // see if static class implements all of id's protocols, directly or
          // through its super class and categories.
          if (rhsID->ClassImplementsProtocol(lProto: I, lookupCategory: true)) {
            match = true;
            break;
          }
        }
      }
      if (!match)
        return false;
    }

    return true;
  }

  // Case 2: the RHS must be the qualified id (lhs = id<P...>).
  assert(rhs->isObjCQualifiedIdType() && "One of the LHS/RHS should be id<x>");

  if (lhs->getInterfaceType()) {
    // If both the right and left sides have qualifiers.
    for (auto *lhsProto : lhs->quals()) {
      bool match = false;

      // when comparing an id<P> on rhs with a static type on lhs,
      // see if static class implements all of id's protocols, directly or
      // through its super class and categories.
      // First, lhs protocols in the qualifier list must be found, direct
      // or indirect in rhs's qualifier list or it is a mismatch.
      for (auto *rhsProto : rhs->quals()) {
        if (ProtocolCompatibleWithProtocol(lProto: lhsProto, rProto: rhsProto) ||
            (compare && ProtocolCompatibleWithProtocol(lProto: rhsProto, rProto: lhsProto))) {
          match = true;
          break;
        }
      }
      if (!match)
        return false;
    }

    // Static class's protocols, or its super class or category protocols
    // must be found, direct or indirect in rhs's qualifier list or it is a mismatch.
    if (ObjCInterfaceDecl *lhsID = lhs->getInterfaceDecl()) {
      llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols;
      CollectInheritedProtocols(CDecl: lhsID, Protocols&: LHSInheritedProtocols);
      // This is rather dubious but matches gcc's behavior. If lhs has
      // no type qualifier and its class has no static protocol(s)
      // assume that it is mismatch.
      if (LHSInheritedProtocols.empty() && lhs->qual_empty())
        return false;
      for (auto *lhsProto : LHSInheritedProtocols) {
        bool match = false;
        for (auto *rhsProto : rhs->quals()) {
          if (ProtocolCompatibleWithProtocol(lProto: lhsProto, rProto: rhsProto) ||
              (compare && ProtocolCompatibleWithProtocol(lProto: rhsProto, rProto: lhsProto))) {
            match = true;
            break;
          }
        }
        if (!match)
          return false;
      }
    }
    return true;
  }
  return false;
}
10967
/// canAssignObjCInterfaces - Return true if the two interface types are
/// compatible for assignment from RHS to LHS. This handles validation of any
/// protocol qualifiers on the LHS or RHS.
bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT,
                                         const ObjCObjectPointerType *RHSOPT) {
  const ObjCObjectType* LHS = LHSOPT->getObjectType();
  const ObjCObjectType* RHS = RHSOPT->getObjectType();

  // If either type represents the built-in 'id' type, return true.
  if (LHS->isObjCUnqualifiedId() || RHS->isObjCUnqualifiedId())
    return true;

  // Function object that propagates a successful result or handles
  // __kindof types. A failed check against a __kindof RHS is retried in
  // the opposite direction with __kindof and protocol qualifiers stripped,
  // since __kindof X converts both up and down the hierarchy.
  auto finish = [&](bool succeeded) -> bool {
    if (succeeded)
      return true;

    if (!RHS->isKindOfType())
      return false;

    // Strip off __kindof and protocol qualifiers, then check whether
    // we can assign the other way.
    return canAssignObjCInterfaces(LHSOPT: RHSOPT->stripObjCKindOfTypeAndQuals(ctx: *this),
                                   RHSOPT: LHSOPT->stripObjCKindOfTypeAndQuals(ctx: *this));
  };

  // Casts from or to id<P> are allowed when the other side has compatible
  // protocols.
  if (LHS->isObjCQualifiedId() || RHS->isObjCQualifiedId()) {
    return finish(ObjCQualifiedIdTypesAreCompatible(lhs: LHSOPT, rhs: RHSOPT, compare: false));
  }

  // Verify protocol compatibility for casts from Class<P1> to Class<P2>.
  if (LHS->isObjCQualifiedClass() && RHS->isObjCQualifiedClass()) {
    return finish(ObjCQualifiedClassTypesAreCompatible(lhs: LHSOPT, rhs: RHSOPT));
  }

  // Casts from Class to Class<Foo>, or vice-versa, are allowed.
  if (LHS->isObjCClass() && RHS->isObjCClass()) {
    return true;
  }

  // If we have 2 user-defined types, fall into that path.
  if (LHS->getInterface() && RHS->getInterface()) {
    return finish(canAssignObjCInterfaces(LHS, RHS));
  }

  return false;
}
11018
/// canAssignObjCInterfacesInBlockPointer - This routine is specifically written
/// for providing type-safety for objective-c pointers used to pass/return
/// arguments in block literals. When passed as arguments, passing 'A*' where
/// 'id' is expected is not OK. Passing 'Sub *" where 'Super *" is expected is
/// not OK. For the return type, the opposite is not OK.
bool ASTContext::canAssignObjCInterfacesInBlockPointer(
                                         const ObjCObjectPointerType *LHSOPT,
                                         const ObjCObjectPointerType *RHSOPT,
                                         bool BlockReturnType) {

  // Function object that propagates a successful result or handles
  // __kindof types.
  auto finish = [&](bool succeeded) -> bool {
    if (succeeded)
      return true;

    // The "expected" type is the slot being assigned into: the return slot
    // for block returns, the parameter slot otherwise.
    const ObjCObjectPointerType *Expected = BlockReturnType ? RHSOPT : LHSOPT;
    if (!Expected->isKindOfType())
      return false;

    // Strip off __kindof and protocol qualifiers, then check whether
    // we can assign the other way.
    return canAssignObjCInterfacesInBlockPointer(
             LHSOPT: RHSOPT->stripObjCKindOfTypeAndQuals(ctx: *this),
             RHSOPT: LHSOPT->stripObjCKindOfTypeAndQuals(ctx: *this),
             BlockReturnType);
  };

  // Any builtin ObjC type on the RHS, or 'id' on the LHS, always matches.
  if (RHSOPT->isObjCBuiltinType() || LHSOPT->isObjCIdType())
    return true;

  if (LHSOPT->isObjCBuiltinType()) {
    return finish(RHSOPT->isObjCBuiltinType() ||
                  RHSOPT->isObjCQualifiedIdType());
  }

  if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType()) {
    if (getLangOpts().CompatibilityQualifiedIdBlockParamTypeChecking)
      // Use for block parameters previous type checking for compatibility.
      return finish(ObjCQualifiedIdTypesAreCompatible(lhs: LHSOPT, rhs: RHSOPT, compare: false) ||
                    // Or corrected type checking as in non-compat mode.
                    (!BlockReturnType &&
                     ObjCQualifiedIdTypesAreCompatible(lhs: RHSOPT, rhs: LHSOPT, compare: false)));
    else
      return finish(ObjCQualifiedIdTypesAreCompatible(
          lhs: (BlockReturnType ? LHSOPT : RHSOPT),
          rhs: (BlockReturnType ? RHSOPT : LHSOPT), compare: false));
  }

  const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType();
  const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType();
  if (LHS && RHS) { // We have 2 user-defined types.
    if (LHS != RHS) {
      // Direction matters: sub-to-super is accepted only for return types,
      // super-to-sub only for parameters (see the function comment above).
      if (LHS->getDecl()->isSuperClassOf(I: RHS->getDecl()))
        return finish(BlockReturnType);
      if (RHS->getDecl()->isSuperClassOf(I: LHS->getDecl()))
        return finish(!BlockReturnType);
    }
    else
      return true;
  }
  return false;
}
11082
11083/// Comparison routine for Objective-C protocols to be used with
11084/// llvm::array_pod_sort.
11085static int compareObjCProtocolsByName(ObjCProtocolDecl * const *lhs,
11086 ObjCProtocolDecl * const *rhs) {
11087 return (*lhs)->getName().compare(RHS: (*rhs)->getName());
11088}
11089
11090/// getIntersectionOfProtocols - This routine finds the intersection of set
11091/// of protocols inherited from two distinct objective-c pointer objects with
11092/// the given common base.
11093/// It is used to build composite qualifier list of the composite type of
11094/// the conditional expression involving two objective-c pointer objects.
11095static
11096void getIntersectionOfProtocols(ASTContext &Context,
11097 const ObjCInterfaceDecl *CommonBase,
11098 const ObjCObjectPointerType *LHSOPT,
11099 const ObjCObjectPointerType *RHSOPT,
11100 SmallVectorImpl<ObjCProtocolDecl *> &IntersectionSet) {
11101
11102 const ObjCObjectType* LHS = LHSOPT->getObjectType();
11103 const ObjCObjectType* RHS = RHSOPT->getObjectType();
11104 assert(LHS->getInterface() && "LHS must have an interface base");
11105 assert(RHS->getInterface() && "RHS must have an interface base");
11106
11107 // Add all of the protocols for the LHS.
11108 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSProtocolSet;
11109
11110 // Start with the protocol qualifiers.
11111 for (auto *proto : LHS->quals()) {
11112 Context.CollectInheritedProtocols(CDecl: proto, Protocols&: LHSProtocolSet);
11113 }
11114
11115 // Also add the protocols associated with the LHS interface.
11116 Context.CollectInheritedProtocols(CDecl: LHS->getInterface(), Protocols&: LHSProtocolSet);
11117
11118 // Add all of the protocols for the RHS.
11119 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSProtocolSet;
11120
11121 // Start with the protocol qualifiers.
11122 for (auto *proto : RHS->quals()) {
11123 Context.CollectInheritedProtocols(CDecl: proto, Protocols&: RHSProtocolSet);
11124 }
11125
11126 // Also add the protocols associated with the RHS interface.
11127 Context.CollectInheritedProtocols(CDecl: RHS->getInterface(), Protocols&: RHSProtocolSet);
11128
11129 // Compute the intersection of the collected protocol sets.
11130 for (auto *proto : LHSProtocolSet) {
11131 if (RHSProtocolSet.count(Ptr: proto))
11132 IntersectionSet.push_back(Elt: proto);
11133 }
11134
11135 // Compute the set of protocols that is implied by either the common type or
11136 // the protocols within the intersection.
11137 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> ImpliedProtocols;
11138 Context.CollectInheritedProtocols(CDecl: CommonBase, Protocols&: ImpliedProtocols);
11139
11140 // Remove any implied protocols from the list of inherited protocols.
11141 if (!ImpliedProtocols.empty()) {
11142 llvm::erase_if(C&: IntersectionSet, P: [&](ObjCProtocolDecl *proto) -> bool {
11143 return ImpliedProtocols.contains(Ptr: proto);
11144 });
11145 }
11146
11147 // Sort the remaining protocols by name.
11148 llvm::array_pod_sort(Start: IntersectionSet.begin(), End: IntersectionSet.end(),
11149 Compare: compareObjCProtocolsByName);
11150}
11151
11152/// Determine whether the first type is a subtype of the second.
11153static bool canAssignObjCObjectTypes(ASTContext &ctx, QualType lhs,
11154 QualType rhs) {
11155 // Common case: two object pointers.
11156 const auto *lhsOPT = lhs->getAs<ObjCObjectPointerType>();
11157 const auto *rhsOPT = rhs->getAs<ObjCObjectPointerType>();
11158 if (lhsOPT && rhsOPT)
11159 return ctx.canAssignObjCInterfaces(LHSOPT: lhsOPT, RHSOPT: rhsOPT);
11160
11161 // Two block pointers.
11162 const auto *lhsBlock = lhs->getAs<BlockPointerType>();
11163 const auto *rhsBlock = rhs->getAs<BlockPointerType>();
11164 if (lhsBlock && rhsBlock)
11165 return ctx.typesAreBlockPointerCompatible(lhs, rhs);
11166
11167 // If either is an unqualified 'id' and the other is a block, it's
11168 // acceptable.
11169 if ((lhsOPT && lhsOPT->isObjCIdType() && rhsBlock) ||
11170 (rhsOPT && rhsOPT->isObjCIdType() && lhsBlock))
11171 return true;
11172
11173 return false;
11174}
11175
11176// Check that the given Objective-C type argument lists are equivalent.
11177static bool sameObjCTypeArgs(ASTContext &ctx,
11178 const ObjCInterfaceDecl *iface,
11179 ArrayRef<QualType> lhsArgs,
11180 ArrayRef<QualType> rhsArgs,
11181 bool stripKindOf) {
11182 if (lhsArgs.size() != rhsArgs.size())
11183 return false;
11184
11185 ObjCTypeParamList *typeParams = iface->getTypeParamList();
11186 if (!typeParams)
11187 return false;
11188
11189 for (unsigned i = 0, n = lhsArgs.size(); i != n; ++i) {
11190 if (ctx.hasSameType(T1: lhsArgs[i], T2: rhsArgs[i]))
11191 continue;
11192
11193 switch (typeParams->begin()[i]->getVariance()) {
11194 case ObjCTypeParamVariance::Invariant:
11195 if (!stripKindOf ||
11196 !ctx.hasSameType(T1: lhsArgs[i].stripObjCKindOfType(ctx),
11197 T2: rhsArgs[i].stripObjCKindOfType(ctx))) {
11198 return false;
11199 }
11200 break;
11201
11202 case ObjCTypeParamVariance::Covariant:
11203 if (!canAssignObjCObjectTypes(ctx, lhs: lhsArgs[i], rhs: rhsArgs[i]))
11204 return false;
11205 break;
11206
11207 case ObjCTypeParamVariance::Contravariant:
11208 if (!canAssignObjCObjectTypes(ctx, lhs: rhsArgs[i], rhs: lhsArgs[i]))
11209 return false;
11210 break;
11211 }
11212 }
11213
11214 return true;
11215}
11216
/// areCommonBaseCompatible - Find the common base class of the two object
/// pointer types, if any, and return the resulting composite object pointer
/// type (with merged type arguments, the protocol intersection, and __kindof
/// propagated). Returns a null QualType when no common base exists or the
/// type arguments are incompatible.
QualType ASTContext::areCommonBaseCompatible(
           const ObjCObjectPointerType *Lptr,
           const ObjCObjectPointerType *Rptr) {
  const ObjCObjectType *LHS = Lptr->getObjectType();
  const ObjCObjectType *RHS = Rptr->getObjectType();
  const ObjCInterfaceDecl* LDecl = LHS->getInterface();
  const ObjCInterfaceDecl* RDecl = RHS->getInterface();

  if (!LDecl || !RDecl)
    return {};

  // When either LHS or RHS is a kindof type, we should return a kindof type.
  // For example, for common base of kindof(ASub1) and kindof(ASub2), we return
  // kindof(A).
  bool anyKindOf = LHS->isKindOfType() || RHS->isKindOfType();

  // Follow the left-hand side up the class hierarchy until we either hit a
  // root or find the RHS. Record the ancestors in case we don't find it.
  llvm::SmallDenseMap<const ObjCInterfaceDecl *, const ObjCObjectType *, 4>
    LHSAncestors;
  while (true) {
    // Record this ancestor. We'll need this if the common type isn't in the
    // path from the LHS to the root.
    LHSAncestors[LHS->getInterface()->getCanonicalDecl()] = LHS;

    if (declaresSameEntity(D1: LHS->getInterface(), D2: RDecl)) {
      // Get the type arguments.
      ArrayRef<QualType> LHSTypeArgs = LHS->getTypeArgsAsWritten();
      bool anyChanges = false;
      if (LHS->isSpecialized() && RHS->isSpecialized()) {
        // Both have type arguments, compare them.
        if (!sameObjCTypeArgs(ctx&: *this, iface: LHS->getInterface(),
                              lhsArgs: LHS->getTypeArgs(), rhsArgs: RHS->getTypeArgs(),
                              /*stripKindOf=*/true))
          return {};
      } else if (LHS->isSpecialized() != RHS->isSpecialized()) {
        // If only one has type arguments, the result will not have type
        // arguments.
        LHSTypeArgs = {};
        anyChanges = true;
      }

      // Compute the intersection of protocols.
      SmallVector<ObjCProtocolDecl *, 8> Protocols;
      getIntersectionOfProtocols(Context&: *this, CommonBase: LHS->getInterface(), LHSOPT: Lptr, RHSOPT: Rptr,
                                 IntersectionSet&: Protocols);
      if (!Protocols.empty())
        anyChanges = true;

      // If anything in the LHS will have changed, build a new result type.
      // If we need to return a kindof type but LHS is not a kindof type, we
      // build a new result type.
      if (anyChanges || LHS->isKindOfType() != anyKindOf) {
        QualType Result = getObjCInterfaceType(Decl: LHS->getInterface());
        Result = getObjCObjectType(baseType: Result, typeArgs: LHSTypeArgs, protocols: Protocols,
                                   isKindOf: anyKindOf || LHS->isKindOfType());
        return getObjCObjectPointerType(ObjectT: Result);
      }

      return getObjCObjectPointerType(ObjectT: QualType(LHS, 0));
    }

    // Find the superclass.
    QualType LHSSuperType = LHS->getSuperClassType();
    if (LHSSuperType.isNull())
      break;

    LHS = LHSSuperType->castAs<ObjCObjectType>();
  }

  // We didn't find anything by following the LHS to its root; now check
  // the RHS against the cached set of ancestors.
  // (This mirrors the loop above with the roles of LHS and RHS swapped.)
  while (true) {
    auto KnownLHS = LHSAncestors.find(Val: RHS->getInterface()->getCanonicalDecl());
    if (KnownLHS != LHSAncestors.end()) {
      LHS = KnownLHS->second;

      // Get the type arguments.
      ArrayRef<QualType> RHSTypeArgs = RHS->getTypeArgsAsWritten();
      bool anyChanges = false;
      if (LHS->isSpecialized() && RHS->isSpecialized()) {
        // Both have type arguments, compare them.
        if (!sameObjCTypeArgs(ctx&: *this, iface: LHS->getInterface(),
                              lhsArgs: LHS->getTypeArgs(), rhsArgs: RHS->getTypeArgs(),
                              /*stripKindOf=*/true))
          return {};
      } else if (LHS->isSpecialized() != RHS->isSpecialized()) {
        // If only one has type arguments, the result will not have type
        // arguments.
        RHSTypeArgs = {};
        anyChanges = true;
      }

      // Compute the intersection of protocols.
      SmallVector<ObjCProtocolDecl *, 8> Protocols;
      getIntersectionOfProtocols(Context&: *this, CommonBase: RHS->getInterface(), LHSOPT: Lptr, RHSOPT: Rptr,
                                 IntersectionSet&: Protocols);
      if (!Protocols.empty())
        anyChanges = true;

      // If we need to return a kindof type but RHS is not a kindof type, we
      // build a new result type.
      if (anyChanges || RHS->isKindOfType() != anyKindOf) {
        QualType Result = getObjCInterfaceType(Decl: RHS->getInterface());
        Result = getObjCObjectType(baseType: Result, typeArgs: RHSTypeArgs, protocols: Protocols,
                                   isKindOf: anyKindOf || RHS->isKindOfType());
        return getObjCObjectPointerType(ObjectT: Result);
      }

      return getObjCObjectPointerType(ObjectT: QualType(RHS, 0));
    }

    // Find the superclass of the RHS.
    QualType RHSSuperType = RHS->getSuperClassType();
    if (RHSSuperType.isNull())
      break;

    RHS = RHSSuperType->castAs<ObjCObjectType>();
  }

  // No common base class was found.
  return {};
}
11339
/// canAssignObjCInterfaces - Return true if an object of RHS's type can be
/// assigned to an lvalue of LHS's type: the RHS class must be a subclass of
/// the LHS class, the RHS must satisfy the LHS's protocol qualifiers, and
/// any type arguments must be compatible.
bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS,
                                         const ObjCObjectType *RHS) {
  assert(LHS->getInterface() && "LHS is not an interface type");
  assert(RHS->getInterface() && "RHS is not an interface type");

  // Verify that the base decls are compatible: the RHS must be a subclass of
  // the LHS.
  ObjCInterfaceDecl *LHSInterface = LHS->getInterface();
  bool IsSuperClass = LHSInterface->isSuperClassOf(I: RHS->getInterface());
  if (!IsSuperClass)
    return false;

  // If the LHS has protocol qualifiers, determine whether all of them are
  // satisfied by the RHS (i.e., the RHS has a superset of the protocols in the
  // LHS).
  if (LHS->getNumProtocols() > 0) {
    // OK if conversion of LHS to SuperClass results in narrowing of types
    // ; i.e., SuperClass may implement at least one of the protocols
    // in LHS's protocol list. Example, SuperObj<P1> = lhs<P1,P2> is ok.
    // But not SuperObj<P1,P2,P3> = lhs<P1,P2>.
    llvm::SmallPtrSet<ObjCProtocolDecl *, 8> SuperClassInheritedProtocols;
    CollectInheritedProtocols(CDecl: RHS->getInterface(), Protocols&: SuperClassInheritedProtocols);
    // Also, if RHS has explicit qualifiers, include them for comparing with LHS's
    // qualifiers.
    for (auto *RHSPI : RHS->quals())
      CollectInheritedProtocols(CDecl: RHSPI, Protocols&: SuperClassInheritedProtocols);
    // If there is no protocols associated with RHS, it is not a match.
    if (SuperClassInheritedProtocols.empty())
      return false;

    // Every LHS protocol must be found (by name) in the RHS's set.
    for (const auto *LHSProto : LHS->quals()) {
      bool SuperImplementsProtocol = false;
      for (auto *SuperClassProto : SuperClassInheritedProtocols)
        if (SuperClassProto->lookupProtocolNamed(PName: LHSProto->getIdentifier())) {
          SuperImplementsProtocol = true;
          break;
        }
      if (!SuperImplementsProtocol)
        return false;
    }
  }

  // If the LHS is specialized, we may need to check type arguments.
  if (LHS->isSpecialized()) {
    // Follow the superclass chain until we've matched the LHS class in the
    // hierarchy. This substitutes type arguments through.
    const ObjCObjectType *RHSSuper = RHS;
    while (!declaresSameEntity(D1: RHSSuper->getInterface(), D2: LHSInterface))
      RHSSuper = RHSSuper->getSuperClassType()->castAs<ObjCObjectType>();

    // If the RHS is specialized, compare type arguments.
    if (RHSSuper->isSpecialized() &&
        !sameObjCTypeArgs(ctx&: *this, iface: LHS->getInterface(),
                          lhsArgs: LHS->getTypeArgs(), rhsArgs: RHSSuper->getTypeArgs(),
                          /*stripKindOf=*/true)) {
      return false;
    }
  }

  return true;
}
11401
11402bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) {
11403 // get the "pointed to" types
11404 const auto *LHSOPT = LHS->getAs<ObjCObjectPointerType>();
11405 const auto *RHSOPT = RHS->getAs<ObjCObjectPointerType>();
11406
11407 if (!LHSOPT || !RHSOPT)
11408 return false;
11409
11410 return canAssignObjCInterfaces(LHSOPT, RHSOPT) ||
11411 canAssignObjCInterfaces(LHSOPT: RHSOPT, RHSOPT: LHSOPT);
11412}
11413
/// canBindObjCObjectType - Determine whether an object of type 'From' can
/// be bound to a reference of type 'To', modeled as assignability between
/// the corresponding object pointer types.
bool ASTContext::canBindObjCObjectType(QualType To, QualType From) {
  return canAssignObjCInterfaces(
                LHSOPT: getObjCObjectPointerType(ObjectT: To)->castAs<ObjCObjectPointerType>(),
                RHSOPT: getObjCObjectPointerType(ObjectT: From)->castAs<ObjCObjectPointerType>());
}
11419
11420/// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible,
11421/// both shall have the identically qualified version of a compatible type.
11422/// C99 6.2.7p1: Two types have compatible types if their types are the
11423/// same. See 6.7.[2,3,5] for additional rules.
11424bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS,
11425 bool CompareUnqualified) {
11426 if (getLangOpts().CPlusPlus)
11427 return hasSameType(T1: LHS, T2: RHS);
11428
11429 return !mergeTypes(LHS, RHS, OfBlockPointer: false, Unqualified: CompareUnqualified).isNull();
11430}
11431
/// propertyTypesAreCompatible - Determine whether two Objective-C property
/// types are compatible; delegates to the general compatibility rules.
bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) {
  return typesAreCompatible(LHS, RHS);
}
11435
/// typesAreBlockPointerCompatible - Determine whether two types are
/// compatible under the block-pointer merging rules (OfBlockPointer=true).
bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) {
  return !mergeTypes(LHS, RHS, OfBlockPointer: true).isNull();
}
11439
11440/// mergeTransparentUnionType - if T is a transparent union type and a member
11441/// of T is compatible with SubType, return the merged type, else return
11442/// QualType()
11443QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType,
11444 bool OfBlockPointer,
11445 bool Unqualified) {
11446 if (const RecordType *UT = T->getAsUnionType()) {
11447 RecordDecl *UD = UT->getDecl()->getMostRecentDecl();
11448 if (UD->hasAttr<TransparentUnionAttr>()) {
11449 for (const auto *I : UD->fields()) {
11450 QualType ET = I->getType().getUnqualifiedType();
11451 QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified);
11452 if (!MT.isNull())
11453 return MT;
11454 }
11455 }
11456 }
11457
11458 return {};
11459}
11460
11461/// mergeFunctionParameterTypes - merge two types which appear as function
11462/// parameter types
11463QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs,
11464 bool OfBlockPointer,
11465 bool Unqualified) {
11466 // GNU extension: two types are compatible if they appear as a function
11467 // argument, one of the types is a transparent union type and the other
11468 // type is compatible with a union member
11469 QualType lmerge = mergeTransparentUnionType(T: lhs, SubType: rhs, OfBlockPointer,
11470 Unqualified);
11471 if (!lmerge.isNull())
11472 return lmerge;
11473
11474 QualType rmerge = mergeTransparentUnionType(T: rhs, SubType: lhs, OfBlockPointer,
11475 Unqualified);
11476 if (!rmerge.isNull())
11477 return rmerge;
11478
11479 return mergeTypes(lhs, rhs, OfBlockPointer, Unqualified);
11480}
11481
/// Merge two function types into one that both are compatible with, following
/// the C rules for composite function types. Returns a null QualType when the
/// function types are incompatible.
///
/// \param OfBlockPointer merging the pointee types of two block pointers,
///        which enables looser block-specific return-type rules.
/// \param Unqualified compare and merge unqualified forms of the types.
/// \param AllowCXX permit exception specifications on the prototypes.
/// \param IsConditionalOperator one-sided attributes (noreturn, function
///        effects) combine by intersection instead of union; see below.
QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
                                        bool OfBlockPointer, bool Unqualified,
                                        bool AllowCXX,
                                        bool IsConditionalOperator) {
  const auto *lbase = lhs->castAs<FunctionType>();
  const auto *rbase = rhs->castAs<FunctionType>();
  const auto *lproto = dyn_cast<FunctionProtoType>(Val: lbase);
  const auto *rproto = dyn_cast<FunctionProtoType>(Val: rbase);
  // Track whether the merged type stays canonically identical to lhs (resp.
  // rhs); when true at the end, we can return the original sugared type.
  bool allLTypes = true;
  bool allRTypes = true;

  // Check return type
  QualType retType;
  if (OfBlockPointer) {
    QualType RHS = rbase->getReturnType();
    QualType LHS = lbase->getReturnType();
    bool UnqualifiedResult = Unqualified;
    // Also drop qualifiers when only the LHS return type carries them.
    if (!UnqualifiedResult)
      UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers());
    retType = mergeTypes(LHS, RHS, OfBlockPointer: true, Unqualified: UnqualifiedResult, BlockReturnType: true);
  }
  else
    retType = mergeTypes(lbase->getReturnType(), rbase->getReturnType(), OfBlockPointer: false,
                         Unqualified);
  if (retType.isNull())
    return {};

  if (Unqualified)
    retType = retType.getUnqualifiedType();

  // Record whether the merged return type matches each operand's return type
  // canonically, so that lhs/rhs may still be returned unchanged.
  CanQualType LRetType = getCanonicalType(T: lbase->getReturnType());
  CanQualType RRetType = getCanonicalType(T: rbase->getReturnType());
  if (Unqualified) {
    LRetType = LRetType.getUnqualifiedType();
    RRetType = RRetType.getUnqualifiedType();
  }

  if (getCanonicalType(T: retType) != LRetType)
    allLTypes = false;
  if (getCanonicalType(T: retType) != RRetType)
    allRTypes = false;

  // FIXME: double check this
  // FIXME: should we error if lbase->getRegParmAttr() != 0 &&
  //                           rbase->getRegParmAttr() != 0 &&
  //                           lbase->getRegParmAttr() != rbase->getRegParmAttr()?
  FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo();
  FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo();

  // Compatible functions must have compatible calling conventions
  if (lbaseInfo.getCC() != rbaseInfo.getCC())
    return {};

  // Regparm is part of the calling convention.
  if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm())
    return {};
  if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm())
    return {};

  // These ABI-affecting attributes must also agree exactly.
  if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult())
    return {};
  if (lbaseInfo.getNoCallerSavedRegs() != rbaseInfo.getNoCallerSavedRegs())
    return {};
  if (lbaseInfo.getNoCfCheck() != rbaseInfo.getNoCfCheck())
    return {};

  // When merging declarations, it's common for supplemental information like
  // attributes to only be present in one of the declarations, and we generally
  // want type merging to preserve the union of information.  So a merged
  // function type should be noreturn if it was noreturn in *either* operand
  // type.
  //
  // But for the conditional operator, this is backwards.  The result of the
  // operator could be either operand, and its type should conservatively
  // reflect that.  So a function type in a composite type is noreturn only
  // if it's noreturn in *both* operand types.
  //
  // Arguably, noreturn is a kind of subtype, and the conditional operator
  // ought to produce the most specific common supertype of its operand types.
  // That would differ from this rule in contravariant positions.  However,
  // neither C nor C++ generally uses this kind of subtype reasoning.  Also,
  // as a practical matter, it would only affect C code that does abstraction of
  // higher-order functions (taking noreturn callbacks!), which is uncommon to
  // say the least.  So we use the simpler rule.
  bool NoReturn = IsConditionalOperator
                      ? lbaseInfo.getNoReturn() && rbaseInfo.getNoReturn()
                      : lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn();
  if (lbaseInfo.getNoReturn() != NoReturn)
    allLTypes = false;
  if (rbaseInfo.getNoReturn() != NoReturn)
    allRTypes = false;

  FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(noReturn: NoReturn);

  // Populated only when the two prototypes' effect sets differ and needed
  // merging; applied to the result type's ExtProtoInfo below.
  std::optional<FunctionEffectSet> MergedFX;

  if (lproto && rproto) { // two C99 style function prototypes
    assert((AllowCXX ||
            (!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec())) &&
           "C++ shouldn't be here");
    // Compatible functions must have the same number of parameters
    if (lproto->getNumParams() != rproto->getNumParams())
      return {};

    // Variadic and non-variadic functions aren't compatible
    if (lproto->isVariadic() != rproto->isVariadic())
      return {};

    if (lproto->getMethodQuals() != rproto->getMethodQuals())
      return {};

    // Function protos with different 'cfi_salt' values aren't compatible.
    if (lproto->getExtraAttributeInfo().CFISalt !=
        rproto->getExtraAttributeInfo().CFISalt)
      return {};

    // Function effects are handled similarly to noreturn, see above.
    FunctionEffectsRef LHSFX = lproto->getFunctionEffects();
    FunctionEffectsRef RHSFX = rproto->getFunctionEffects();
    if (LHSFX != RHSFX) {
      if (IsConditionalOperator)
        MergedFX = FunctionEffectSet::getIntersection(LHS: LHSFX, RHS: RHSFX);
      else {
        FunctionEffectSet::Conflicts Errs;
        MergedFX = FunctionEffectSet::getUnion(LHS: LHSFX, RHS: RHSFX, Errs);
        // Here we're discarding a possible error due to conflicts in the effect
        // sets. But we're not in a context where we can report it. The
        // operation does however guarantee maintenance of invariants.
      }
      if (*MergedFX != LHSFX)
        allLTypes = false;
      if (*MergedFX != RHSFX)
        allRTypes = false;
    }

    // Merge per-parameter extra info (e.g. noescape); fails if the infos are
    // irreconcilable.
    SmallVector<FunctionProtoType::ExtParameterInfo, 4> newParamInfos;
    bool canUseLeft, canUseRight;
    if (!mergeExtParameterInfo(FirstFnType: lproto, SecondFnType: rproto, CanUseFirst&: canUseLeft, CanUseSecond&: canUseRight,
                               NewParamInfos&: newParamInfos))
      return {};

    if (!canUseLeft)
      allLTypes = false;
    if (!canUseRight)
      allRTypes = false;

    // Check parameter type compatibility
    SmallVector<QualType, 10> types;
    for (unsigned i = 0, n = lproto->getNumParams(); i < n; i++) {
      QualType lParamType = lproto->getParamType(i).getUnqualifiedType();
      QualType rParamType = rproto->getParamType(i).getUnqualifiedType();
      QualType paramType = mergeFunctionParameterTypes(
          lhs: lParamType, rhs: rParamType, OfBlockPointer, Unqualified);
      if (paramType.isNull())
        return {};

      if (Unqualified)
        paramType = paramType.getUnqualifiedType();

      types.push_back(Elt: paramType);
      if (Unqualified) {
        lParamType = lParamType.getUnqualifiedType();
        rParamType = rParamType.getUnqualifiedType();
      }

      if (getCanonicalType(T: paramType) != getCanonicalType(T: lParamType))
        allLTypes = false;
      if (getCanonicalType(T: paramType) != getCanonicalType(T: rParamType))
        allRTypes = false;
    }

    // If either original type already matches the merged result canonically,
    // return it to preserve its sugar.
    if (allLTypes) return lhs;
    if (allRTypes) return rhs;

    FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo();
    EPI.ExtInfo = einfo;
    EPI.ExtParameterInfos =
        newParamInfos.empty() ? nullptr : newParamInfos.data();
    if (MergedFX)
      EPI.FunctionEffects = *MergedFX;
    return getFunctionType(ResultTy: retType, Args: types, EPI);
  }

  // At most one side has a prototype: the no-proto side can no longer stand
  // for the merged type when the other side has one.
  if (lproto) allRTypes = false;
  if (rproto) allLTypes = false;

  const FunctionProtoType *proto = lproto ? lproto : rproto;
  if (proto) {
    assert((AllowCXX || !proto->hasExceptionSpec()) && "C++ shouldn't be here");
    if (proto->isVariadic())
      return {};
    // Check that the types are compatible with the types that
    // would result from default argument promotions (C99 6.7.5.3p15).
    // The only types actually affected are promotable integer
    // types and floats, which would be passed as a different
    // type depending on whether the prototype is visible.
    for (unsigned i = 0, n = proto->getNumParams(); i < n; ++i) {
      QualType paramTy = proto->getParamType(i);

      // Look at the converted type of enum types, since that is the type used
      // to pass enum values.
      if (const auto *ED = paramTy->getAsEnumDecl()) {
        paramTy = ED->getIntegerType();
        if (paramTy.isNull())
          return {};
      }

      if (isPromotableIntegerType(T: paramTy) ||
          getCanonicalType(T: paramTy).getUnqualifiedType() == FloatTy)
        return {};
    }

    if (allLTypes) return lhs;
    if (allRTypes) return rhs;

    FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo();
    EPI.ExtInfo = einfo;
    if (MergedFX)
      EPI.FunctionEffects = *MergedFX;
    return getFunctionType(ResultTy: retType, Args: proto->getParamTypes(), EPI);
  }

  // Neither side has a prototype.
  if (allLTypes) return lhs;
  if (allRTypes) return rhs;
  return getFunctionNoProtoType(ResultTy: retType, Info: einfo);
}
11708
11709/// Given that we have an enum type and a non-enum type, try to merge them.
11710static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET,
11711 QualType other, bool isBlockReturnType) {
11712 // C99 6.7.2.2p4: Each enumerated type shall be compatible with char,
11713 // a signed integer type, or an unsigned integer type.
11714 // Compatibility is based on the underlying type, not the promotion
11715 // type.
11716 QualType underlyingType =
11717 ET->getDecl()->getDefinitionOrSelf()->getIntegerType();
11718 if (underlyingType.isNull())
11719 return {};
11720 if (Context.hasSameType(T1: underlyingType, T2: other))
11721 return other;
11722
11723 // In block return types, we're more permissive and accept any
11724 // integral type of the same size.
11725 if (isBlockReturnType && other->isIntegerType() &&
11726 Context.getTypeSize(T: underlyingType) == Context.getTypeSize(T: other))
11727 return other;
11728
11729 return {};
11730}
11731
11732QualType ASTContext::mergeTagDefinitions(QualType LHS, QualType RHS) {
11733 // C17 and earlier and C++ disallow two tag definitions within the same TU
11734 // from being compatible.
11735 if (LangOpts.CPlusPlus || !LangOpts.C23)
11736 return {};
11737
11738 // Nameless tags are comparable only within outer definitions. At the top
11739 // level they are not comparable.
11740 const TagDecl *LTagD = LHS->castAsTagDecl(), *RTagD = RHS->castAsTagDecl();
11741 if (!LTagD->getIdentifier() || !RTagD->getIdentifier())
11742 return {};
11743
11744 // C23, on the other hand, requires the members to be "the same enough", so
11745 // we use a structural equivalence check.
11746 StructuralEquivalenceContext::NonEquivalentDeclSet NonEquivalentDecls;
11747 StructuralEquivalenceContext Ctx(
11748 getLangOpts(), *this, *this, NonEquivalentDecls,
11749 StructuralEquivalenceKind::Default, /*StrictTypeSpelling=*/false,
11750 /*Complain=*/false, /*ErrorOnTagTypeMismatch=*/true);
11751 return Ctx.IsEquivalent(T1: LHS, T2: RHS) ? LHS : QualType{};
11752}
11753
/// Try to merge LHS and RHS when at least one of them may be an
/// OverflowBehaviorType (OBT). Returns std::nullopt when neither side is an
/// OBT, signalling that the normal mergeTypes logic should proceed; otherwise
/// returns the merge result, which is a null QualType on failure.
std::optional<QualType> ASTContext::tryMergeOverflowBehaviorTypes(
    QualType LHS, QualType RHS, bool OfBlockPointer, bool Unqualified,
    bool BlockReturnType, bool IsConditionalOperator) {
  const auto *LHSOBT = LHS->getAs<OverflowBehaviorType>();
  const auto *RHSOBT = RHS->getAs<OverflowBehaviorType>();

  if (!LHSOBT && !RHSOBT)
    return std::nullopt;

  if (LHSOBT) {
    if (RHSOBT) {
      // Both sides are OBTs: their behavior kinds must agree.
      if (LHSOBT->getBehaviorKind() != RHSOBT->getBehaviorKind())
        return QualType();

      QualType MergedUnderlying = mergeTypes(
          LHSOBT->getUnderlyingType(), RHSOBT->getUnderlyingType(),
          OfBlockPointer, Unqualified, BlockReturnType, IsConditionalOperator);

      if (MergedUnderlying.isNull())
        return QualType();

      // Same canonical OBT: preserve as much type sugar as possible.
      if (getCanonicalType(T: LHSOBT) == getCanonicalType(T: RHSOBT)) {
        if (LHSOBT->getUnderlyingType() == RHSOBT->getUnderlyingType())
          return getCommonSugaredType(X: LHS, Y: RHS);
        return getOverflowBehaviorType(
            Kind: LHSOBT->getBehaviorKind(),
            Underlying: getCanonicalType(T: LHSOBT->getUnderlyingType()));
      }

      // For different underlying types that successfully merge, wrap the
      // merged underlying type with the common overflow behavior
      return getOverflowBehaviorType(Kind: LHSOBT->getBehaviorKind(),
                                     Underlying: MergedUnderlying);
    }
    // Only LHS is an OBT: merge its underlying type with RHS directly.
    return mergeTypes(LHSOBT->getUnderlyingType(), RHS, OfBlockPointer,
                      Unqualified, BlockReturnType, IsConditionalOperator);
  }

  // Only RHS is an OBT: merge LHS with its underlying type directly.
  return mergeTypes(LHS, RHSOBT->getUnderlyingType(), OfBlockPointer,
                    Unqualified, BlockReturnType, IsConditionalOperator);
}
11795
/// mergeTypes - Compute a single type that both LHS and RHS are compatible
/// with, under the C notion of type compatibility, preserving typedef and
/// other sugar where possible. Returns a null QualType when the two types
/// cannot be merged.
QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, bool OfBlockPointer,
                                bool Unqualified, bool BlockReturnType,
                                bool IsConditionalOperator) {
  // For C++ we will not reach this code with reference types (see below),
  // for OpenMP variant call overloading we might.
  //
  // C++ [expr]: If an expression initially has the type "reference to T", the
  // type is adjusted to "T" prior to any further analysis, the expression
  // designates the object or function denoted by the reference, and the
  // expression is an lvalue unless the reference is an rvalue reference and
  // the expression is a function call (possibly inside parentheses).
  auto *LHSRefTy = LHS->getAs<ReferenceType>();
  auto *RHSRefTy = RHS->getAs<ReferenceType>();
  if (LangOpts.OpenMP && LHSRefTy && RHSRefTy &&
      LHS->getTypeClass() == RHS->getTypeClass())
    return mergeTypes(LHS: LHSRefTy->getPointeeType(), RHS: RHSRefTy->getPointeeType(),
                      OfBlockPointer, Unqualified, BlockReturnType);
  if (LHSRefTy || RHSRefTy)
    return {};

  // Overflow-behavior types have dedicated merging logic; std::nullopt means
  // neither side was one and normal merging continues below.
  if (std::optional<QualType> MergedOBT =
          tryMergeOverflowBehaviorTypes(LHS, RHS, OfBlockPointer, Unqualified,
                                        BlockReturnType, IsConditionalOperator))
    return *MergedOBT;

  if (Unqualified) {
    LHS = LHS.getUnqualifiedType();
    RHS = RHS.getUnqualifiedType();
  }

  QualType LHSCan = getCanonicalType(T: LHS),
           RHSCan = getCanonicalType(T: RHS);

  // If two types are identical, they are compatible.
  if (LHSCan == RHSCan)
    return LHS;

  // If the qualifiers are different, the types aren't compatible... mostly.
  Qualifiers LQuals = LHSCan.getLocalQualifiers();
  Qualifiers RQuals = RHSCan.getLocalQualifiers();
  if (LQuals != RQuals) {
    // If any of these qualifiers are different, we have a type
    // mismatch.
    if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() ||
        LQuals.getAddressSpace() != RQuals.getAddressSpace() ||
        LQuals.getObjCLifetime() != RQuals.getObjCLifetime() ||
        !LQuals.getPointerAuth().isEquivalent(Other: RQuals.getPointerAuth()) ||
        LQuals.hasUnaligned() != RQuals.hasUnaligned())
      return {};

    // Exactly one GC qualifier difference is allowed: __strong is
    // okay if the other type has no GC qualifier but is an Objective
    // C object pointer (i.e. implicitly strong by default).  We fix
    // this by pretending that the unqualified type was actually
    // qualified __strong.
    Qualifiers::GC GC_L = LQuals.getObjCGCAttr();
    Qualifiers::GC GC_R = RQuals.getObjCGCAttr();
    assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements");

    if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak)
      return {};

    if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) {
      return mergeTypes(LHS, RHS: getObjCGCQualType(T: RHS, GCAttr: Qualifiers::Strong));
    }
    if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) {
      return mergeTypes(LHS: getObjCGCQualType(T: LHS, GCAttr: Qualifiers::Strong), RHS);
    }
    return {};
  }

  // Okay, qualifiers are equal.

  Type::TypeClass LHSClass = LHSCan->getTypeClass();
  Type::TypeClass RHSClass = RHSCan->getTypeClass();

  // We want to consider the two function types to be the same for these
  // comparisons, just force one to the other.
  if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto;
  if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto;

  // Same as above for arrays
  if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray)
    LHSClass = Type::ConstantArray;
  if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray)
    RHSClass = Type::ConstantArray;

  // ObjCInterfaces are just specialized ObjCObjects.
  if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject;
  if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject;

  // Canonicalize ExtVector -> Vector.
  if (LHSClass == Type::ExtVector) LHSClass = Type::Vector;
  if (RHSClass == Type::ExtVector) RHSClass = Type::Vector;

  // If the canonical type classes don't match.
  if (LHSClass != RHSClass) {
    // Note that we only have special rules for turning block enum
    // returns into block int returns, not vice-versa.
    if (const auto *ETy = LHS->getAsCanonical<EnumType>()) {
      return mergeEnumWithInteger(Context&: *this, ET: ETy, other: RHS, isBlockReturnType: false);
    }
    if (const EnumType *ETy = RHS->getAsCanonical<EnumType>()) {
      return mergeEnumWithInteger(Context&: *this, ET: ETy, other: LHS, isBlockReturnType: BlockReturnType);
    }
    // allow block pointer type to match an 'id' type.
    if (OfBlockPointer && !BlockReturnType) {
      if (LHS->isObjCIdType() && RHS->isBlockPointerType())
        return LHS;
      if (RHS->isObjCIdType() && LHS->isBlockPointerType())
        return RHS;
    }
    // Allow __auto_type to match anything; it merges to the type with more
    // information.
    if (const auto *AT = LHS->getAs<AutoType>()) {
      if (!AT->isDeduced() && AT->isGNUAutoType())
        return RHS;
    }
    if (const auto *AT = RHS->getAs<AutoType>()) {
      if (!AT->isDeduced() && AT->isGNUAutoType())
        return LHS;
    }
    return {};
  }

  // The canonical type classes match.
  switch (LHSClass) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");

  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
  case Type::LValueReference:
  case Type::RValueReference:
  case Type::MemberPointer:
    llvm_unreachable("C++ should never be in mergeTypes");

  case Type::ObjCInterface:
  case Type::IncompleteArray:
  case Type::VariableArray:
  case Type::FunctionProto:
  case Type::ExtVector:
  case Type::OverflowBehavior:
    llvm_unreachable("Types are eliminated above");

  case Type::Pointer:
  {
    // Merge two pointer types, while trying to preserve typedef info
    QualType LHSPointee = LHS->castAs<PointerType>()->getPointeeType();
    QualType RHSPointee = RHS->castAs<PointerType>()->getPointeeType();
    if (Unqualified) {
      LHSPointee = LHSPointee.getUnqualifiedType();
      RHSPointee = RHSPointee.getUnqualifiedType();
    }
    QualType ResultType = mergeTypes(LHS: LHSPointee, RHS: RHSPointee, OfBlockPointer: false,
                                     Unqualified);
    if (ResultType.isNull())
      return {};
    // Prefer returning an existing sugared type when it already matches.
    if (getCanonicalType(T: LHSPointee) == getCanonicalType(T: ResultType))
      return LHS;
    if (getCanonicalType(T: RHSPointee) == getCanonicalType(T: ResultType))
      return RHS;
    return getPointerType(T: ResultType);
  }
  case Type::BlockPointer:
  {
    // Merge two block pointer types, while trying to preserve typedef info
    QualType LHSPointee = LHS->castAs<BlockPointerType>()->getPointeeType();
    QualType RHSPointee = RHS->castAs<BlockPointerType>()->getPointeeType();
    if (Unqualified) {
      LHSPointee = LHSPointee.getUnqualifiedType();
      RHSPointee = RHSPointee.getUnqualifiedType();
    }
    if (getLangOpts().OpenCL) {
      Qualifiers LHSPteeQual = LHSPointee.getQualifiers();
      Qualifiers RHSPteeQual = RHSPointee.getQualifiers();
      // Blocks can't be an expression in a ternary operator (OpenCL v2.0
      // 6.12.5) thus the following check is asymmetric.
      if (!LHSPteeQual.isAddressSpaceSupersetOf(other: RHSPteeQual, Ctx: *this))
        return {};
      LHSPteeQual.removeAddressSpace();
      RHSPteeQual.removeAddressSpace();
      LHSPointee =
          QualType(LHSPointee.getTypePtr(), LHSPteeQual.getAsOpaqueValue());
      RHSPointee =
          QualType(RHSPointee.getTypePtr(), RHSPteeQual.getAsOpaqueValue());
    }
    QualType ResultType = mergeTypes(LHS: LHSPointee, RHS: RHSPointee, OfBlockPointer,
                                     Unqualified);
    if (ResultType.isNull())
      return {};
    if (getCanonicalType(T: LHSPointee) == getCanonicalType(T: ResultType))
      return LHS;
    if (getCanonicalType(T: RHSPointee) == getCanonicalType(T: ResultType))
      return RHS;
    return getBlockPointerType(T: ResultType);
  }
  case Type::Atomic:
  {
    // Merge two atomic types, while trying to preserve typedef info
    QualType LHSValue = LHS->castAs<AtomicType>()->getValueType();
    QualType RHSValue = RHS->castAs<AtomicType>()->getValueType();
    if (Unqualified) {
      LHSValue = LHSValue.getUnqualifiedType();
      RHSValue = RHSValue.getUnqualifiedType();
    }
    QualType ResultType = mergeTypes(LHS: LHSValue, RHS: RHSValue, OfBlockPointer: false,
                                     Unqualified);
    if (ResultType.isNull())
      return {};
    if (getCanonicalType(T: LHSValue) == getCanonicalType(T: ResultType))
      return LHS;
    if (getCanonicalType(T: RHSValue) == getCanonicalType(T: ResultType))
      return RHS;
    return getAtomicType(T: ResultType);
  }
  case Type::ConstantArray:
  {
    // Note: VLAs and incomplete arrays were folded into ConstantArray above,
    // so LHS/RHS here may be any mix of array flavors.
    const ConstantArrayType* LCAT = getAsConstantArrayType(T: LHS);
    const ConstantArrayType* RCAT = getAsConstantArrayType(T: RHS);
    if (LCAT && RCAT && RCAT->getZExtSize() != LCAT->getZExtSize())
      return {};

    QualType LHSElem = getAsArrayType(T: LHS)->getElementType();
    QualType RHSElem = getAsArrayType(T: RHS)->getElementType();
    if (Unqualified) {
      LHSElem = LHSElem.getUnqualifiedType();
      RHSElem = RHSElem.getUnqualifiedType();
    }

    QualType ResultType = mergeTypes(LHS: LHSElem, RHS: RHSElem, OfBlockPointer: false, Unqualified);
    if (ResultType.isNull())
      return {};

    const VariableArrayType* LVAT = getAsVariableArrayType(T: LHS);
    const VariableArrayType* RVAT = getAsVariableArrayType(T: RHS);

    // If either side is a variable array, and both are complete, check whether
    // the current dimension is definite.
    if (LVAT || RVAT) {
      // Returns (true, size) when the array's dimension is a known constant;
      // (false, _) otherwise.
      auto SizeFetch = [this](const VariableArrayType* VAT,
                              const ConstantArrayType* CAT)
          -> std::pair<bool,llvm::APInt> {
        if (VAT) {
          std::optional<llvm::APSInt> TheInt;
          Expr *E = VAT->getSizeExpr();
          if (E && (TheInt = E->getIntegerConstantExpr(Ctx: *this)))
            return std::make_pair(x: true, y&: *TheInt);
          return std::make_pair(x: false, y: llvm::APSInt());
        }
        if (CAT)
          return std::make_pair(x: true, y: CAT->getSize());
        return std::make_pair(x: false, y: llvm::APInt());
      };

      bool HaveLSize, HaveRSize;
      llvm::APInt LSize, RSize;
      std::tie(args&: HaveLSize, args&: LSize) = SizeFetch(LVAT, LCAT);
      std::tie(args&: HaveRSize, args&: RSize) = SizeFetch(RVAT, RCAT);
      if (HaveLSize && HaveRSize && !llvm::APInt::isSameValue(I1: LSize, I2: RSize))
        return {}; // Definite, but unequal, array dimension
    }

    if (LCAT && getCanonicalType(T: LHSElem) == getCanonicalType(T: ResultType))
      return LHS;
    if (RCAT && getCanonicalType(T: RHSElem) == getCanonicalType(T: ResultType))
      return RHS;
    if (LCAT)
      return getConstantArrayType(EltTy: ResultType, ArySizeIn: LCAT->getSize(),
                                  SizeExpr: LCAT->getSizeExpr(), ASM: ArraySizeModifier(), IndexTypeQuals: 0);
    if (RCAT)
      return getConstantArrayType(EltTy: ResultType, ArySizeIn: RCAT->getSize(),
                                  SizeExpr: RCAT->getSizeExpr(), ASM: ArraySizeModifier(), IndexTypeQuals: 0);
    if (LVAT && getCanonicalType(T: LHSElem) == getCanonicalType(T: ResultType))
      return LHS;
    if (RVAT && getCanonicalType(T: RHSElem) == getCanonicalType(T: ResultType))
      return RHS;
    if (LVAT) {
      // FIXME: This isn't correct! But tricky to implement because
      // the array's size has to be the size of LHS, but the type
      // has to be different.
      return LHS;
    }
    if (RVAT) {
      // FIXME: This isn't correct! But tricky to implement because
      // the array's size has to be the size of RHS, but the type
      // has to be different.
      return RHS;
    }
    if (getCanonicalType(T: LHSElem) == getCanonicalType(T: ResultType)) return LHS;
    if (getCanonicalType(T: RHSElem) == getCanonicalType(T: ResultType)) return RHS;
    return getIncompleteArrayType(elementType: ResultType, ASM: ArraySizeModifier(), elementTypeQuals: 0);
  }
  case Type::FunctionNoProto:
    return mergeFunctionTypes(lhs: LHS, rhs: RHS, OfBlockPointer, Unqualified,
                              /*AllowCXX=*/false, IsConditionalOperator);
  case Type::Record:
  case Type::Enum:
    return mergeTagDefinitions(LHS, RHS);
  case Type::Builtin:
    // Only exactly equal builtin types are compatible, which is tested above.
    return {};
  case Type::Complex:
    // Distinct complex types are incompatible.
    return {};
  case Type::Vector:
    // FIXME: The merged type should be an ExtVector!
    if (areCompatVectorTypes(LHS: LHSCan->castAs<VectorType>(),
                             RHS: RHSCan->castAs<VectorType>()))
      return LHS;
    return {};
  case Type::ConstantMatrix:
    if (areCompatMatrixTypes(LHS: LHSCan->castAs<ConstantMatrixType>(),
                             RHS: RHSCan->castAs<ConstantMatrixType>()))
      return LHS;
    return {};
  case Type::ObjCObject: {
    // Check if the types are assignment compatible.
    // FIXME: This should be type compatibility, e.g. whether
    // "LHS x; RHS x;" at global scope is legal.
    if (canAssignObjCInterfaces(LHS: LHS->castAs<ObjCObjectType>(),
                                RHS: RHS->castAs<ObjCObjectType>()))
      return LHS;
    return {};
  }
  case Type::ObjCObjectPointer:
    if (OfBlockPointer) {
      if (canAssignObjCInterfacesInBlockPointer(
              LHSOPT: LHS->castAs<ObjCObjectPointerType>(),
              RHSOPT: RHS->castAs<ObjCObjectPointerType>(), BlockReturnType))
        return LHS;
      return {};
    }
    if (canAssignObjCInterfaces(LHSOPT: LHS->castAs<ObjCObjectPointerType>(),
                                RHSOPT: RHS->castAs<ObjCObjectPointerType>()))
      return LHS;
    return {};
  case Type::Pipe:
    assert(LHS != RHS &&
           "Equivalent pipe types should have already been handled!");
    return {};
  case Type::ArrayParameter:
    assert(LHS != RHS &&
           "Equivalent ArrayParameter types should have already been handled!");
    return {};
  case Type::BitInt: {
    // Merge two bit-precise int types, while trying to preserve typedef info.
    bool LHSUnsigned = LHS->castAs<BitIntType>()->isUnsigned();
    bool RHSUnsigned = RHS->castAs<BitIntType>()->isUnsigned();
    unsigned LHSBits = LHS->castAs<BitIntType>()->getNumBits();
    unsigned RHSBits = RHS->castAs<BitIntType>()->getNumBits();

    // Like unsigned/int, shouldn't have a type if they don't match.
    if (LHSUnsigned != RHSUnsigned)
      return {};

    if (LHSBits != RHSBits)
      return {};
    return LHS;
  }
  case Type::HLSLAttributedResource: {
    const HLSLAttributedResourceType *LHSTy =
        LHS->castAs<HLSLAttributedResourceType>();
    const HLSLAttributedResourceType *RHSTy =
        RHS->castAs<HLSLAttributedResourceType>();
    assert(LHSTy->getWrappedType() == RHSTy->getWrappedType() &&
           LHSTy->getWrappedType()->isHLSLResourceType() &&
           "HLSLAttributedResourceType should always wrap __hlsl_resource_t");

    if (LHSTy->getAttrs() == RHSTy->getAttrs() &&
        LHSTy->getContainedType() == RHSTy->getContainedType())
      return LHS;
    return {};
  }
  case Type::HLSLInlineSpirv:
    const HLSLInlineSpirvType *LHSTy = LHS->castAs<HLSLInlineSpirvType>();
    const HLSLInlineSpirvType *RHSTy = RHS->castAs<HLSLInlineSpirvType>();

    // Inline SPIR-V types merge only when opcode, size, alignment, and every
    // operand match exactly.
    if (LHSTy->getOpcode() == RHSTy->getOpcode() &&
        LHSTy->getSize() == RHSTy->getSize() &&
        LHSTy->getAlignment() == RHSTy->getAlignment()) {
      for (size_t I = 0; I < LHSTy->getOperands().size(); I++)
        if (LHSTy->getOperands()[I] != RHSTy->getOperands()[I])
          return {};

      return LHS;
    }
    return {};
  }

  llvm_unreachable("Invalid Type::Class!");
}
12193
/// Merge the ExtParameterInfos of two function prototype types. The infos of
/// corresponding parameters may disagree only in the noescape flag; the
/// merged flag is noescape only when both inputs are noescape. Returns false
/// when the infos are irreconcilable. On success, CanUseFirst/CanUseSecond
/// report whether each original type's info list already equals the merged
/// one, and NewParamInfos holds the merged list (left empty when no
/// parameter carries a non-trivial info).
bool ASTContext::mergeExtParameterInfo(
    const FunctionProtoType *FirstFnType, const FunctionProtoType *SecondFnType,
    bool &CanUseFirst, bool &CanUseSecond,
    SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &NewParamInfos) {
  assert(NewParamInfos.empty() && "param info list not empty");
  CanUseFirst = CanUseSecond = true;
  bool FirstHasInfo = FirstFnType->hasExtParameterInfos();
  bool SecondHasInfo = SecondFnType->hasExtParameterInfos();

  // Fast path: if neither type has ext parameter infos, they trivially
  // merge with an empty info list.
  if (!FirstHasInfo && !SecondHasInfo)
    return true;

  bool NeedParamInfo = false;
  // A side without infos is treated as all-default infos in the loop below.
  size_t E = FirstHasInfo ? FirstFnType->getExtParameterInfos().size()
                          : SecondFnType->getExtParameterInfos().size();

  for (size_t I = 0; I < E; ++I) {
    FunctionProtoType::ExtParameterInfo FirstParam, SecondParam;
    if (FirstHasInfo)
      FirstParam = FirstFnType->getExtParameterInfo(I);
    if (SecondHasInfo)
      SecondParam = SecondFnType->getExtParameterInfo(I);

    // Cannot merge unless everything except the noescape flag matches.
    if (FirstParam.withIsNoEscape(NoEscape: false) != SecondParam.withIsNoEscape(NoEscape: false))
      return false;

    // The merged parameter is noescape only if both inputs are noescape.
    bool FirstNoEscape = FirstParam.isNoEscape();
    bool SecondNoEscape = SecondParam.isNoEscape();
    bool IsNoEscape = FirstNoEscape && SecondNoEscape;
    NewParamInfos.push_back(Elt: FirstParam.withIsNoEscape(NoEscape: IsNoEscape));
    if (NewParamInfos.back().getOpaqueValue())
      NeedParamInfo = true;
    if (FirstNoEscape != IsNoEscape)
      CanUseFirst = false;
    if (SecondNoEscape != IsNoEscape)
      CanUseSecond = false;
  }

  // If every merged info came out trivial, drop the list entirely.
  if (!NeedParamInfo)
    NewParamInfos.clear();

  return true;
}
12240
12241void ASTContext::ResetObjCLayout(const ObjCInterfaceDecl *D) {
12242 if (auto It = ObjCLayouts.find(Val: D); It != ObjCLayouts.end()) {
12243 It->second = nullptr;
12244 for (auto *SubClass : ObjCSubClasses.lookup(Val: D))
12245 ResetObjCLayout(D: SubClass);
12246 }
12247}
12248
12249/// mergeObjCGCQualifiers - This routine merges ObjC's GC attribute of 'LHS' and
12250/// 'RHS' attributes and returns the merged version; including for function
12251/// return types.
12252QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) {
12253 QualType LHSCan = getCanonicalType(T: LHS),
12254 RHSCan = getCanonicalType(T: RHS);
12255 // If two types are identical, they are compatible.
12256 if (LHSCan == RHSCan)
12257 return LHS;
12258 if (RHSCan->isFunctionType()) {
12259 if (!LHSCan->isFunctionType())
12260 return {};
12261 QualType OldReturnType =
12262 cast<FunctionType>(Val: RHSCan.getTypePtr())->getReturnType();
12263 QualType NewReturnType =
12264 cast<FunctionType>(Val: LHSCan.getTypePtr())->getReturnType();
12265 QualType ResReturnType =
12266 mergeObjCGCQualifiers(LHS: NewReturnType, RHS: OldReturnType);
12267 if (ResReturnType.isNull())
12268 return {};
12269 if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) {
12270 // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo();
12271 // In either case, use OldReturnType to build the new function type.
12272 const auto *F = LHS->castAs<FunctionType>();
12273 if (const auto *FPT = cast<FunctionProtoType>(Val: F)) {
12274 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
12275 EPI.ExtInfo = getFunctionExtInfo(t: LHS);
12276 QualType ResultType =
12277 getFunctionType(ResultTy: OldReturnType, Args: FPT->getParamTypes(), EPI);
12278 return ResultType;
12279 }
12280 }
12281 return {};
12282 }
12283
12284 // If the qualifiers are different, the types can still be merged.
12285 Qualifiers LQuals = LHSCan.getLocalQualifiers();
12286 Qualifiers RQuals = RHSCan.getLocalQualifiers();
12287 if (LQuals != RQuals) {
12288 // If any of these qualifiers are different, we have a type mismatch.
12289 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() ||
12290 LQuals.getAddressSpace() != RQuals.getAddressSpace())
12291 return {};
12292
12293 // Exactly one GC qualifier difference is allowed: __strong is
12294 // okay if the other type has no GC qualifier but is an Objective
12295 // C object pointer (i.e. implicitly strong by default). We fix
12296 // this by pretending that the unqualified type was actually
12297 // qualified __strong.
12298 Qualifiers::GC GC_L = LQuals.getObjCGCAttr();
12299 Qualifiers::GC GC_R = RQuals.getObjCGCAttr();
12300 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements");
12301
12302 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak)
12303 return {};
12304
12305 if (GC_L == Qualifiers::Strong)
12306 return LHS;
12307 if (GC_R == Qualifiers::Strong)
12308 return RHS;
12309 return {};
12310 }
12311
12312 if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) {
12313 QualType LHSBaseQT = LHS->castAs<ObjCObjectPointerType>()->getPointeeType();
12314 QualType RHSBaseQT = RHS->castAs<ObjCObjectPointerType>()->getPointeeType();
12315 QualType ResQT = mergeObjCGCQualifiers(LHS: LHSBaseQT, RHS: RHSBaseQT);
12316 if (ResQT == LHSBaseQT)
12317 return LHS;
12318 if (ResQT == RHSBaseQT)
12319 return RHS;
12320 }
12321 return {};
12322}
12323
12324//===----------------------------------------------------------------------===//
12325// Integer Predicates
12326//===----------------------------------------------------------------------===//
12327
12328unsigned ASTContext::getIntWidth(QualType T) const {
12329 if (const auto *ED = T->getAsEnumDecl())
12330 T = ED->getIntegerType();
12331 if (T->isBooleanType())
12332 return 1;
12333 if (const auto *EIT = T->getAs<BitIntType>())
12334 return EIT->getNumBits();
12335 // For builtin types, just use the standard type sizing method
12336 return (unsigned)getTypeSize(T);
12337}
12338
/// Return the unsigned counterpart of \p T with the same width. Vectors,
/// _BitInt, and overflow-behavior types are rebuilt element-/width-wise;
/// enums are first replaced by their underlying integer type; builtin and
/// fixed-point types are mapped case by case. Types that are already
/// unsigned fall through the switch default and are returned unchanged.
QualType ASTContext::getCorrespondingUnsignedType(QualType T) const {
  assert((T->hasIntegerRepresentation() || T->isEnumeralType() ||
          T->isFixedPointType()) &&
         "Unexpected type");

  // Turn <4 x signed int> -> <4 x unsigned int>
  if (const auto *VTy = T->getAs<VectorType>())
    return getVectorType(vecType: getCorrespondingUnsignedType(T: VTy->getElementType()),
                         NumElts: VTy->getNumElements(), VecKind: VTy->getVectorKind());

  // For _BitInt, return an unsigned _BitInt with same width.
  if (const auto *EITy = T->getAs<BitIntType>())
    return getBitIntType(/*Unsigned=*/IsUnsigned: true, NumBits: EITy->getNumBits());

  // For the overflow behavior types, construct a new unsigned variant
  // (recursing on the underlying type so the OBT wrapper is preserved).
  if (const auto *OBT = T->getAs<OverflowBehaviorType>())
    return getOverflowBehaviorType(
        Kind: OBT->getBehaviorKind(),
        Underlying: getCorrespondingUnsignedType(T: OBT->getUnderlyingType()));

  // For enums, get the underlying integer type of the enum, and let the general
  // integer type signchanging code handle it.
  if (const auto *ED = T->getAsEnumDecl())
    T = ED->getIntegerType();

  // At this point T must be a builtin integer or fixed-point type.
  switch (T->castAs<BuiltinType>()->getKind()) {
  case BuiltinType::Char_U:
    // Plain `char` is mapped to `unsigned char` even if it's already unsigned
  case BuiltinType::Char_S:
  case BuiltinType::SChar:
  case BuiltinType::Char8:
    return UnsignedCharTy;
  case BuiltinType::Short:
    return UnsignedShortTy;
  case BuiltinType::Int:
    return UnsignedIntTy;
  case BuiltinType::Long:
    return UnsignedLongTy;
  case BuiltinType::LongLong:
    return UnsignedLongLongTy;
  case BuiltinType::Int128:
    return UnsignedInt128Ty;
  // wchar_t is special. It is either signed or not, but when it's signed,
  // there's no matching "unsigned wchar_t". Therefore we return the unsigned
  // version of its underlying type instead.
  case BuiltinType::WChar_S:
    return getUnsignedWCharType();

  // Fixed-point (_Accum/_Fract) types, including saturating variants.
  case BuiltinType::ShortAccum:
    return UnsignedShortAccumTy;
  case BuiltinType::Accum:
    return UnsignedAccumTy;
  case BuiltinType::LongAccum:
    return UnsignedLongAccumTy;
  case BuiltinType::SatShortAccum:
    return SatUnsignedShortAccumTy;
  case BuiltinType::SatAccum:
    return SatUnsignedAccumTy;
  case BuiltinType::SatLongAccum:
    return SatUnsignedLongAccumTy;
  case BuiltinType::ShortFract:
    return UnsignedShortFractTy;
  case BuiltinType::Fract:
    return UnsignedFractTy;
  case BuiltinType::LongFract:
    return UnsignedLongFractTy;
  case BuiltinType::SatShortFract:
    return SatUnsignedShortFractTy;
  case BuiltinType::SatFract:
    return SatUnsignedFractTy;
  case BuiltinType::SatLongFract:
    return SatUnsignedLongFractTy;
  default:
    // Already unsigned (or unsigned fixed-point): return it unchanged.
    assert((T->hasUnsignedIntegerRepresentation() ||
            T->isUnsignedFixedPointType()) &&
           "Unexpected signed integer or fixed point type");
    return T;
  }
}
12418
12419QualType ASTContext::getCorrespondingSignedType(QualType T) const {
12420 assert((T->hasIntegerRepresentation() || T->isEnumeralType() ||
12421 T->isFixedPointType()) &&
12422 "Unexpected type");
12423
12424 // Turn <4 x unsigned int> -> <4 x signed int>
12425 if (const auto *VTy = T->getAs<VectorType>())
12426 return getVectorType(vecType: getCorrespondingSignedType(T: VTy->getElementType()),
12427 NumElts: VTy->getNumElements(), VecKind: VTy->getVectorKind());
12428
12429 // For _BitInt, return a signed _BitInt with same width.
12430 if (const auto *EITy = T->getAs<BitIntType>())
12431 return getBitIntType(/*Unsigned=*/IsUnsigned: false, NumBits: EITy->getNumBits());
12432
12433 // For enums, get the underlying integer type of the enum, and let the general
12434 // integer type signchanging code handle it.
12435 if (const auto *ED = T->getAsEnumDecl())
12436 T = ED->getIntegerType();
12437
12438 switch (T->castAs<BuiltinType>()->getKind()) {
12439 case BuiltinType::Char_S:
12440 // Plain `char` is mapped to `signed char` even if it's already signed
12441 case BuiltinType::Char_U:
12442 case BuiltinType::UChar:
12443 case BuiltinType::Char8:
12444 return SignedCharTy;
12445 case BuiltinType::UShort:
12446 return ShortTy;
12447 case BuiltinType::UInt:
12448 return IntTy;
12449 case BuiltinType::ULong:
12450 return LongTy;
12451 case BuiltinType::ULongLong:
12452 return LongLongTy;
12453 case BuiltinType::UInt128:
12454 return Int128Ty;
12455 // wchar_t is special. It is either unsigned or not, but when it's unsigned,
12456 // there's no matching "signed wchar_t". Therefore we return the signed
12457 // version of its underlying type instead.
12458 case BuiltinType::WChar_U:
12459 return getSignedWCharType();
12460
12461 case BuiltinType::UShortAccum:
12462 return ShortAccumTy;
12463 case BuiltinType::UAccum:
12464 return AccumTy;
12465 case BuiltinType::ULongAccum:
12466 return LongAccumTy;
12467 case BuiltinType::SatUShortAccum:
12468 return SatShortAccumTy;
12469 case BuiltinType::SatUAccum:
12470 return SatAccumTy;
12471 case BuiltinType::SatULongAccum:
12472 return SatLongAccumTy;
12473 case BuiltinType::UShortFract:
12474 return ShortFractTy;
12475 case BuiltinType::UFract:
12476 return FractTy;
12477 case BuiltinType::ULongFract:
12478 return LongFractTy;
12479 case BuiltinType::SatUShortFract:
12480 return SatShortFractTy;
12481 case BuiltinType::SatUFract:
12482 return SatFractTy;
12483 case BuiltinType::SatULongFract:
12484 return SatLongFractTy;
12485 default:
12486 assert(
12487 (T->hasSignedIntegerRepresentation() || T->isSignedFixedPointType()) &&
12488 "Unexpected signed integer or fixed point type");
12489 return T;
12490 }
12491}
12492
// Out-of-line defaulted destructor — presumably serves as the listener's key
// function so its vtable is emitted in this TU; confirm against the header.
ASTMutationListener::~ASTMutationListener() = default;
12494
// Default no-op notification; listeners interested in return-type deduction
// override this.
void ASTMutationListener::DeducedReturnType(const FunctionDecl *FD,
                                            QualType ReturnType) {}
12497
12498//===----------------------------------------------------------------------===//
12499// Builtin Type Computation
12500//===----------------------------------------------------------------------===//
12501
/// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the
/// pointer over the consumed characters. This returns the resultant type. If
/// AllowTypeModifiers is false then modifier like * are not parsed, just basic
/// types. This allows "v2i*" to be parsed as a pointer to a v2i instead of
/// a vector of "i*".
///
/// RequiresICE is filled in on return to indicate whether the value is required
/// to be an Integer Constant Expression.
///
/// A descriptor has the shape: [prefix modifiers] base-type [suffix
/// modifiers]. Prefix modifiers adjust signedness/width ('S', 'U', 'L', 'N',
/// 'W', 'Z', 'O') or mark an ICE requirement ('I'); suffix modifiers add
/// pointers/references ('*', '&', optionally followed by a numeric address
/// space) and cv/restrict qualifiers ('C', 'D', 'R'). On error, \p Error is
/// set and a null QualType is returned.
static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
                                  ASTContext::GetBuiltinTypeError &Error,
                                  bool &RequiresICE,
                                  bool AllowTypeModifiers) {
  // Modifiers.
  int HowLong = 0; // Count of 'long': 0=int, 1=long, 2=long long, 3=__int128.
  bool Signed = false, Unsigned = false;
  RequiresICE = false;

  // Read the prefixed modifiers first.
  bool Done = false;
#ifndef NDEBUG
  bool IsSpecial = false; // Set for 'N'/'W'/'Z'/'O' so they can't be combined.
#endif
  while (!Done) {
    switch (*Str++) {
    default: Done = true; --Str; break; // Not a modifier: back up, fall out.
    case 'I':
      RequiresICE = true;
      break;
    case 'S':
      assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!");
      assert(!Signed && "Can't use 'S' modifier multiple times!");
      Signed = true;
      break;
    case 'U':
      assert(!Signed && "Can't use both 'S' and 'U' modifiers!");
      assert(!Unsigned && "Can't use 'U' modifier multiple times!");
      Unsigned = true;
      break;
    case 'L':
      assert(!IsSpecial && "Can't use 'L' with 'W', 'N', 'Z' or 'O' modifiers");
      assert(HowLong <= 2 && "Can't have LLLL modifier");
      ++HowLong;
      break;
    case 'N':
      // 'N' behaves like 'L' for all non LP64 targets and 'int' otherwise.
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'N' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      if (Context.getTargetInfo().getLongWidth() == 32)
        ++HowLong;
      break;
    case 'W':
      // This modifier represents int64 type.
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'W' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      // Map to whichever of long/long long is 64 bits on this target.
      switch (Context.getTargetInfo().getInt64Type()) {
      default:
        llvm_unreachable("Unexpected integer type");
      case TargetInfo::SignedLong:
        HowLong = 1;
        break;
      case TargetInfo::SignedLongLong:
        HowLong = 2;
        break;
      }
      break;
    case 'Z':
      // This modifier represents int32 type.
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'Z' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      // Map to whichever of int/long/long long is 32 bits on this target.
      switch (Context.getTargetInfo().getIntTypeByWidth(BitWidth: 32, IsSigned: true)) {
      default:
        llvm_unreachable("Unexpected integer type");
      case TargetInfo::SignedInt:
        HowLong = 0;
        break;
      case TargetInfo::SignedLong:
        HowLong = 1;
        break;
      case TargetInfo::SignedLongLong:
        HowLong = 2;
        break;
      }
      break;
    case 'O':
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'O' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      // OpenCL's long is 64 bits, so 'long' suffices; otherwise 'long long'.
      if (Context.getLangOpts().OpenCL)
        HowLong = 1;
      else
        HowLong = 2;
      break;
    }
  }

  QualType Type;

  // Read the base type.
  switch (*Str++) {
  default:
    llvm_unreachable("Unknown builtin type letter!");
  case 'x':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'x'!");
    Type = Context.Float16Ty;
    break;
  case 'y':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'y'!");
    Type = Context.BFloat16Ty;
    break;
  case 'v':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'v'!");
    Type = Context.VoidTy;
    break;
  case 'h':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'h'!");
    Type = Context.HalfTy;
    break;
  case 'f':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'f'!");
    Type = Context.FloatTy;
    break;
  case 'd':
    // 'd' scales with 'L': double, long double, or __float128.
    assert(HowLong < 3 && !Signed && !Unsigned &&
           "Bad modifiers used with 'd'!");
    if (HowLong == 1)
      Type = Context.LongDoubleTy;
    else if (HowLong == 2)
      Type = Context.Float128Ty;
    else
      Type = Context.DoubleTy;
    break;
  case 's':
    assert(HowLong == 0 && "Bad modifiers used with 's'!");
    if (Unsigned)
      Type = Context.UnsignedShortTy;
    else
      Type = Context.ShortTy;
    break;
  case 'i':
    // 'i' scales with 'L': int, long, long long, or __int128.
    if (HowLong == 3)
      Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty;
    else if (HowLong == 2)
      Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy;
    else if (HowLong == 1)
      Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy;
    else
      Type = Unsigned ? Context.UnsignedIntTy : Context.IntTy;
    break;
  case 'c':
    assert(HowLong == 0 && "Bad modifiers used with 'c'!");
    if (Signed)
      Type = Context.SignedCharTy;
    else if (Unsigned)
      Type = Context.UnsignedCharTy;
    else
      Type = Context.CharTy;
    break;
  case 'b': // boolean
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!");
    Type = Context.BoolTy;
    break;
  case 'z': // size_t.
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!");
    Type = Context.getSizeType();
    break;
  case 'w': // wchar_t.
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'w'!");
    Type = Context.getWideCharType();
    break;
  case 'F': // constant CFString.
    Type = Context.getCFConstantStringType();
    break;
  case 'G': // ObjC 'id'.
    Type = Context.getObjCIdType();
    break;
  case 'H': // ObjC 'SEL'.
    Type = Context.getObjCSelType();
    break;
  case 'M': // ObjC 'struct objc_super'.
    Type = Context.getObjCSuperType();
    break;
  case 'a':
    Type = Context.getBuiltinVaListType();
    assert(!Type.isNull() && "builtin va list type not initialized!");
    break;
  case 'A':
    // This is a "reference" to a va_list; however, what exactly
    // this means depends on how va_list is defined. There are two
    // different kinds of va_list: ones passed by value, and ones
    // passed by reference. An example of a by-value va_list is
    // x86, where va_list is a char*. An example of by-ref va_list
    // is x86-64, where va_list is a __va_list_tag[1]. For x86,
    // we want this argument to be a char*&; for x86-64, we want
    // it to be a __va_list_tag*.
    Type = Context.getBuiltinVaListType();
    assert(!Type.isNull() && "builtin va list type not initialized!");
    if (Type->isArrayType())
      Type = Context.getArrayDecayedType(Ty: Type);
    else
      Type = Context.getLValueReferenceType(T: Type);
    break;
  case 'q': {
    // Scalable vector: 'q' <#elts> <element-descriptor>.
    char *End;
    unsigned NumElements = strtoul(nptr: Str, endptr: &End, base: 10);
    assert(End != Str && "Missing vector size");
    Str = End;

    QualType ElementType = DecodeTypeFromStr(Str, Context, Error,
                                             RequiresICE, AllowTypeModifiers: false);
    assert(!RequiresICE && "Can't require vector ICE");

    Type = Context.getScalableVectorType(EltTy: ElementType, NumElts: NumElements);
    break;
  }
  case 'Q': {
    // Target builtin type, selected by a second letter.
    switch (*Str++) {
    case 'a': {
      Type = Context.SveCountTy;
      break;
    }
    case 'b': {
      Type = Context.AMDGPUBufferRsrcTy;
      break;
    }
    case 't': {
      Type = Context.AMDGPUTextureTy;
      break;
    }
    case 'r': {
      Type = Context.HLSLResourceTy;
      break;
    }
    default:
      llvm_unreachable("Unexpected target builtin type");
    }
    break;
  }
  case 'V': {
    // Fixed-width generic vector: 'V' <#elts> <element-descriptor>.
    char *End;
    unsigned NumElements = strtoul(nptr: Str, endptr: &End, base: 10);
    assert(End != Str && "Missing vector size");
    Str = End;

    QualType ElementType = DecodeTypeFromStr(Str, Context, Error,
                                             RequiresICE, AllowTypeModifiers: false);
    assert(!RequiresICE && "Can't require vector ICE");

    // TODO: No way to make AltiVec vectors in builtins yet.
    Type = Context.getVectorType(vecType: ElementType, NumElts: NumElements, VecKind: VectorKind::Generic);
    break;
  }
  case 'E': {
    // ext_vector_type: 'E' <#elts> <element-descriptor>.
    char *End;

    unsigned NumElements = strtoul(nptr: Str, endptr: &End, base: 10);
    assert(End != Str && "Missing vector size");

    Str = End;

    QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE,
                                             AllowTypeModifiers: false);
    Type = Context.getExtVectorType(vecType: ElementType, NumElts: NumElements);
    break;
  }
  case 'X': {
    // _Complex of the following descriptor.
    QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE,
                                             AllowTypeModifiers: false);
    assert(!RequiresICE && "Can't require complex ICE");
    Type = Context.getComplexType(T: ElementType);
    break;
  }
  case 'Y': // ptrdiff_t.
    Type = Context.getPointerDiffType();
    break;
  case 'P':
    // FILE* requires <stdio.h> to have been seen; report otherwise.
    Type = Context.getFILEType();
    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_stdio;
      return {};
    }
    break;
  case 'J':
    // jmp_buf / sigjmp_buf ('S' prefix) requires <setjmp.h>.
    if (Signed)
      Type = Context.getsigjmp_bufType();
    else
      Type = Context.getjmp_bufType();

    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_setjmp;
      return {};
    }
    break;
  case 'K':
    // ucontext_t requires <ucontext.h>.
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'K'!");
    Type = Context.getucontext_tType();

    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_ucontext;
      return {};
    }
    break;
  case 'p': // pid_t.
    Type = Context.getProcessIDType();
    break;
  case 'm':
    Type = Context.MFloat8Ty;
    break;
  }

  // If there are modifiers and if we're allowed to parse them, go for it.
  Done = !AllowTypeModifiers;
  while (!Done) {
    switch (char c = *Str++) {
    default: Done = true; --Str; break; // Not a suffix modifier: back up.
    case '*':
    case '&': {
      // Both pointers and references can have their pointee types
      // qualified with an address space.
      char *End;
      unsigned AddrSpace = strtoul(nptr: Str, endptr: &End, base: 10);
      if (End != Str) {
        // Note AddrSpace == 0 is not the same as an unspecified address space.
        Type = Context.getAddrSpaceQualType(
            T: Type,
            AddressSpace: Context.getLangASForBuiltinAddressSpace(AS: AddrSpace));
        Str = End;
      }
      if (c == '*')
        Type = Context.getPointerType(T: Type);
      else
        Type = Context.getLValueReferenceType(T: Type);
      break;
    }
    // FIXME: There's no way to have a built-in with an rvalue ref arg.
    case 'C':
      Type = Type.withConst();
      break;
    case 'D':
      Type = Context.getVolatileType(T: Type);
      break;
    case 'R':
      Type = Type.withRestrict();
      break;
    }
  }

  assert((!RequiresICE || Type->isIntegralOrEnumerationType()) &&
         "Integer constant 'I' type must be an integer");

  return Type;
}
12869
12870// On some targets such as PowerPC, some of the builtins are defined with custom
12871// type descriptors for target-dependent types. These descriptors are decoded in
12872// other functions, but it may be useful to be able to fall back to default
12873// descriptor decoding to define builtins mixing target-dependent and target-
12874// independent types. This function allows decoding one type descriptor with
12875// default decoding.
12876QualType ASTContext::DecodeTypeStr(const char *&Str, const ASTContext &Context,
12877 GetBuiltinTypeError &Error, bool &RequireICE,
12878 bool AllowTypeModifiers) const {
12879 return DecodeTypeFromStr(Str, Context, Error, RequiresICE&: RequireICE, AllowTypeModifiers);
12880}
12881
/// GetBuiltinType - Return the type for the specified builtin.
///
/// The builtin's type string encodes the return type first, then each
/// parameter type, optionally terminated by '.' for a variadic builtin.
/// On failure, \p Error is set and a null QualType is returned. If
/// \p IntegerConstantArgs is non-null, a bit is set in it for each argument
/// position that must be an integer constant expression.
QualType ASTContext::GetBuiltinType(unsigned Id,
                                    GetBuiltinTypeError &Error,
                                    unsigned *IntegerConstantArgs) const {
  const char *TypeStr = BuiltinInfo.getTypeString(ID: Id);
  // An empty type string means the type is unsupported/unknown here.
  if (TypeStr[0] == '\0') {
    Error = GE_Missing_type;
    return {};
  }

  SmallVector<QualType, 8> ArgTypes;

  bool RequiresICE = false;
  Error = GE_None;
  // The first descriptor is the return type.
  QualType ResType = DecodeTypeFromStr(Str&: TypeStr, Context: *this, Error,
                                       RequiresICE, AllowTypeModifiers: true);
  if (Error != GE_None)
    return {};

  assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE");

  // Decode each parameter descriptor until the end or the variadic marker.
  while (TypeStr[0] && TypeStr[0] != '.') {
    QualType Ty = DecodeTypeFromStr(Str&: TypeStr, Context: *this, Error, RequiresICE, AllowTypeModifiers: true);
    if (Error != GE_None)
      return {};

    // If this argument is required to be an IntegerConstantExpression and the
    // caller cares, fill in the bitmask we return.
    if (RequiresICE && IntegerConstantArgs)
      *IntegerConstantArgs |= 1 << ArgTypes.size();

    // Do array -> pointer decay. The builtin should use the decayed type.
    if (Ty->isArrayType())
      Ty = getArrayDecayedType(Ty);

    ArgTypes.push_back(Elt: Ty);
  }

  // __GetExceptionInfo deliberately gets no computed type here.
  if (Id == Builtin::BI__GetExceptionInfo)
    return {};

  assert((TypeStr[0] != '.' || TypeStr[1] == 0) &&
         "'.' should only occur at end of builtin type list!");

  bool Variadic = (TypeStr[0] == '.');

  FunctionType::ExtInfo EI(Target->getDefaultCallingConv());
  if (BuiltinInfo.isNoReturn(ID: Id))
    EI = EI.withNoReturn(noReturn: true);

  // We really shouldn't be making a no-proto type here.
  if (ArgTypes.empty() && Variadic && !getLangOpts().requiresStrictPrototypes())
    return getFunctionNoProtoType(ResultTy: ResType, Info: EI);

  FunctionProtoType::ExtProtoInfo EPI;
  EPI.ExtInfo = EI;
  EPI.Variadic = Variadic;
  // nothrow builtins get the appropriate exception spec for the C++ dialect.
  if (getLangOpts().CPlusPlus && BuiltinInfo.isNoThrow(ID: Id))
    EPI.ExceptionSpec.Type =
        getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone;

  return getFunctionType(ResultTy: ResType, Args: ArgTypes, EPI);
}
12945
/// Compute the baseline GVALinkage for \p FD from its visibility, template
/// specialization kind, and inline semantics — before the attribute- and
/// external-AST-source adjustments layered on by GetGVALinkageForFunction.
static GVALinkage basicGVALinkageForFunction(const ASTContext &Context,
                                             const FunctionDecl *FD) {
  if (!FD->isExternallyVisible())
    return GVA_Internal;

  // Non-user-provided functions get emitted as weak definitions with every
  // use, no matter whether they've been explicitly instantiated etc.
  if (!FD->isUserProvided())
    return GVA_DiscardableODR;

  // First pick the linkage implied by the template specialization kind;
  // explicit instantiations return immediately, the rest may be weakened
  // below by inline semantics.
  GVALinkage External;
  switch (FD->getTemplateSpecializationKind()) {
  case TSK_Undeclared:
  case TSK_ExplicitSpecialization:
    External = GVA_StrongExternal;
    break;

  case TSK_ExplicitInstantiationDefinition:
    return GVA_StrongODR;

  // C++11 [temp.explicit]p10:
  //   [ Note: The intent is that an inline function that is the subject of
  //   an explicit instantiation declaration will still be implicitly
  //   instantiated when used so that the body can be considered for
  //   inlining, but that no out-of-line copy of the inline function would be
  //   generated in the translation unit. -- end note ]
  case TSK_ExplicitInstantiationDeclaration:
    return GVA_AvailableExternally;

  case TSK_ImplicitInstantiation:
    External = GVA_DiscardableODR;
    break;
  }

  if (!FD->isInlined())
    return External;

  if ((!Context.getLangOpts().CPlusPlus &&
       !Context.getTargetInfo().getCXXABI().isMicrosoft() &&
       !FD->hasAttr<DLLExportAttr>()) ||
      FD->hasAttr<GNUInlineAttr>()) {
    // FIXME: This doesn't match gcc's behavior for dllexport inline functions.

    // GNU or C99 inline semantics. Determine whether this symbol should be
    // externally visible.
    if (FD->isInlineDefinitionExternallyVisible())
      return External;

    // C99 inline semantics, where the symbol is not externally visible.
    return GVA_AvailableExternally;
  }

  // Functions specified with extern and inline in -fms-compatibility mode
  // forcibly get emitted. While the body of the function cannot be later
  // replaced, the function definition cannot be discarded.
  if (FD->isMSExternInline())
    return GVA_StrongODR;

  if (Context.getTargetInfo().getCXXABI().isMicrosoft() &&
      isa<CXXConstructorDecl>(Val: FD) &&
      cast<CXXConstructorDecl>(Val: FD)->isInheritingConstructor() &&
      !FD->hasAttr<DLLExportAttr>()) {
    // Both Clang and MSVC implement inherited constructors as forwarding
    // thunks that delegate to the base constructor. Keep non-dllexport
    // inheriting constructor thunks internal since they are not needed
    // outside the translation unit.
    //
    // dllexport inherited constructors are exempted so they are externally
    // visible, matching MSVC's export behavior. Inherited constructors
    // whose parameters prevent ABI-compatible forwarding (e.g. callee-
    // cleanup types) are excluded from export in Sema to avoid silent
    // runtime mismatches.
    return GVA_Internal;
  }

  return GVA_DiscardableODR;
}
13023
13024static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context,
13025 const Decl *D, GVALinkage L) {
13026 // See http://msdn.microsoft.com/en-us/library/xa0d9ste.aspx
13027 // dllexport/dllimport on inline functions.
13028 if (D->hasAttr<DLLImportAttr>()) {
13029 if (L == GVA_DiscardableODR || L == GVA_StrongODR)
13030 return GVA_AvailableExternally;
13031 } else if (D->hasAttr<DLLExportAttr>()) {
13032 if (L == GVA_DiscardableODR)
13033 return GVA_StrongODR;
13034 } else if (Context.getLangOpts().CUDA && Context.getLangOpts().CUDAIsDevice) {
13035 // Device-side functions with __global__ attribute must always be
13036 // visible externally so they can be launched from host.
13037 if (D->hasAttr<CUDAGlobalAttr>() &&
13038 (L == GVA_DiscardableODR || L == GVA_Internal))
13039 return GVA_StrongODR;
13040 // Single source offloading languages like CUDA/HIP need to be able to
13041 // access static device variables from host code of the same compilation
13042 // unit. This is done by externalizing the static variable with a shared
13043 // name between the host and device compilation which is the same for the
13044 // same compilation unit whereas different among different compilation
13045 // units.
13046 if (Context.shouldExternalize(D))
13047 return GVA_StrongExternal;
13048 }
13049 return L;
13050}
13051
13052/// Adjust the GVALinkage for a declaration based on what an external AST source
13053/// knows about whether there can be other definitions of this declaration.
13054static GVALinkage
13055adjustGVALinkageForExternalDefinitionKind(const ASTContext &Ctx, const Decl *D,
13056 GVALinkage L) {
13057 ExternalASTSource *Source = Ctx.getExternalSource();
13058 if (!Source)
13059 return L;
13060
13061 switch (Source->hasExternalDefinitions(D)) {
13062 case ExternalASTSource::EK_Never:
13063 // Other translation units rely on us to provide the definition.
13064 if (L == GVA_DiscardableODR)
13065 return GVA_StrongODR;
13066 break;
13067
13068 case ExternalASTSource::EK_Always:
13069 return GVA_AvailableExternally;
13070
13071 case ExternalASTSource::EK_ReplyHazy:
13072 break;
13073 }
13074 return L;
13075}
13076
13077GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const {
13078 return adjustGVALinkageForExternalDefinitionKind(Ctx: *this, D: FD,
13079 L: adjustGVALinkageForAttributes(Context: *this, D: FD,
13080 L: basicGVALinkageForFunction(Context: *this, FD)));
13081}
13082
/// Compute the baseline GVALinkage for \p VD from its visibility, storage,
/// inline-variable kind, and template specialization kind — before the
/// attribute- and external-AST-source adjustments layered on by
/// GetGVALinkageForVariable.
static GVALinkage basicGVALinkageForVariable(const ASTContext &Context,
                                             const VarDecl *VD) {
  // As an extension for interactive REPLs, make sure constant variables are
  // only emitted once instead of LinkageComputer::getLVForNamespaceScopeDecl
  // marking them as internal.
  if (Context.getLangOpts().CPlusPlus &&
      Context.getLangOpts().IncrementalExtensions &&
      VD->getType().isConstQualified() &&
      !VD->getType().isVolatileQualified() && !VD->isInline() &&
      !isa<VarTemplateSpecializationDecl>(Val: VD) && !VD->getDescribedVarTemplate())
    return GVA_DiscardableODR;

  if (!VD->isExternallyVisible())
    return GVA_Internal;

  if (VD->isStaticLocal()) {
    // Walk out of nested non-function scopes (e.g. blocks) to find the
    // nearest enclosing FunctionDecl.
    const DeclContext *LexicalContext = VD->getParentFunctionOrMethod();
    while (LexicalContext && !isa<FunctionDecl>(Val: LexicalContext))
      LexicalContext = LexicalContext->getLexicalParent();

    // ObjC Blocks can create local variables that don't have a FunctionDecl
    // LexicalContext.
    if (!LexicalContext)
      return GVA_DiscardableODR;

    // Otherwise, let the static local variable inherit its linkage from the
    // nearest enclosing function.
    auto StaticLocalLinkage =
        Context.GetGVALinkageForFunction(FD: cast<FunctionDecl>(Val: LexicalContext));

    // Itanium ABI 5.2.2: "Each COMDAT group [for a static local variable] must
    // be emitted in any object with references to the symbol for the object it
    // contains, whether inline or out-of-line."
    // Similar behavior is observed with MSVC. An alternative ABI could use
    // StrongODR/AvailableExternally to match the function, but none are
    // known/supported currently.
    if (StaticLocalLinkage == GVA_StrongODR ||
        StaticLocalLinkage == GVA_AvailableExternally)
      return GVA_DiscardableODR;
    return StaticLocalLinkage;
  }

  // MSVC treats in-class initialized static data members as definitions.
  // By giving them non-strong linkage, out-of-line definitions won't
  // cause link errors.
  if (Context.isMSStaticDataMemberInlineDefinition(VD))
    return GVA_DiscardableODR;

  // Most non-template variables have strong linkage; inline variables are
  // linkonce_odr or (occasionally, for compatibility) weak_odr.
  GVALinkage StrongLinkage;
  switch (Context.getInlineVariableDefinitionKind(VD)) {
  case ASTContext::InlineVariableDefinitionKind::None:
    StrongLinkage = GVA_StrongExternal;
    break;
  case ASTContext::InlineVariableDefinitionKind::Weak:
  case ASTContext::InlineVariableDefinitionKind::WeakUnknown:
    StrongLinkage = GVA_DiscardableODR;
    break;
  case ASTContext::InlineVariableDefinitionKind::Strong:
    StrongLinkage = GVA_StrongODR;
    break;
  }

  // Finally, fold in the template specialization kind.
  switch (VD->getTemplateSpecializationKind()) {
  case TSK_Undeclared:
    return StrongLinkage;

  case TSK_ExplicitSpecialization:
    // MSVC emits explicitly specialized static data members as definitions.
    return Context.getTargetInfo().getCXXABI().isMicrosoft() &&
                   VD->isStaticDataMember()
               ? GVA_StrongODR
               : StrongLinkage;

  case TSK_ExplicitInstantiationDefinition:
    return GVA_StrongODR;

  case TSK_ExplicitInstantiationDeclaration:
    return GVA_AvailableExternally;

  case TSK_ImplicitInstantiation:
    return GVA_DiscardableODR;
  }

  llvm_unreachable("Invalid Linkage!");
}
13169
/// Compute the GVA (GlobalValue) linkage for a variable: start from the
/// basic linkage implied by the declaration itself, then let the two
/// adjustment helpers refine it for attributes and for the external
/// definition kind.
GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) const {
  return adjustGVALinkageForExternalDefinitionKind(Ctx: *this, D: VD,
      L: adjustGVALinkageForAttributes(Context: *this, D: VD,
          L: basicGVALinkageForVariable(Context: *this, VD)));
}
13175
/// Returns true if this declaration must be emitted to object code even if
/// nothing in the current TU is seen to reference it (e.g. it is externally
/// visible, has initialization/destruction side effects, or a pragma/attr
/// forces emission). The checks are order-sensitive: early filters reject
/// decls that can never require emission, then kind-specific rules apply.
bool ASTContext::DeclMustBeEmitted(const Decl *D) {
  if (const auto *VD = dyn_cast<VarDecl>(Val: D)) {
    if (!VD->isFileVarDecl())
      return false;
    // Global named register variables (GNU extension) are never emitted.
    if (VD->getStorageClass() == SC_Register)
      return false;
    if (VD->getDescribedVarTemplate() ||
        isa<VarTemplatePartialSpecializationDecl>(Val: VD))
      return false;
  } else if (const auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
    // We never need to emit an uninstantiated function template.
    if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
      return false;
  } else if (isa<PragmaCommentDecl>(Val: D))
    return true;
  else if (isa<PragmaDetectMismatchDecl>(Val: D))
    return true;
  else if (isa<OMPRequiresDecl>(Val: D))
    return true;
  else if (isa<OMPThreadPrivateDecl>(Val: D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<OMPAllocateDecl>(Val: D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<OMPDeclareReductionDecl>(Val: D) || isa<OMPDeclareMapperDecl>(Val: D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<ImportDecl>(Val: D))
    return true;
  else
    // Anything not explicitly handled above never requires emission.
    return false;

  // If this is a member of a class template, we do not need to emit it.
  if (D->getDeclContext()->isDependentContext())
    return false;

  // Weak references don't produce any output by themselves.
  if (D->hasAttr<WeakRefAttr>())
    return false;

  // SYCL device compilation requires that functions defined with the
  // sycl_kernel_entry_point or sycl_external attributes be emitted. All
  // other entities are emitted only if they are used by a function
  // defined with one of those attributes.
  if (LangOpts.SYCLIsDevice)
    return isa<FunctionDecl>(Val: D) && (D->hasAttr<SYCLKernelEntryPointAttr>() ||
                                      D->hasAttr<SYCLExternalAttr>());

  // Aliases and used decls are required.
  if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>())
    return true;

  if (const auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
    // Forward declarations aren't required.
    if (!FD->doesThisDeclarationHaveABody())
      return FD->doesDeclarationForceExternallyVisibleDefinition();

    // Constructors and destructors are required.
    if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>())
      return true;

    // The key function for a class is required. This rule only comes
    // into play when inline functions can be key functions, though.
    if (getTargetInfo().getCXXABI().canKeyFunctionBeInline()) {
      if (const auto *MD = dyn_cast<CXXMethodDecl>(Val: FD)) {
        const CXXRecordDecl *RD = MD->getParent();
        if (MD->isOutOfLine() && RD->isDynamicClass()) {
          const CXXMethodDecl *KeyFunc = getCurrentKeyFunction(RD);
          if (KeyFunc && KeyFunc->getCanonicalDecl() == MD->getCanonicalDecl())
            return true;
        }
      }
    }

    GVALinkage Linkage = GetGVALinkageForFunction(FD);

    // static, static inline, always_inline, and extern inline functions can
    // always be deferred. Normal inline functions can be deferred in C99/C++.
    // Implicit template instantiations can also be deferred in C++.
    return !isDiscardableGVALinkage(L: Linkage);
  }

  // Only VarDecls reach this point (see the filter at the top).
  const auto *VD = cast<VarDecl>(Val: D);
  assert(VD->isFileVarDecl() && "Expected file scoped var");

  // If the decl is marked as `declare target to`, it should be emitted for the
  // host and for the device.
  if (LangOpts.OpenMP &&
      OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
    return true;

  if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly &&
      !isMSStaticDataMemberInlineDefinition(VD))
    return false;

  if (VD->shouldEmitInExternalSource())
    return false;

  // Variables that can be needed in other TUs are required.
  auto Linkage = GetGVALinkageForVariable(VD);
  if (!isDiscardableGVALinkage(L: Linkage))
    return true;

  // We never need to emit a variable that is available in another TU.
  if (Linkage == GVA_AvailableExternally)
    return false;

  // Variables that have destruction with side-effects are required.
  if (VD->needsDestruction(Ctx: *this))
    return true;

  // Variables that have initialization with side-effects are required.
  if (VD->hasInitWithSideEffects())
    return true;

  // Likewise, variables with tuple-like bindings are required if their
  // bindings have side-effects.
  if (const auto *DD = dyn_cast<DecompositionDecl>(Val: VD)) {
    for (const auto *BD : DD->flat_bindings())
      if (const auto *BindingVD = BD->getHoldingVar())
        if (DeclMustBeEmitted(D: BindingVD))
          return true;
  }

  return false;
}
13301
13302void ASTContext::forEachMultiversionedFunctionVersion(
13303 const FunctionDecl *FD,
13304 llvm::function_ref<void(FunctionDecl *)> Pred) const {
13305 assert(FD->isMultiVersion() && "Only valid for multiversioned functions");
13306 llvm::SmallDenseSet<const FunctionDecl*, 4> SeenDecls;
13307 FD = FD->getMostRecentDecl();
13308 // FIXME: The order of traversal here matters and depends on the order of
13309 // lookup results, which happens to be (mostly) oldest-to-newest, but we
13310 // shouldn't rely on that.
13311 for (auto *CurDecl :
13312 FD->getDeclContext()->getRedeclContext()->lookup(Name: FD->getDeclName())) {
13313 FunctionDecl *CurFD = CurDecl->getAsFunction()->getMostRecentDecl();
13314 if (CurFD && hasSameType(T1: CurFD->getType(), T2: FD->getType()) &&
13315 SeenDecls.insert(V: CurFD).second) {
13316 Pred(CurFD);
13317 }
13318 }
13319}
13320
/// Compute the calling convention to use for a function declared without an
/// explicit CC attribute. C++ instance methods defer to the C++ ABI object;
/// otherwise the -fdefault-calling-convention setting is honored where the
/// target/variadic-ness permits, falling back to the target's default.
CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic,
                                                    bool IsCXXMethod) const {
  // Pass through to the C++ ABI object
  if (IsCXXMethod)
    return ABI->getDefaultMethodCallConv(isVariadic: IsVariadic);

  switch (LangOpts.getDefaultCallingConv()) {
  case LangOptions::DCC_None:
    break;
  case LangOptions::DCC_CDecl:
    return CC_C;
  case LangOptions::DCC_FastCall:
    // __fastcall is only usable here with SSE2 and non-variadic functions.
    if (getTargetInfo().hasFeature(Feature: "sse2") && !IsVariadic)
      return CC_X86FastCall;
    break;
  case LangOptions::DCC_StdCall:
    if (!IsVariadic)
      return CC_X86StdCall;
    break;
  case LangOptions::DCC_VectorCall:
    // __vectorcall cannot be applied to variadic functions.
    if (!IsVariadic)
      return CC_X86VectorCall;
    break;
  case LangOptions::DCC_RegCall:
    // __regcall cannot be applied to variadic functions.
    if (!IsVariadic)
      return CC_X86RegCall;
    break;
  case LangOptions::DCC_RtdCall:
    if (!IsVariadic)
      return CC_M68kRTD;
    break;
  }
  // Either no default CC was requested or it is not applicable here.
  return Target->getDefaultCallingConv();
}
13357
/// Whether \p RD is a "nearly empty" class in the C++ ABI sense; the answer
/// is delegated entirely to the ABI object.
bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const {
  // Pass through to the C++ ABI object
  return ABI->isNearlyEmpty(RD);
}
13362
13363VTableContextBase *ASTContext::getVTableContext() {
13364 if (!VTContext) {
13365 auto ABI = Target->getCXXABI();
13366 if (ABI.isMicrosoft())
13367 VTContext.reset(p: new MicrosoftVTableContext(*this));
13368 else {
13369 auto ComponentLayout = getLangOpts().RelativeCXXABIVTables
13370 ? ItaniumVTableContext::Relative
13371 : ItaniumVTableContext::Pointer;
13372 VTContext.reset(p: new ItaniumVTableContext(*this, ComponentLayout));
13373 }
13374 }
13375 return VTContext.get();
13376}
13377
/// Create a new mangle context for the given target (or the primary target
/// when \p T is null), choosing Itanium or Microsoft mangling by ABI kind.
MangleContext *ASTContext::createMangleContext(const TargetInfo *T) {
  if (!T)
    T = Target;
  switch (T->getCXXABI().getKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::XL:
    // All Itanium-family ABIs share the Itanium mangler.
    return ItaniumMangleContext::create(Context&: *this, Diags&: getDiagnostics());
  case TargetCXXABI::Microsoft:
    return MicrosoftMangleContext::create(Context&: *this, Diags&: getDiagnostics());
  }
  llvm_unreachable("Unsupported ABI");
}
13398
/// Create an auxiliary ("device") mangle context for offloading targets.
/// For Itanium-family ABIs, lambdas are discriminated by the device lambda
/// mangling number rather than the host one.
MangleContext *ASTContext::createDeviceMangleContext(const TargetInfo &T) {
  assert(T.getCXXABI().getKind() != TargetCXXABI::Microsoft &&
         "Device mangle context does not support Microsoft mangling.");
  switch (T.getCXXABI().getKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::XL:
    return ItaniumMangleContext::create(
        Context&: *this, Diags&: getDiagnostics(),
        // Custom discriminator: use the device-side lambda mangling number
        // for lambda closure types; everything else uses the default.
        Discriminator: [](ASTContext &, const NamedDecl *ND) -> UnsignedOrNone {
          if (const auto *RD = dyn_cast<CXXRecordDecl>(Val: ND))
            return RD->getDeviceLambdaManglingNumber();
          return std::nullopt;
        },
        /*IsAux=*/true);
  case TargetCXXABI::Microsoft:
    // Unreachable per the assert above, but kept for switch completeness.
    return MicrosoftMangleContext::create(Context&: *this, Diags&: getDiagnostics(),
                                          /*IsAux=*/true);
  }
  llvm_unreachable("Unsupported ABI");
}
13427
13428MangleContext *ASTContext::cudaNVInitDeviceMC() {
13429 // If the host and device have different C++ ABIs, mark it as the device
13430 // mangle context so that the mangling needs to retrieve the additional
13431 // device lambda mangling number instead of the regular host one.
13432 if (getAuxTargetInfo() && getTargetInfo().getCXXABI().isMicrosoft() &&
13433 getAuxTargetInfo()->getCXXABI().isItaniumFamily()) {
13434 return createDeviceMangleContext(T: *getAuxTargetInfo());
13435 }
13436
13437 return createMangleContext(T: getAuxTargetInfo());
13438}
13439
// Out-of-line defaulted destructor for the CXXABI interface declared in
// CXXABI.h; anchors the definition in this translation unit.
CXXABI::~CXXABI() = default;
13441
/// Approximate number of bytes used by the ASTContext's side tables, for
/// memory-usage reporting. Sums each container's capacity in bytes.
size_t ASTContext::getSideTableAllocatedMemory() const {
  return ASTRecordLayouts.getMemorySize() +
         llvm::capacity_in_bytes(X: ObjCLayouts) +
         llvm::capacity_in_bytes(X: KeyFunctions) +
         llvm::capacity_in_bytes(X: ObjCImpls) +
         llvm::capacity_in_bytes(X: BlockVarCopyInits) +
         llvm::capacity_in_bytes(X: DeclAttrs) +
         llvm::capacity_in_bytes(X: TemplateOrInstantiation) +
         llvm::capacity_in_bytes(X: InstantiatedFromUsingDecl) +
         llvm::capacity_in_bytes(X: InstantiatedFromUsingShadowDecl) +
         llvm::capacity_in_bytes(X: InstantiatedFromUnnamedFieldDecl) +
         llvm::capacity_in_bytes(X: OverriddenMethods) +
         llvm::capacity_in_bytes(X: Types) +
         llvm::capacity_in_bytes(x: VariableArrayTypes);
}
13457
13458/// getIntTypeForBitwidth -
13459/// sets integer QualTy according to specified details:
13460/// bitwidth, signed/unsigned.
13461/// Returns empty type if there is no appropriate target types.
13462QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth,
13463 unsigned Signed) const {
13464 TargetInfo::IntType Ty = getTargetInfo().getIntTypeByWidth(BitWidth: DestWidth, IsSigned: Signed);
13465 CanQualType QualTy = getFromTargetType(Type: Ty);
13466 if (!QualTy && DestWidth == 128)
13467 return Signed ? Int128Ty : UnsignedInt128Ty;
13468 return QualTy;
13469}
13470
/// getRealTypeForBitwidth -
/// Map a requested floating-point bit width (and optional explicit float
/// mode) to the corresponding builtin QualType for this target.
/// Returns empty type if there is no appropriate target type.
QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth,
                                            FloatModeKind ExplicitType) const {
  FloatModeKind Ty =
      getTargetInfo().getRealTypeByWidth(BitWidth: DestWidth, ExplicitType);
  switch (Ty) {
  case FloatModeKind::Half:
    return HalfTy;
  case FloatModeKind::Float:
    return FloatTy;
  case FloatModeKind::Double:
    return DoubleTy;
  case FloatModeKind::LongDouble:
    return LongDoubleTy;
  case FloatModeKind::Float128:
    return Float128Ty;
  case FloatModeKind::Ibm128:
    return Ibm128Ty;
  case FloatModeKind::NoFloat:
    // Target has no floating type of this width.
    return {};
  }

  llvm_unreachable("Unhandled TargetInfo::RealType value");
}
13497
13498void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) {
13499 if (Number <= 1)
13500 return;
13501
13502 MangleNumbers[ND] = Number;
13503
13504 if (Listener)
13505 Listener->AddedManglingNumber(D: ND, Number);
13506}
13507
/// Retrieve the mangling number recorded for \p ND (1 if none was stored).
/// During CUDA/HIP host compilation the stored value packs host and device
/// numbers into one 32-bit integer; \p ForAuxTarget selects which half.
unsigned ASTContext::getManglingNumber(const NamedDecl *ND,
                                       bool ForAuxTarget) const {
  auto I = MangleNumbers.find(Key: ND);
  unsigned Res = I != MangleNumbers.end() ? I->second : 1;
  // CUDA/HIP host compilation encodes host and device mangling numbers
  // as lower and upper half of 32 bit integer.
  if (LangOpts.CUDA && !LangOpts.CUDAIsDevice) {
    Res = ForAuxTarget ? Res >> 16 : Res & 0xFFFF;
  } else {
    assert(!ForAuxTarget && "Only CUDA/HIP host compilation supports mangling "
                            "number for aux target");
  }
  // Unpacking can yield 0; normalize anything below 2 back to the default 1.
  return Res > 1 ? Res : 1;
}
13522
13523void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) {
13524 if (Number <= 1)
13525 return;
13526
13527 StaticLocalNumbers[VD] = Number;
13528
13529 if (Listener)
13530 Listener->AddedStaticLocalNumbers(D: VD, Number);
13531}
13532
13533unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const {
13534 auto I = StaticLocalNumbers.find(Key: VD);
13535 return I != StaticLocalNumbers.end() ? I->second : 1;
13536}
13537
/// Record whether \p FD is a C++20 destroying operator delete. Only the
/// positive case is stored (keyed by canonical decl); a false value merely
/// asserts that nothing was recorded earlier.
void ASTContext::setIsDestroyingOperatorDelete(const FunctionDecl *FD,
                                               bool IsDestroying) {
  if (!IsDestroying) {
    assert(!DestroyingOperatorDeletes.contains(FD->getCanonicalDecl()));
    return;
  }
  DestroyingOperatorDeletes.insert(V: FD->getCanonicalDecl());
}
13546
/// Whether \p FD was recorded as a destroying operator delete
/// (see setIsDestroyingOperatorDelete).
bool ASTContext::isDestroyingOperatorDelete(const FunctionDecl *FD) const {
  return DestroyingOperatorDeletes.contains(V: FD->getCanonicalDecl());
}
13550
/// Record whether \p FD is a type-aware operator new/delete. Mirrors
/// setIsDestroyingOperatorDelete: only the positive case is stored, keyed by
/// canonical decl.
void ASTContext::setIsTypeAwareOperatorNewOrDelete(const FunctionDecl *FD,
                                                   bool IsTypeAware) {
  if (!IsTypeAware) {
    assert(!TypeAwareOperatorNewAndDeletes.contains(FD->getCanonicalDecl()));
    return;
  }
  TypeAwareOperatorNewAndDeletes.insert(V: FD->getCanonicalDecl());
}
13559
/// Whether \p FD was recorded as a type-aware operator new or delete
/// (see setIsTypeAwareOperatorNewOrDelete).
bool ASTContext::isTypeAwareOperatorNewOrDelete(const FunctionDecl *FD) const {
  return TypeAwareOperatorNewAndDeletes.contains(V: FD->getCanonicalDecl());
}
13563
13564void ASTContext::addOperatorDeleteForVDtor(const CXXDestructorDecl *Dtor,
13565 FunctionDecl *OperatorDelete,
13566 OperatorDeleteKind K) const {
13567 switch (K) {
13568 case OperatorDeleteKind::Regular:
13569 OperatorDeletesForVirtualDtor[Dtor->getCanonicalDecl()] = OperatorDelete;
13570 break;
13571 case OperatorDeleteKind::GlobalRegular:
13572 GlobalOperatorDeletesForVirtualDtor[Dtor->getCanonicalDecl()] =
13573 OperatorDelete;
13574 break;
13575 case OperatorDeleteKind::Array:
13576 ArrayOperatorDeletesForVirtualDtor[Dtor->getCanonicalDecl()] =
13577 OperatorDelete;
13578 break;
13579 case OperatorDeleteKind::ArrayGlobal:
13580 GlobalArrayOperatorDeletesForVirtualDtor[Dtor->getCanonicalDecl()] =
13581 OperatorDelete;
13582 break;
13583 }
13584}
13585
13586bool ASTContext::dtorHasOperatorDelete(const CXXDestructorDecl *Dtor,
13587 OperatorDeleteKind K) const {
13588 switch (K) {
13589 case OperatorDeleteKind::Regular:
13590 return OperatorDeletesForVirtualDtor.contains(Val: Dtor->getCanonicalDecl());
13591 case OperatorDeleteKind::GlobalRegular:
13592 return GlobalOperatorDeletesForVirtualDtor.contains(
13593 Val: Dtor->getCanonicalDecl());
13594 case OperatorDeleteKind::Array:
13595 return ArrayOperatorDeletesForVirtualDtor.contains(
13596 Val: Dtor->getCanonicalDecl());
13597 case OperatorDeleteKind::ArrayGlobal:
13598 return GlobalArrayOperatorDeletesForVirtualDtor.contains(
13599 Val: Dtor->getCanonicalDecl());
13600 }
13601 return false;
13602}
13603
13604FunctionDecl *
13605ASTContext::getOperatorDeleteForVDtor(const CXXDestructorDecl *Dtor,
13606 OperatorDeleteKind K) const {
13607 const CXXDestructorDecl *Canon = Dtor->getCanonicalDecl();
13608 switch (K) {
13609 case OperatorDeleteKind::Regular:
13610 if (OperatorDeletesForVirtualDtor.contains(Val: Canon))
13611 return OperatorDeletesForVirtualDtor[Canon];
13612 return nullptr;
13613 case OperatorDeleteKind::GlobalRegular:
13614 if (GlobalOperatorDeletesForVirtualDtor.contains(Val: Canon))
13615 return GlobalOperatorDeletesForVirtualDtor[Canon];
13616 return nullptr;
13617 case OperatorDeleteKind::Array:
13618 if (ArrayOperatorDeletesForVirtualDtor.contains(Val: Canon))
13619 return ArrayOperatorDeletesForVirtualDtor[Canon];
13620 return nullptr;
13621 case OperatorDeleteKind::ArrayGlobal:
13622 if (GlobalArrayOperatorDeletesForVirtualDtor.contains(Val: Canon))
13623 return GlobalArrayOperatorDeletesForVirtualDtor[Canon];
13624 return nullptr;
13625 }
13626 return nullptr;
13627}
13628
/// Whether \p RD was flagged as possibly needing a vector deleting
/// destructor. Always false when the target does not emit vector deleting
/// destructors under the current language options.
bool ASTContext::classMaybeNeedsVectorDeletingDestructor(
    const CXXRecordDecl *RD) {
  if (!getTargetInfo().emitVectorDeletingDtors(getLangOpts()))
    return false;

  return MaybeRequireVectorDeletingDtor.count(V: RD);
}
13636
/// Flag \p RD as possibly needing a vector deleting destructor. A no-op
/// when the target does not emit vector deleting destructors under the
/// current language options.
void ASTContext::setClassMaybeNeedsVectorDeletingDestructor(
    const CXXRecordDecl *RD) {
  if (!getTargetInfo().emitVectorDeletingDtors(getLangOpts()))
    return;

  MaybeRequireVectorDeletingDtor.insert(V: RD);
}
13644
13645MangleNumberingContext &
13646ASTContext::getManglingNumberContext(const DeclContext *DC) {
13647 assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
13648 std::unique_ptr<MangleNumberingContext> &MCtx = MangleNumberingContexts[DC];
13649 if (!MCtx)
13650 MCtx = createMangleNumberingContext();
13651 return *MCtx;
13652}
13653
/// Return (creating on first use) the extra mangling-number context keyed by
/// a specific declaration rather than a DeclContext.
MangleNumberingContext &
ASTContext::getManglingNumberContext(NeedExtraManglingDecl_t, const Decl *D) {
  assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
  std::unique_ptr<MangleNumberingContext> &MCtx =
      ExtraMangleNumberingContexts[D];
  if (!MCtx)
    MCtx = createMangleNumberingContext();
  return *MCtx;
}
13663
/// Create a fresh ABI-specific mangling-number context; ownership is
/// returned to the caller.
std::unique_ptr<MangleNumberingContext>
ASTContext::createMangleNumberingContext() const {
  return ABI->createMangleNumberingContext();
}
13668
/// Look up the copy constructor recorded for copying exception objects of
/// class \p RD; keyed by the first declaration of the class.
const CXXConstructorDecl *
ASTContext::getCopyConstructorForExceptionObject(CXXRecordDecl *RD) {
  return ABI->getCopyConstructorForExceptionObject(
      cast<CXXRecordDecl>(Val: RD->getFirstDecl()));
}
13674
/// Record the copy constructor to use when copying exception objects of
/// class \p RD; both key and value are normalized to their first decls.
void ASTContext::addCopyConstructorForExceptionObject(CXXRecordDecl *RD,
                                                      CXXConstructorDecl *CD) {
  return ABI->addCopyConstructorForExceptionObject(
      cast<CXXRecordDecl>(Val: RD->getFirstDecl()),
      cast<CXXConstructorDecl>(Val: CD->getFirstDecl()));
}
13681
/// Record the typedef-name used to identify the unnamed tag \p TD;
/// delegated to the ABI object.
void ASTContext::addTypedefNameForUnnamedTagDecl(TagDecl *TD,
                                                 TypedefNameDecl *DD) {
  return ABI->addTypedefNameForUnnamedTagDecl(TD, DD);
}
13686
/// Retrieve the typedef-name recorded for the unnamed tag \p TD;
/// delegated to the ABI object.
TypedefNameDecl *
ASTContext::getTypedefNameForUnnamedTagDecl(const TagDecl *TD) {
  return ABI->getTypedefNameForUnnamedTagDecl(TD);
}
13691
/// Record the declarator used to identify the unnamed tag \p TD;
/// delegated to the ABI object.
void ASTContext::addDeclaratorForUnnamedTagDecl(TagDecl *TD,
                                                DeclaratorDecl *DD) {
  return ABI->addDeclaratorForUnnamedTagDecl(TD, DD);
}
13696
/// Retrieve the declarator recorded for the unnamed tag \p TD;
/// delegated to the ABI object.
DeclaratorDecl *ASTContext::getDeclaratorForUnnamedTagDecl(const TagDecl *TD) {
  return ABI->getDeclaratorForUnnamedTagDecl(TD);
}
13700
/// Record the index of parameter \p D in the side table used when the index
/// cannot be recomputed cheaply from the decl itself.
void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) {
  ParamIndices[D] = index;
}
13704
/// Look up a parameter index previously recorded via setParameterIndex;
/// asserts that an entry exists.
unsigned ASTContext::getParameterIndex(const ParmVarDecl *D) const {
  ParameterIndexTable::const_iterator I = ParamIndices.find(Val: D);
  assert(I != ParamIndices.end() &&
         "ParmIndices lacks entry set by ParmVarDecl");
  return I->second;
}
13711
/// Build the constant array type of a string literal with element type
/// \p EltTy and \p Length characters (the array adds room for the null
/// terminator).
QualType ASTContext::getStringLiteralArrayType(QualType EltTy,
                                               unsigned Length) const {
  // A C++ string literal has a const-qualified element type (C++ 2.13.4p1).
  if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings)
    EltTy = EltTy.withConst();

  EltTy = adjustStringLiteralBaseType(Ty: EltTy);

  // Get an array type for the string, according to C99 6.4.5. This includes
  // the null terminator character.
  return getConstantArrayType(EltTy, ArySizeIn: llvm::APInt(32, Length + 1), SizeExpr: nullptr,
                              ASM: ArraySizeModifier::Normal, /*IndexTypeQuals*/ 0);
}
13725
/// Return a uniqued ordinary StringLiteral for \p Key, creating and caching
/// it on first request so repeated lookups share one AST node.
StringLiteral *
ASTContext::getPredefinedStringLiteralFromCache(StringRef Key) const {
  StringLiteral *&Result = StringLiteralCache[Key];
  if (!Result)
    Result = StringLiteral::Create(
        Ctx: *this, Str: Key, Kind: StringLiteralKind::Ordinary,
        /*Pascal*/ false, Ty: getStringLiteralArrayType(EltTy: CharTy, Length: Key.size()),
        Locs: SourceLocation());
  return Result;
}
13736
/// Return the uniqued MSGuidDecl for the given GUID value, creating it on
/// first request. Uses the standard FoldingSet find-or-insert pattern so
/// identical GUIDs share one declaration.
MSGuidDecl *
ASTContext::getMSGuidDecl(MSGuidDecl::Parts Parts) const {
  assert(MSGuidTagDecl && "building MS GUID without MS extensions?");

  llvm::FoldingSetNodeID ID;
  MSGuidDecl::Profile(ID, P: Parts);

  void *InsertPos;
  if (MSGuidDecl *Existing = MSGuidDecls.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;

  // GUID objects are const-qualified (they are immutable data).
  QualType GUIDType = getMSGuidType().withConst();
  MSGuidDecl *New = MSGuidDecl::Create(C: *this, T: GUIDType, P: Parts);
  MSGuidDecls.InsertNode(N: New, InsertPos);
  return New;
}
13753
/// Return the uniqued UnnamedGlobalConstantDecl for the (type, value) pair,
/// creating it on first request via the FoldingSet find-or-insert pattern.
UnnamedGlobalConstantDecl *
ASTContext::getUnnamedGlobalConstantDecl(QualType Ty,
                                         const APValue &APVal) const {
  llvm::FoldingSetNodeID ID;
  UnnamedGlobalConstantDecl::Profile(ID, Ty, APVal);

  void *InsertPos;
  if (UnnamedGlobalConstantDecl *Existing =
          UnnamedGlobalConstantDecls.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;

  UnnamedGlobalConstantDecl *New =
      UnnamedGlobalConstantDecl::Create(C: *this, T: Ty, APVal);
  UnnamedGlobalConstantDecls.InsertNode(N: New, InsertPos);
  return New;
}
13770
/// Return the uniqued template parameter object for a class-type non-type
/// template argument with value \p V, creating it on first request.
TemplateParamObjectDecl *
ASTContext::getTemplateParamObjectDecl(QualType T, const APValue &V) const {
  assert(T->isRecordType() && "template param object of unexpected type");

  // C++ [temp.param]p8:
  //   [...] a static storage duration object of type 'const T' [...]
  T.addConst();

  llvm::FoldingSetNodeID ID;
  TemplateParamObjectDecl::Profile(ID, T, V);

  void *InsertPos;
  if (TemplateParamObjectDecl *Existing =
          TemplateParamObjectDecls.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;

  TemplateParamObjectDecl *New = TemplateParamObjectDecl::Create(C: *this, T, V);
  TemplateParamObjectDecls.InsertNode(N: New, InsertPos);
  return New;
}
13791
/// Whether this atomic expression would lower to a libcall that old Darwin
/// deployment targets (iOS < 7, macOS < 10.9) do not provide. Only misaligned
/// or over-wide atomics need the libcall; naturally-sized ones are inlined.
bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const {
  const llvm::Triple &T = getTargetInfo().getTriple();
  if (!T.isOSDarwin())
    return false;

  // Recent Darwin targets ship the atomic libcalls, so nothing is unsupported.
  if (!(T.isiOS() && T.isOSVersionLT(Major: 7)) &&
      !(T.isMacOSX() && T.isOSVersionLT(Major: 10, Minor: 9)))
    return false;

  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  CharUnits sizeChars = getTypeSizeInChars(T: AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getTypeAlignInChars(T: AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits = getTargetInfo().getMaxAtomicInlineWidth();
  // A libcall is needed when size != alignment or the operation is wider
  // than the target can inline.
  return (Size != Align || toBits(CharSize: sizeChars) > MaxInlineWidthInBits);
}
13809
/// Compare an ObjC method declaration against an implementation method:
/// matching decl qualifiers, return type, parameter count/types/qualifiers,
/// and variadic-ness. Unavailable or deprecated declarations never match.
bool
ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl,
                                const ObjCMethodDecl *MethodImpl) {
  // No point trying to match an unavailable/deprecated method.
  if (MethodDecl->hasAttr<UnavailableAttr>()
      || MethodDecl->hasAttr<DeprecatedAttr>())
    return false;
  if (MethodDecl->getObjCDeclQualifier() !=
      MethodImpl->getObjCDeclQualifier())
    return false;
  if (!hasSameType(T1: MethodDecl->getReturnType(), T2: MethodImpl->getReturnType()))
    return false;

  if (MethodDecl->param_size() != MethodImpl->param_size())
    return false;

  // Walk both parameter lists in lockstep (counts already known equal).
  for (ObjCMethodDecl::param_const_iterator IM = MethodImpl->param_begin(),
       IF = MethodDecl->param_begin(), EM = MethodImpl->param_end(),
       EF = MethodDecl->param_end();
       IM != EM && IF != EF; ++IM, ++IF) {
    const ParmVarDecl *DeclVar = (*IF);
    const ParmVarDecl *ImplVar = (*IM);
    if (ImplVar->getObjCDeclQualifier() != DeclVar->getObjCDeclQualifier())
      return false;
    if (!hasSameType(T1: DeclVar->getType(), T2: ImplVar->getType()))
      return false;
  }

  return (MethodDecl->isVariadic() == MethodImpl->isVariadic());
}
13840
13841uint64_t ASTContext::getTargetNullPointerValue(QualType QT) const {
13842 LangAS AS;
13843 if (QT->getUnqualifiedDesugaredType()->isNullPtrType())
13844 AS = LangAS::Default;
13845 else
13846 AS = QT->getPointeeType().getAddressSpace();
13847
13848 return getTargetInfo().getNullPointerValue(AddrSpace: AS);
13849}
13850
/// Map a language-level address space to the target's numeric address space.
unsigned ASTContext::getTargetAddressSpace(LangAS AS) const {
  return getTargetInfo().getTargetAddressSpace(AS);
}
13854
/// Structural equality of two expressions: identical pointers compare equal,
/// null vs non-null compares unequal, otherwise both are profiled canonically
/// and their FoldingSet IDs compared.
bool ASTContext::hasSameExpr(const Expr *X, const Expr *Y) const {
  if (X == Y)
    return true;
  if (!X || !Y)
    return false;
  llvm::FoldingSetNodeID IDX, IDY;
  X->Profile(ID&: IDX, Context: *this, /*Canonical=*/true);
  Y->Profile(ID&: IDY, Context: *this, /*Canonical=*/true);
  return IDX == IDY;
}
13865
13866// The getCommon* helpers return, for given 'same' X and Y entities given as
13867// inputs, another entity which is also the 'same' as the inputs, but which
13868// is closer to the canonical form of the inputs, each according to a given
13869// criteria.
13870// The getCommon*Checked variants are 'null inputs not-allowed' equivalents of
13871// the regular ones.
13872
/// For two declarations of the same entity, return the older one (the one
/// declared first on the redeclaration chain), or null if they do not
/// declare the same entity.
static Decl *getCommonDecl(Decl *X, Decl *Y) {
  if (!declaresSameEntity(D1: X, D2: Y))
    return nullptr;
  for (const Decl *DX : X->redecls()) {
    // If we reach Y before reaching the first decl, that means X is older.
    if (DX == Y)
      return X;
    // If we reach the first decl, then Y is older.
    if (DX->isFirstDecl())
      return Y;
  }
  llvm_unreachable("Corrupt redecls chain");
}
13886
/// Typed, null-tolerant wrapper over getCommonDecl for any Decl subclass.
template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true>
static T *getCommonDecl(T *X, T *Y) {
  return cast_or_null<T>(
      getCommonDecl(X: const_cast<Decl *>(cast_or_null<Decl>(X)),
                    Y: const_cast<Decl *>(cast_or_null<Decl>(Y))));
}
13893
/// Like getCommonDecl<T>, but inputs must be non-null and must declare the
/// same entity (the result cast asserts non-null).
template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true>
static T *getCommonDeclChecked(T *X, T *Y) {
  return cast<T>(getCommonDecl(X: const_cast<Decl *>(cast<Decl>(X)),
                               Y: const_cast<Decl *>(cast<Decl>(Y))));
}
13899
/// Return a TemplateName that is the 'same' as both X and Y, or a null
/// TemplateName if they do not canonicalize to the same template.
static TemplateName getCommonTemplateName(const ASTContext &Ctx, TemplateName X,
                                          TemplateName Y,
                                          bool IgnoreDeduced = false) {
  if (X.getAsVoidPointer() == Y.getAsVoidPointer())
    return X;
  // FIXME: There are cases here where we could find a common template name
  // with more sugar. For example one could be a SubstTemplateTemplate*
  // replacing the other.
  TemplateName CX = Ctx.getCanonicalTemplateName(Name: X, IgnoreDeduced);
  if (CX.getAsVoidPointer() !=
      Ctx.getCanonicalTemplateName(Name: Y).getAsVoidPointer())
    return TemplateName();
  return CX;
}
13914
/// Variant of getCommonTemplateName for callers that know the names are
/// equivalent; asserts the result is non-null.
static TemplateName getCommonTemplateNameChecked(const ASTContext &Ctx,
                                                 TemplateName X, TemplateName Y,
                                                 bool IgnoreDeduced) {
  TemplateName R = getCommonTemplateName(Ctx, X, Y, IgnoreDeduced);
  assert(R.getAsVoidPointer() != nullptr);
  return R;
}
13922
/// Elementwise getCommonSugaredType over two equally-sized type lists.
static auto getCommonTypes(const ASTContext &Ctx, ArrayRef<QualType> Xs,
                           ArrayRef<QualType> Ys, bool Unqualified = false) {
  assert(Xs.size() == Ys.size());
  SmallVector<QualType, 8> Rs(Xs.size());
  for (size_t I = 0; I < Rs.size(); ++I)
    Rs[I] = Ctx.getCommonSugaredType(X: Xs[I], Y: Ys[I], Unqualified);
  return Rs;
}
13931
/// Keep an attribute location only if both nodes agree on it; otherwise
/// return an invalid SourceLocation.
template <class T>
static SourceLocation getCommonAttrLoc(const T *X, const T *Y) {
  return X->getAttributeLoc() == Y->getAttributeLoc() ? X->getAttributeLoc()
                                                      : SourceLocation();
}
13937
13938static TemplateArgument getCommonTemplateArgument(const ASTContext &Ctx,
13939 const TemplateArgument &X,
13940 const TemplateArgument &Y) {
13941 if (X.getKind() != Y.getKind())
13942 return TemplateArgument();
13943
13944 switch (X.getKind()) {
13945 case TemplateArgument::ArgKind::Type:
13946 if (!Ctx.hasSameType(T1: X.getAsType(), T2: Y.getAsType()))
13947 return TemplateArgument();
13948 return TemplateArgument(
13949 Ctx.getCommonSugaredType(X: X.getAsType(), Y: Y.getAsType()));
13950 case TemplateArgument::ArgKind::NullPtr:
13951 if (!Ctx.hasSameType(T1: X.getNullPtrType(), T2: Y.getNullPtrType()))
13952 return TemplateArgument();
13953 return TemplateArgument(
13954 Ctx.getCommonSugaredType(X: X.getNullPtrType(), Y: Y.getNullPtrType()),
13955 /*Unqualified=*/true);
13956 case TemplateArgument::ArgKind::Expression:
13957 if (!Ctx.hasSameType(T1: X.getAsExpr()->getType(), T2: Y.getAsExpr()->getType()))
13958 return TemplateArgument();
13959 // FIXME: Try to keep the common sugar.
13960 return X;
13961 case TemplateArgument::ArgKind::Template: {
13962 TemplateName TX = X.getAsTemplate(), TY = Y.getAsTemplate();
13963 TemplateName CTN = ::getCommonTemplateName(Ctx, X: TX, Y: TY);
13964 if (!CTN.getAsVoidPointer())
13965 return TemplateArgument();
13966 return TemplateArgument(CTN);
13967 }
13968 case TemplateArgument::ArgKind::TemplateExpansion: {
13969 TemplateName TX = X.getAsTemplateOrTemplatePattern(),
13970 TY = Y.getAsTemplateOrTemplatePattern();
13971 TemplateName CTN = ::getCommonTemplateName(Ctx, X: TX, Y: TY);
13972 if (!CTN.getAsVoidPointer())
13973 return TemplateName();
13974 auto NExpX = X.getNumTemplateExpansions();
13975 assert(NExpX == Y.getNumTemplateExpansions());
13976 return TemplateArgument(CTN, NExpX);
13977 }
13978 default:
13979 // FIXME: Handle the other argument kinds.
13980 return X;
13981 }
13982}
13983
13984static bool getCommonTemplateArguments(const ASTContext &Ctx,
13985 SmallVectorImpl<TemplateArgument> &R,
13986 ArrayRef<TemplateArgument> Xs,
13987 ArrayRef<TemplateArgument> Ys) {
13988 if (Xs.size() != Ys.size())
13989 return true;
13990 R.resize(N: Xs.size());
13991 for (size_t I = 0; I < R.size(); ++I) {
13992 R[I] = getCommonTemplateArgument(Ctx, X: Xs[I], Y: Ys[I]);
13993 if (R[I].isNull())
13994 return true;
13995 }
13996 return false;
13997}
13998
13999static auto getCommonTemplateArguments(const ASTContext &Ctx,
14000 ArrayRef<TemplateArgument> Xs,
14001 ArrayRef<TemplateArgument> Ys) {
14002 SmallVector<TemplateArgument, 8> R;
14003 bool Different = getCommonTemplateArguments(Ctx, R, Xs, Ys);
14004 assert(!Different);
14005 (void)Different;
14006 return R;
14007}
14008
14009template <class T>
14010static ElaboratedTypeKeyword getCommonTypeKeyword(const T *X, const T *Y,
14011 bool IsSame) {
14012 ElaboratedTypeKeyword KX = X->getKeyword(), KY = Y->getKeyword();
14013 if (KX == KY)
14014 return KX;
14015 KX = getCanonicalElaboratedTypeKeyword(Keyword: KX);
14016 assert(!IsSame || KX == getCanonicalElaboratedTypeKeyword(KY));
14017 return KX;
14018}
14019
/// Returns a NestedNameSpecifier which has only the common sugar
/// present in both NNS1 and NNS2.
/// \param IsSame when true, the two qualifiers are required to be
///        canonically equivalent (asserted below).
static NestedNameSpecifier getCommonNNS(const ASTContext &Ctx,
                                        NestedNameSpecifier NNS1,
                                        NestedNameSpecifier NNS2, bool IsSame) {
  // If they are identical, all sugar is common.
  if (NNS1 == NNS2)
    return NNS1;

  // IsSame implies both Qualifiers are equivalent.
  NestedNameSpecifier Canon = NNS1.getCanonical();
  if (Canon != NNS2.getCanonical()) {
    assert(!IsSame && "Should be the same NestedNameSpecifier");
    // If they are not the same, there is nothing to unify.
    return std::nullopt;
  }

  // Build the common qualifier kind-by-kind; both sides share a kind since
  // their canonical forms are equal.
  NestedNameSpecifier R = std::nullopt;
  NestedNameSpecifier::Kind Kind = NNS1.getKind();
  assert(Kind == NNS2.getKind());
  switch (Kind) {
  case NestedNameSpecifier::Kind::Namespace: {
    auto [Namespace1, Prefix1] = NNS1.getAsNamespaceAndPrefix();
    auto [Namespace2, Prefix2] = NNS2.getAsNamespaceAndPrefix();
    auto Kind = Namespace1->getKind();
    if (Kind != Namespace2->getKind() ||
        (Kind == Decl::NamespaceAlias &&
         !declaresSameEntity(Namespace1, Namespace2))) {
      // The sides disagree on how the namespace is spelled (e.g. alias vs
      // plain namespace); fall back to the underlying namespace with no
      // prefix sugar.
      R = NestedNameSpecifier(
          Ctx,
          ::getCommonDeclChecked(Namespace1->getNamespace(),
                                 Namespace2->getNamespace()),
          /*Prefix=*/std::nullopt);
      break;
    }
    // The prefixes for namespaces are not significant: a namespace's
    // declaration identifies it uniquely.
    NestedNameSpecifier Prefix = ::getCommonNNS(Ctx, Prefix1, Prefix2,
                                                /*IsSame=*/false);
    R = NestedNameSpecifier(Ctx, ::getCommonDeclChecked(Namespace1, Namespace2),
                            Prefix);
    break;
  }
  case NestedNameSpecifier::Kind::Type: {
    // Unify the sugar of the two types naming the scope.
    const Type *T1 = NNS1.getAsType(), *T2 = NNS2.getAsType();
    const Type *T = Ctx.getCommonSugaredType(QualType(T1, 0), QualType(T2, 0),
                                             /*Unqualified=*/true)
                        .getTypePtr();
    R = NestedNameSpecifier(T);
    break;
  }
  case NestedNameSpecifier::Kind::MicrosoftSuper: {
    // FIXME: Can __super even be used with data members?
    // If it's only usable in functions, we will never see it here,
    // unless we save the qualifiers used in function types.
    // In that case, it might be possible NNS2 is a type,
    // in which case we should degrade the result to
    // a CXXRecordType.
    R = NestedNameSpecifier(getCommonDeclChecked(NNS1.getAsMicrosoftSuper(),
                                                 NNS2.getAsMicrosoftSuper()));
    break;
  }
  case NestedNameSpecifier::Kind::Null:
  case NestedNameSpecifier::Kind::Global:
    // These are singletons.
    llvm_unreachable("singletons did not compare equal");
  }
  // The result must still denote the same canonical qualifier.
  assert(R.getCanonical() == Canon);
  return R;
}
14090
14091template <class T>
14092static NestedNameSpecifier getCommonQualifier(const ASTContext &Ctx, const T *X,
14093 const T *Y, bool IsSame) {
14094 return ::getCommonNNS(Ctx, NNS1: X->getQualifier(), NNS2: Y->getQualifier(), IsSame);
14095}
14096
14097template <class T>
14098static QualType getCommonElementType(const ASTContext &Ctx, const T *X,
14099 const T *Y) {
14100 return Ctx.getCommonSugaredType(X: X->getElementType(), Y: Y->getElementType());
14101}
14102
14103static QualType getCommonTypeWithQualifierLifting(const ASTContext &Ctx,
14104 QualType X, QualType Y,
14105 Qualifiers &QX,
14106 Qualifiers &QY) {
14107 QualType R = Ctx.getCommonSugaredType(X, Y,
14108 /*Unqualified=*/true);
14109 // Qualifiers common to both element types.
14110 Qualifiers RQ = R.getQualifiers();
14111 // For each side, move to the top level any qualifiers which are not common to
14112 // both element types. The caller must assume top level qualifiers might
14113 // be different, even if they are the same type, and can be treated as sugar.
14114 QX += X.getQualifiers() - RQ;
14115 QY += Y.getQualifiers() - RQ;
14116 return R;
14117}
14118
14119template <class T>
14120static QualType getCommonArrayElementType(const ASTContext &Ctx, const T *X,
14121 Qualifiers &QX, const T *Y,
14122 Qualifiers &QY) {
14123 return getCommonTypeWithQualifierLifting(Ctx, X->getElementType(),
14124 Y->getElementType(), QX, QY);
14125}
14126
14127template <class T>
14128static QualType getCommonPointeeType(const ASTContext &Ctx, const T *X,
14129 const T *Y) {
14130 return Ctx.getCommonSugaredType(X: X->getPointeeType(), Y: Y->getPointeeType());
14131}
14132
14133template <class T>
14134static auto *getCommonSizeExpr(const ASTContext &Ctx, T *X, T *Y) {
14135 assert(Ctx.hasSameExpr(X->getSizeExpr(), Y->getSizeExpr()));
14136 return X->getSizeExpr();
14137}
14138
14139static auto getCommonSizeModifier(const ArrayType *X, const ArrayType *Y) {
14140 assert(X->getSizeModifier() == Y->getSizeModifier());
14141 return X->getSizeModifier();
14142}
14143
14144static auto getCommonIndexTypeCVRQualifiers(const ArrayType *X,
14145 const ArrayType *Y) {
14146 assert(X->getIndexTypeCVRQualifiers() == Y->getIndexTypeCVRQualifiers());
14147 return X->getIndexTypeCVRQualifiers();
14148}
14149
14150// Merges two type lists such that the resulting vector will contain
14151// each type (in a canonical sense) only once, in the order they appear
14152// from X to Y. If they occur in both X and Y, the result will contain
14153// the common sugared type between them.
14154static void mergeTypeLists(const ASTContext &Ctx,
14155 SmallVectorImpl<QualType> &Out, ArrayRef<QualType> X,
14156 ArrayRef<QualType> Y) {
14157 llvm::DenseMap<QualType, unsigned> Found;
14158 for (auto Ts : {X, Y}) {
14159 for (QualType T : Ts) {
14160 auto Res = Found.try_emplace(Key: Ctx.getCanonicalType(T), Args: Out.size());
14161 if (!Res.second) {
14162 QualType &U = Out[Res.first->second];
14163 U = Ctx.getCommonSugaredType(X: U, Y: T);
14164 } else {
14165 Out.emplace_back(Args&: T);
14166 }
14167 }
14168 }
14169}
14170
/// Merge two exception specifications into a common one, as used when
/// computing a common function type.
/// \param ExceptionTypeStorage backing storage for a merged dynamic
///        exception list; must outlive the returned ExceptionSpecInfo.
/// \param AcceptDependent whether value-dependent computed noexcept
///        specifications may be encountered here; if so they are dropped.
FunctionProtoType::ExceptionSpecInfo
ASTContext::mergeExceptionSpecs(FunctionProtoType::ExceptionSpecInfo ESI1,
                                FunctionProtoType::ExceptionSpecInfo ESI2,
                                SmallVectorImpl<QualType> &ExceptionTypeStorage,
                                bool AcceptDependent) const {
  ExceptionSpecificationType EST1 = ESI1.Type, EST2 = ESI2.Type;

  // If either of them can throw anything, that is the result.
  for (auto I : {EST_None, EST_MSAny, EST_NoexceptFalse}) {
    if (EST1 == I)
      return ESI1;
    if (EST2 == I)
      return ESI2;
  }

  // If either of them is non-throwing, the result is the other.
  for (auto I :
       {EST_NoThrow, EST_DynamicNone, EST_BasicNoexcept, EST_NoexceptTrue}) {
    if (EST1 == I)
      return ESI2;
    if (EST2 == I)
      return ESI1;
  }

  // If we're left with value-dependent computed noexcept expressions, we're
  // stuck. Before C++17, we can just drop the exception specification entirely,
  // since it's not actually part of the canonical type. And this should never
  // happen in C++17, because it would mean we were computing the composite
  // pointer type of dependent types, which should never happen.
  if (EST1 == EST_DependentNoexcept || EST2 == EST_DependentNoexcept) {
    assert(AcceptDependent &&
           "computing composite pointer type of dependent types");
    return FunctionProtoType::ExceptionSpecInfo();
  }

  // Switch over the possibilities so that people adding new values know to
  // update this function.
  switch (EST1) {
  case EST_None:
  case EST_DynamicNone:
  case EST_MSAny:
  case EST_BasicNoexcept:
  case EST_DependentNoexcept:
  case EST_NoexceptFalse:
  case EST_NoexceptTrue:
  case EST_NoThrow:
    llvm_unreachable("These ESTs should be handled above");

  case EST_Dynamic: {
    // This is the fun case: both exception specifications are dynamic. Form
    // the union of the two lists.
    assert(EST2 == EST_Dynamic && "other cases should already be handled");
    mergeTypeLists(*this, ExceptionTypeStorage, ESI1.Exceptions,
                   ESI2.Exceptions);
    FunctionProtoType::ExceptionSpecInfo Result(EST_Dynamic);
    Result.Exceptions = ExceptionTypeStorage;
    return Result;
  }

  case EST_Unevaluated:
  case EST_Uninstantiated:
  case EST_Unparsed:
    llvm_unreachable("shouldn't see unresolved exception specifications here");
  }

  llvm_unreachable("invalid ExceptionSpecificationType");
}
14238
/// Compute the common type node for two canonically-equivalent non-sugar
/// (canonical or uniqued) type nodes \p X and \p Y, unifying the sugar of
/// their component types. Element qualifiers which are not common to both
/// array element types are accumulated into \p QX / \p QY so the caller can
/// reapply them at the top level.
static QualType getCommonNonSugarTypeNode(const ASTContext &Ctx, const Type *X,
                                          Qualifiers &QX, const Type *Y,
                                          Qualifiers &QY) {
  Type::TypeClass TC = X->getTypeClass();
  assert(TC == Y->getTypeClass());
  switch (TC) {
#define UNEXPECTED_TYPE(Class, Kind)                                           \
  case Type::Class:                                                            \
    llvm_unreachable("Unexpected " Kind ": " #Class);

#define NON_CANONICAL_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "non-canonical")
#define TYPE(Class, Base)
#include "clang/AST/TypeNodes.inc"

// These type classes carry no sugar in their components, so they should have
// been handled before reaching this function.
#define SUGAR_FREE_TYPE(Class) UNEXPECTED_TYPE(Class, "sugar-free")
    SUGAR_FREE_TYPE(Builtin)
    SUGAR_FREE_TYPE(DeducedTemplateSpecialization)
    SUGAR_FREE_TYPE(DependentBitInt)
    SUGAR_FREE_TYPE(BitInt)
    SUGAR_FREE_TYPE(ObjCInterface)
    SUGAR_FREE_TYPE(SubstTemplateTypeParmPack)
    SUGAR_FREE_TYPE(SubstBuiltinTemplatePack)
    SUGAR_FREE_TYPE(UnresolvedUsing)
    SUGAR_FREE_TYPE(HLSLAttributedResource)
    SUGAR_FREE_TYPE(HLSLInlineSpirv)
#undef SUGAR_FREE_TYPE
// These type classes are not uniqued, so they can never compare canonically
// equal without being the identical node, which is handled elsewhere.
#define NON_UNIQUE_TYPE(Class) UNEXPECTED_TYPE(Class, "non-unique")
    NON_UNIQUE_TYPE(TypeOfExpr)
    NON_UNIQUE_TYPE(VariableArray)
#undef NON_UNIQUE_TYPE

    UNEXPECTED_TYPE(TypeOf, "sugar")

#undef UNEXPECTED_TYPE

  case Type::Auto: {
    const auto *AX = cast<AutoType>(X), *AY = cast<AutoType>(Y);
    assert(AX->getDeducedKind() == AY->getDeducedKind());
    assert(AX->getDeducedKind() != DeducedKind::Deduced);
    assert(AX->getKeyword() == AY->getKeyword());
    TemplateDecl *CD = ::getCommonDecl(AX->getTypeConstraintConcept(),
                                       AY->getTypeConstraintConcept());
    SmallVector<TemplateArgument, 8> As;
    if (CD &&
        getCommonTemplateArguments(Ctx, As, AX->getTypeConstraintArguments(),
                                   AY->getTypeConstraintArguments())) {
      CD = nullptr; // The arguments differ, so make it unconstrained.
      As.clear();
    }
    return Ctx.getAutoType(AX->getDeducedKind(), QualType(), AX->getKeyword(),
                           CD, As);
  }
  case Type::IncompleteArray: {
    const auto *AX = cast<IncompleteArrayType>(X),
               *AY = cast<IncompleteArrayType>(Y);
    return Ctx.getIncompleteArrayType(
        getCommonArrayElementType(Ctx, AX, QX, AY, QY),
        getCommonSizeModifier(AX, AY), getCommonIndexTypeCVRQualifiers(AX, AY));
  }
  case Type::DependentSizedArray: {
    const auto *AX = cast<DependentSizedArrayType>(X),
               *AY = cast<DependentSizedArrayType>(Y);
    return Ctx.getDependentSizedArrayType(
        getCommonArrayElementType(Ctx, AX, QX, AY, QY),
        getCommonSizeExpr(Ctx, AX, AY), getCommonSizeModifier(AX, AY),
        getCommonIndexTypeCVRQualifiers(AX, AY));
  }
  case Type::ConstantArray: {
    const auto *AX = cast<ConstantArrayType>(X),
               *AY = cast<ConstantArrayType>(Y);
    assert(AX->getSize() == AY->getSize());
    // Keep the spelled size expression only if both sides agree on it.
    const Expr *SizeExpr = Ctx.hasSameExpr(AX->getSizeExpr(), AY->getSizeExpr())
                               ? AX->getSizeExpr()
                               : nullptr;
    return Ctx.getConstantArrayType(
        getCommonArrayElementType(Ctx, AX, QX, AY, QY), AX->getSize(), SizeExpr,
        getCommonSizeModifier(AX, AY), getCommonIndexTypeCVRQualifiers(AX, AY));
  }
  case Type::ArrayParameter: {
    const auto *AX = cast<ArrayParameterType>(X),
               *AY = cast<ArrayParameterType>(Y);
    assert(AX->getSize() == AY->getSize());
    const Expr *SizeExpr = Ctx.hasSameExpr(AX->getSizeExpr(), AY->getSizeExpr())
                               ? AX->getSizeExpr()
                               : nullptr;
    // Build the common constant array type first, then wrap it back into an
    // array parameter type.
    auto ArrayTy = Ctx.getConstantArrayType(
        getCommonArrayElementType(Ctx, AX, QX, AY, QY), AX->getSize(), SizeExpr,
        getCommonSizeModifier(AX, AY), getCommonIndexTypeCVRQualifiers(AX, AY));
    return Ctx.getArrayParameterType(ArrayTy);
  }
  case Type::Atomic: {
    const auto *AX = cast<AtomicType>(X), *AY = cast<AtomicType>(Y);
    return Ctx.getAtomicType(
        Ctx.getCommonSugaredType(AX->getValueType(), AY->getValueType()));
  }
  case Type::Complex: {
    const auto *CX = cast<ComplexType>(X), *CY = cast<ComplexType>(Y);
    return Ctx.getComplexType(getCommonArrayElementType(Ctx, CX, QX, CY, QY));
  }
  case Type::Pointer: {
    const auto *PX = cast<PointerType>(X), *PY = cast<PointerType>(Y);
    return Ctx.getPointerType(getCommonPointeeType(Ctx, PX, PY));
  }
  case Type::BlockPointer: {
    const auto *PX = cast<BlockPointerType>(X), *PY = cast<BlockPointerType>(Y);
    return Ctx.getBlockPointerType(getCommonPointeeType(Ctx, PX, PY));
  }
  case Type::ObjCObjectPointer: {
    const auto *PX = cast<ObjCObjectPointerType>(X),
               *PY = cast<ObjCObjectPointerType>(Y);
    return Ctx.getObjCObjectPointerType(getCommonPointeeType(Ctx, PX, PY));
  }
  case Type::MemberPointer: {
    const auto *PX = cast<MemberPointerType>(X),
               *PY = cast<MemberPointerType>(Y);
    assert(declaresSameEntity(PX->getMostRecentCXXRecordDecl(),
                              PY->getMostRecentCXXRecordDecl()));
    return Ctx.getMemberPointerType(
        getCommonPointeeType(Ctx, PX, PY),
        getCommonQualifier(Ctx, PX, PY, /*IsSame=*/true),
        PX->getMostRecentCXXRecordDecl());
  }
  case Type::LValueReference: {
    const auto *PX = cast<LValueReferenceType>(X),
               *PY = cast<LValueReferenceType>(Y);
    // FIXME: Preserve PointeeTypeAsWritten.
    return Ctx.getLValueReferenceType(getCommonPointeeType(Ctx, PX, PY),
                                      PX->isSpelledAsLValue() ||
                                          PY->isSpelledAsLValue());
  }
  case Type::RValueReference: {
    const auto *PX = cast<RValueReferenceType>(X),
               *PY = cast<RValueReferenceType>(Y);
    // FIXME: Preserve PointeeTypeAsWritten.
    return Ctx.getRValueReferenceType(getCommonPointeeType(Ctx, PX, PY));
  }
  case Type::DependentAddressSpace: {
    const auto *PX = cast<DependentAddressSpaceType>(X),
               *PY = cast<DependentAddressSpaceType>(Y);
    assert(Ctx.hasSameExpr(PX->getAddrSpaceExpr(), PY->getAddrSpaceExpr()));
    return Ctx.getDependentAddressSpaceType(getCommonPointeeType(Ctx, PX, PY),
                                            PX->getAddrSpaceExpr(),
                                            getCommonAttrLoc(PX, PY));
  }
  case Type::FunctionNoProto: {
    const auto *FX = cast<FunctionNoProtoType>(X),
               *FY = cast<FunctionNoProtoType>(Y);
    assert(FX->getExtInfo() == FY->getExtInfo());
    return Ctx.getFunctionNoProtoType(
        Ctx.getCommonSugaredType(FX->getReturnType(), FY->getReturnType()),
        FX->getExtInfo());
  }
  case Type::FunctionProto: {
    const auto *FX = cast<FunctionProtoType>(X),
               *FY = cast<FunctionProtoType>(Y);
    FunctionProtoType::ExtProtoInfo EPIX = FX->getExtProtoInfo(),
                                    EPIY = FY->getExtProtoInfo();
    // All of these are part of the canonical function type, so both sides
    // must already agree on them.
    assert(EPIX.ExtInfo == EPIY.ExtInfo);
    assert(!EPIX.ExtParameterInfos == !EPIY.ExtParameterInfos);
    assert(!EPIX.ExtParameterInfos ||
           llvm::equal(
               llvm::ArrayRef(EPIX.ExtParameterInfos, FX->getNumParams()),
               llvm::ArrayRef(EPIY.ExtParameterInfos, FY->getNumParams())));
    assert(EPIX.RefQualifier == EPIY.RefQualifier);
    assert(EPIX.TypeQuals == EPIY.TypeQuals);
    assert(EPIX.Variadic == EPIY.Variadic);

    // FIXME: Can we handle an empty EllipsisLoc?
    // Use empty EllipsisLoc if X and Y differ.

    EPIX.HasTrailingReturn = EPIX.HasTrailingReturn && EPIY.HasTrailingReturn;

    QualType R =
        Ctx.getCommonSugaredType(FX->getReturnType(), FY->getReturnType());
    auto P = getCommonTypes(Ctx, FX->param_types(), FY->param_types(),
                            /*Unqualified=*/true);

    SmallVector<QualType, 8> Exceptions;
    EPIX.ExceptionSpec = Ctx.mergeExceptionSpecs(
        EPIX.ExceptionSpec, EPIY.ExceptionSpec, Exceptions,
        /*AcceptDependent=*/true);
    return Ctx.getFunctionType(R, P, EPIX);
  }
  case Type::ObjCObject: {
    const auto *OX = cast<ObjCObjectType>(X), *OY = cast<ObjCObjectType>(Y);
    assert(
        std::equal(OX->getProtocols().begin(), OX->getProtocols().end(),
                   OY->getProtocols().begin(), OY->getProtocols().end(),
                   [](const ObjCProtocolDecl *P0, const ObjCProtocolDecl *P1) {
                     return P0->getCanonicalDecl() == P1->getCanonicalDecl();
                   }) &&
        "protocol lists must be the same");
    auto TAs = getCommonTypes(Ctx, OX->getTypeArgsAsWritten(),
                              OY->getTypeArgsAsWritten());
    return Ctx.getObjCObjectType(
        Ctx.getCommonSugaredType(OX->getBaseType(), OY->getBaseType()), TAs,
        OX->getProtocols(),
        OX->isKindOfTypeAsWritten() && OY->isKindOfTypeAsWritten());
  }
  case Type::ConstantMatrix: {
    const auto *MX = cast<ConstantMatrixType>(X),
               *MY = cast<ConstantMatrixType>(Y);
    assert(MX->getNumRows() == MY->getNumRows());
    assert(MX->getNumColumns() == MY->getNumColumns());
    return Ctx.getConstantMatrixType(getCommonElementType(Ctx, MX, MY),
                                     MX->getNumRows(), MX->getNumColumns());
  }
  case Type::DependentSizedMatrix: {
    const auto *MX = cast<DependentSizedMatrixType>(X),
               *MY = cast<DependentSizedMatrixType>(Y);
    assert(Ctx.hasSameExpr(MX->getRowExpr(), MY->getRowExpr()));
    assert(Ctx.hasSameExpr(MX->getColumnExpr(), MY->getColumnExpr()));
    return Ctx.getDependentSizedMatrixType(
        getCommonElementType(Ctx, MX, MY), MX->getRowExpr(),
        MX->getColumnExpr(), getCommonAttrLoc(MX, MY));
  }
  case Type::Vector: {
    const auto *VX = cast<VectorType>(X), *VY = cast<VectorType>(Y);
    assert(VX->getNumElements() == VY->getNumElements());
    assert(VX->getVectorKind() == VY->getVectorKind());
    return Ctx.getVectorType(getCommonElementType(Ctx, VX, VY),
                             VX->getNumElements(), VX->getVectorKind());
  }
  case Type::ExtVector: {
    const auto *VX = cast<ExtVectorType>(X), *VY = cast<ExtVectorType>(Y);
    assert(VX->getNumElements() == VY->getNumElements());
    return Ctx.getExtVectorType(getCommonElementType(Ctx, VX, VY),
                                VX->getNumElements());
  }
  case Type::DependentSizedExtVector: {
    const auto *VX = cast<DependentSizedExtVectorType>(X),
               *VY = cast<DependentSizedExtVectorType>(Y);
    return Ctx.getDependentSizedExtVectorType(getCommonElementType(Ctx, VX, VY),
                                              getCommonSizeExpr(Ctx, VX, VY),
                                              getCommonAttrLoc(VX, VY));
  }
  case Type::DependentVector: {
    const auto *VX = cast<DependentVectorType>(X),
               *VY = cast<DependentVectorType>(Y);
    assert(VX->getVectorKind() == VY->getVectorKind());
    return Ctx.getDependentVectorType(
        getCommonElementType(Ctx, VX, VY), getCommonSizeExpr(Ctx, VX, VY),
        getCommonAttrLoc(VX, VY), VX->getVectorKind());
  }
  case Type::Enum:
  case Type::Record:
  case Type::InjectedClassName: {
    const auto *TX = cast<TagType>(X), *TY = cast<TagType>(Y);
    return Ctx.getTagType(::getCommonTypeKeyword(TX, TY, /*IsSame=*/false),
                          ::getCommonQualifier(Ctx, TX, TY, /*IsSame=*/false),
                          ::getCommonDeclChecked(TX->getDecl(), TY->getDecl()),
                          /*OwnsTag=*/false);
  }
  case Type::TemplateSpecialization: {
    const auto *TX = cast<TemplateSpecializationType>(X),
               *TY = cast<TemplateSpecializationType>(Y);
    auto As = getCommonTemplateArguments(Ctx, TX->template_arguments(),
                                         TY->template_arguments());
    return Ctx.getTemplateSpecializationType(
        getCommonTypeKeyword(TX, TY, /*IsSame=*/false),
        ::getCommonTemplateNameChecked(Ctx, TX->getTemplateName(),
                                       TY->getTemplateName(),
                                       /*IgnoreDeduced=*/true),
        As, /*CanonicalArgs=*/{}, X->getCanonicalTypeInternal());
  }
  case Type::Decltype: {
    const auto *DX = cast<DecltypeType>(X);
    [[maybe_unused]] const auto *DY = cast<DecltypeType>(Y);
    assert(DX->isDependentType());
    assert(DY->isDependentType());
    assert(Ctx.hasSameExpr(DX->getUnderlyingExpr(), DY->getUnderlyingExpr()));
    // As Decltype is not uniqued, building a common type would be wasteful.
    return QualType(DX, 0);
  }
  case Type::PackIndexing: {
    const auto *DX = cast<PackIndexingType>(X);
    [[maybe_unused]] const auto *DY = cast<PackIndexingType>(Y);
    assert(DX->isDependentType());
    assert(DY->isDependentType());
    assert(Ctx.hasSameExpr(DX->getIndexExpr(), DY->getIndexExpr()));
    // Like Decltype, not uniqued; reuse X's node.
    return QualType(DX, 0);
  }
  case Type::DependentName: {
    const auto *NX = cast<DependentNameType>(X),
               *NY = cast<DependentNameType>(Y);
    assert(NX->getIdentifier() == NY->getIdentifier());
    return Ctx.getDependentNameType(
        getCommonTypeKeyword(NX, NY, /*IsSame=*/true),
        getCommonQualifier(Ctx, NX, NY, /*IsSame=*/true), NX->getIdentifier());
  }
  case Type::OverflowBehavior: {
    const auto *NX = cast<OverflowBehaviorType>(X),
               *NY = cast<OverflowBehaviorType>(Y);
    assert(NX->getBehaviorKind() == NY->getBehaviorKind());
    return Ctx.getOverflowBehaviorType(
        NX->getBehaviorKind(),
        getCommonTypeWithQualifierLifting(Ctx, NX->getUnderlyingType(),
                                          NY->getUnderlyingType(), QX, QY));
  }
  case Type::UnaryTransform: {
    const auto *TX = cast<UnaryTransformType>(X),
               *TY = cast<UnaryTransformType>(Y);
    assert(TX->getUTTKind() == TY->getUTTKind());
    return Ctx.getUnaryTransformType(
        Ctx.getCommonSugaredType(TX->getBaseType(), TY->getBaseType()),
        Ctx.getCommonSugaredType(TX->getUnderlyingType(),
                                 TY->getUnderlyingType()),
        TX->getUTTKind());
  }
  case Type::PackExpansion: {
    const auto *PX = cast<PackExpansionType>(X),
               *PY = cast<PackExpansionType>(Y);
    assert(PX->getNumExpansions() == PY->getNumExpansions());
    return Ctx.getPackExpansionType(
        Ctx.getCommonSugaredType(PX->getPattern(), PY->getPattern()),
        PX->getNumExpansions(), /*ExpectPackInType=*/false);
  }
  case Type::Pipe: {
    const auto *PX = cast<PipeType>(X), *PY = cast<PipeType>(Y);
    assert(PX->isReadOnly() == PY->isReadOnly());
    // Select the matching factory via member-function pointer so the element
    // merge is written only once.
    auto MP = PX->isReadOnly() ? &ASTContext::getReadPipeType
                               : &ASTContext::getWritePipeType;
    return (Ctx.*MP)(getCommonElementType(Ctx, PX, PY));
  }
  case Type::TemplateTypeParm: {
    const auto *TX = cast<TemplateTypeParmType>(X),
               *TY = cast<TemplateTypeParmType>(Y);
    assert(TX->getDepth() == TY->getDepth());
    assert(TX->getIndex() == TY->getIndex());
    assert(TX->isParameterPack() == TY->isParameterPack());
    return Ctx.getTemplateTypeParmType(
        TX->getDepth(), TX->getIndex(), TX->isParameterPack(),
        getCommonDecl(TX->getDecl(), TY->getDecl()));
  }
  }
  llvm_unreachable("Unknown Type Class");
}
14575
14576static QualType getCommonSugarTypeNode(const ASTContext &Ctx, const Type *X,
14577 const Type *Y,
14578 SplitQualType Underlying) {
14579 Type::TypeClass TC = X->getTypeClass();
14580 if (TC != Y->getTypeClass())
14581 return QualType();
14582 switch (TC) {
14583#define UNEXPECTED_TYPE(Class, Kind) \
14584 case Type::Class: \
14585 llvm_unreachable("Unexpected " Kind ": " #Class);
14586#define TYPE(Class, Base)
14587#define DEPENDENT_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "dependent")
14588#include "clang/AST/TypeNodes.inc"
14589
14590#define CANONICAL_TYPE(Class) UNEXPECTED_TYPE(Class, "canonical")
14591 CANONICAL_TYPE(Atomic)
14592 CANONICAL_TYPE(BitInt)
14593 CANONICAL_TYPE(BlockPointer)
14594 CANONICAL_TYPE(Builtin)
14595 CANONICAL_TYPE(Complex)
14596 CANONICAL_TYPE(ConstantArray)
14597 CANONICAL_TYPE(ArrayParameter)
14598 CANONICAL_TYPE(ConstantMatrix)
14599 CANONICAL_TYPE(Enum)
14600 CANONICAL_TYPE(ExtVector)
14601 CANONICAL_TYPE(FunctionNoProto)
14602 CANONICAL_TYPE(FunctionProto)
14603 CANONICAL_TYPE(IncompleteArray)
14604 CANONICAL_TYPE(HLSLAttributedResource)
14605 CANONICAL_TYPE(HLSLInlineSpirv)
14606 CANONICAL_TYPE(LValueReference)
14607 CANONICAL_TYPE(ObjCInterface)
14608 CANONICAL_TYPE(ObjCObject)
14609 CANONICAL_TYPE(ObjCObjectPointer)
14610 CANONICAL_TYPE(OverflowBehavior)
14611 CANONICAL_TYPE(Pipe)
14612 CANONICAL_TYPE(Pointer)
14613 CANONICAL_TYPE(Record)
14614 CANONICAL_TYPE(RValueReference)
14615 CANONICAL_TYPE(VariableArray)
14616 CANONICAL_TYPE(Vector)
14617#undef CANONICAL_TYPE
14618
14619#undef UNEXPECTED_TYPE
14620
14621 case Type::Adjusted: {
14622 const auto *AX = cast<AdjustedType>(Val: X), *AY = cast<AdjustedType>(Val: Y);
14623 QualType OX = AX->getOriginalType(), OY = AY->getOriginalType();
14624 if (!Ctx.hasSameType(T1: OX, T2: OY))
14625 return QualType();
14626 // FIXME: It's inefficient to have to unify the original types.
14627 return Ctx.getAdjustedType(Orig: Ctx.getCommonSugaredType(X: OX, Y: OY),
14628 New: Ctx.getQualifiedType(split: Underlying));
14629 }
14630 case Type::Decayed: {
14631 const auto *DX = cast<DecayedType>(Val: X), *DY = cast<DecayedType>(Val: Y);
14632 QualType OX = DX->getOriginalType(), OY = DY->getOriginalType();
14633 if (!Ctx.hasSameType(T1: OX, T2: OY))
14634 return QualType();
14635 // FIXME: It's inefficient to have to unify the original types.
14636 return Ctx.getDecayedType(Orig: Ctx.getCommonSugaredType(X: OX, Y: OY),
14637 Decayed: Ctx.getQualifiedType(split: Underlying));
14638 }
14639 case Type::Attributed: {
14640 const auto *AX = cast<AttributedType>(Val: X), *AY = cast<AttributedType>(Val: Y);
14641 AttributedType::Kind Kind = AX->getAttrKind();
14642 if (Kind != AY->getAttrKind())
14643 return QualType();
14644 QualType MX = AX->getModifiedType(), MY = AY->getModifiedType();
14645 if (!Ctx.hasSameType(T1: MX, T2: MY))
14646 return QualType();
14647 // FIXME: It's inefficient to have to unify the modified types.
14648 return Ctx.getAttributedType(attrKind: Kind, modifiedType: Ctx.getCommonSugaredType(X: MX, Y: MY),
14649 equivalentType: Ctx.getQualifiedType(split: Underlying),
14650 attr: AX->getAttr());
14651 }
14652 case Type::BTFTagAttributed: {
14653 const auto *BX = cast<BTFTagAttributedType>(Val: X);
14654 const BTFTypeTagAttr *AX = BX->getAttr();
14655 // The attribute is not uniqued, so just compare the tag.
14656 if (AX->getBTFTypeTag() !=
14657 cast<BTFTagAttributedType>(Val: Y)->getAttr()->getBTFTypeTag())
14658 return QualType();
14659 return Ctx.getBTFTagAttributedType(BTFAttr: AX, Wrapped: Ctx.getQualifiedType(split: Underlying));
14660 }
14661 case Type::Auto: {
14662 const auto *AX = cast<AutoType>(Val: X), *AY = cast<AutoType>(Val: Y);
14663 assert(AX->getDeducedKind() == DeducedKind::Deduced);
14664 assert(AY->getDeducedKind() == DeducedKind::Deduced);
14665
14666 AutoTypeKeyword KW = AX->getKeyword();
14667 if (KW != AY->getKeyword())
14668 return QualType();
14669
14670 TemplateDecl *CD = ::getCommonDecl(X: AX->getTypeConstraintConcept(),
14671 Y: AY->getTypeConstraintConcept());
14672 SmallVector<TemplateArgument, 8> As;
14673 if (CD &&
14674 getCommonTemplateArguments(Ctx, R&: As, Xs: AX->getTypeConstraintArguments(),
14675 Ys: AY->getTypeConstraintArguments())) {
14676 CD = nullptr; // The arguments differ, so make it unconstrained.
14677 As.clear();
14678 }
14679
14680 // Both auto types can't be dependent, otherwise they wouldn't have been
14681 // sugar. This implies they can't contain unexpanded packs either.
14682 return Ctx.getAutoType(DK: DeducedKind::Deduced,
14683 DeducedAsType: Ctx.getQualifiedType(split: Underlying), Keyword: AX->getKeyword(),
14684 TypeConstraintConcept: CD, TypeConstraintArgs: As);
14685 }
14686 case Type::PackIndexing:
14687 case Type::Decltype:
14688 return QualType();
14689 case Type::DeducedTemplateSpecialization:
14690 // FIXME: Try to merge these.
14691 return QualType();
14692 case Type::MacroQualified: {
14693 const auto *MX = cast<MacroQualifiedType>(Val: X),
14694 *MY = cast<MacroQualifiedType>(Val: Y);
14695 const IdentifierInfo *IX = MX->getMacroIdentifier();
14696 if (IX != MY->getMacroIdentifier())
14697 return QualType();
14698 return Ctx.getMacroQualifiedType(UnderlyingTy: Ctx.getQualifiedType(split: Underlying), MacroII: IX);
14699 }
14700 case Type::SubstTemplateTypeParm: {
14701 const auto *SX = cast<SubstTemplateTypeParmType>(Val: X),
14702 *SY = cast<SubstTemplateTypeParmType>(Val: Y);
14703 Decl *CD =
14704 ::getCommonDecl(X: SX->getAssociatedDecl(), Y: SY->getAssociatedDecl());
14705 if (!CD)
14706 return QualType();
14707 unsigned Index = SX->getIndex();
14708 if (Index != SY->getIndex())
14709 return QualType();
14710 auto PackIndex = SX->getPackIndex();
14711 if (PackIndex != SY->getPackIndex())
14712 return QualType();
14713 return Ctx.getSubstTemplateTypeParmType(Replacement: Ctx.getQualifiedType(split: Underlying),
14714 AssociatedDecl: CD, Index, PackIndex,
14715 Final: SX->getFinal() && SY->getFinal());
14716 }
14717 case Type::ObjCTypeParam:
14718 // FIXME: Try to merge these.
14719 return QualType();
14720 case Type::Paren:
14721 return Ctx.getParenType(InnerType: Ctx.getQualifiedType(split: Underlying));
14722
14723 case Type::TemplateSpecialization: {
14724 const auto *TX = cast<TemplateSpecializationType>(Val: X),
14725 *TY = cast<TemplateSpecializationType>(Val: Y);
14726 TemplateName CTN =
14727 ::getCommonTemplateName(Ctx, X: TX->getTemplateName(),
14728 Y: TY->getTemplateName(), /*IgnoreDeduced=*/true);
14729 if (!CTN.getAsVoidPointer())
14730 return QualType();
14731 SmallVector<TemplateArgument, 8> As;
14732 if (getCommonTemplateArguments(Ctx, R&: As, Xs: TX->template_arguments(),
14733 Ys: TY->template_arguments()))
14734 return QualType();
14735 return Ctx.getTemplateSpecializationType(
14736 Keyword: getCommonTypeKeyword(X: TX, Y: TY, /*IsSame=*/false), Template: CTN, SpecifiedArgs: As,
14737 /*CanonicalArgs=*/{}, Underlying: Ctx.getQualifiedType(split: Underlying));
14738 }
14739 case Type::Typedef: {
14740 const auto *TX = cast<TypedefType>(Val: X), *TY = cast<TypedefType>(Val: Y);
14741 const TypedefNameDecl *CD = ::getCommonDecl(X: TX->getDecl(), Y: TY->getDecl());
14742 if (!CD)
14743 return QualType();
14744 return Ctx.getTypedefType(
14745 Keyword: ::getCommonTypeKeyword(X: TX, Y: TY, /*IsSame=*/false),
14746 Qualifier: ::getCommonQualifier(Ctx, X: TX, Y: TY, /*IsSame=*/false), Decl: CD,
14747 UnderlyingType: Ctx.getQualifiedType(split: Underlying));
14748 }
14749 case Type::TypeOf: {
14750 // The common sugar between two typeof expressions, where one is
14751 // potentially a typeof_unqual and the other is not, we unify to the
14752 // qualified type as that retains the most information along with the type.
14753 // We only return a typeof_unqual type when both types are unqual types.
14754 TypeOfKind Kind = TypeOfKind::Qualified;
14755 if (cast<TypeOfType>(Val: X)->getKind() == cast<TypeOfType>(Val: Y)->getKind() &&
14756 cast<TypeOfType>(Val: X)->getKind() == TypeOfKind::Unqualified)
14757 Kind = TypeOfKind::Unqualified;
14758 return Ctx.getTypeOfType(tofType: Ctx.getQualifiedType(split: Underlying), Kind);
14759 }
14760 case Type::TypeOfExpr:
14761 return QualType();
14762
14763 case Type::UnaryTransform: {
14764 const auto *UX = cast<UnaryTransformType>(Val: X),
14765 *UY = cast<UnaryTransformType>(Val: Y);
14766 UnaryTransformType::UTTKind KX = UX->getUTTKind();
14767 if (KX != UY->getUTTKind())
14768 return QualType();
14769 QualType BX = UX->getBaseType(), BY = UY->getBaseType();
14770 if (!Ctx.hasSameType(T1: BX, T2: BY))
14771 return QualType();
14772 // FIXME: It's inefficient to have to unify the base types.
14773 return Ctx.getUnaryTransformType(BaseType: Ctx.getCommonSugaredType(X: BX, Y: BY),
14774 UnderlyingType: Ctx.getQualifiedType(split: Underlying), Kind: KX);
14775 }
14776 case Type::Using: {
14777 const auto *UX = cast<UsingType>(Val: X), *UY = cast<UsingType>(Val: Y);
14778 const UsingShadowDecl *CD = ::getCommonDecl(X: UX->getDecl(), Y: UY->getDecl());
14779 if (!CD)
14780 return QualType();
14781 return Ctx.getUsingType(Keyword: ::getCommonTypeKeyword(X: UX, Y: UY, /*IsSame=*/false),
14782 Qualifier: ::getCommonQualifier(Ctx, X: UX, Y: UY, /*IsSame=*/false),
14783 D: CD, UnderlyingType: Ctx.getQualifiedType(split: Underlying));
14784 }
14785 case Type::MemberPointer: {
14786 const auto *PX = cast<MemberPointerType>(Val: X),
14787 *PY = cast<MemberPointerType>(Val: Y);
14788 CXXRecordDecl *Cls = PX->getMostRecentCXXRecordDecl();
14789 assert(Cls == PY->getMostRecentCXXRecordDecl());
14790 return Ctx.getMemberPointerType(
14791 T: ::getCommonPointeeType(Ctx, X: PX, Y: PY),
14792 Qualifier: ::getCommonQualifier(Ctx, X: PX, Y: PY, /*IsSame=*/false), Cls);
14793 }
14794 case Type::CountAttributed: {
14795 const auto *DX = cast<CountAttributedType>(Val: X),
14796 *DY = cast<CountAttributedType>(Val: Y);
14797 if (DX->isCountInBytes() != DY->isCountInBytes())
14798 return QualType();
14799 if (DX->isOrNull() != DY->isOrNull())
14800 return QualType();
14801 Expr *CEX = DX->getCountExpr();
14802 Expr *CEY = DY->getCountExpr();
14803 ArrayRef<clang::TypeCoupledDeclRefInfo> CDX = DX->getCoupledDecls();
14804 if (Ctx.hasSameExpr(X: CEX, Y: CEY))
14805 return Ctx.getCountAttributedType(WrappedTy: Ctx.getQualifiedType(split: Underlying), CountExpr: CEX,
14806 CountInBytes: DX->isCountInBytes(), OrNull: DX->isOrNull(),
14807 DependentDecls: CDX);
14808 if (!CEX->isIntegerConstantExpr(Ctx) || !CEY->isIntegerConstantExpr(Ctx))
14809 return QualType();
14810 // Two declarations with the same integer constant may still differ in their
14811 // expression pointers, so we need to evaluate them.
14812 llvm::APSInt VX = *CEX->getIntegerConstantExpr(Ctx);
14813 llvm::APSInt VY = *CEY->getIntegerConstantExpr(Ctx);
14814 if (VX != VY)
14815 return QualType();
14816 return Ctx.getCountAttributedType(WrappedTy: Ctx.getQualifiedType(split: Underlying), CountExpr: CEX,
14817 CountInBytes: DX->isCountInBytes(), OrNull: DX->isOrNull(),
14818 DependentDecls: CDX);
14819 }
14820 case Type::PredefinedSugar:
14821 assert(cast<PredefinedSugarType>(X)->getKind() !=
14822 cast<PredefinedSugarType>(Y)->getKind());
14823 return QualType();
14824 }
14825 llvm_unreachable("Unhandled Type Class");
14826}
14827
14828static auto unwrapSugar(SplitQualType &T, Qualifiers &QTotal) {
14829 SmallVector<SplitQualType, 8> R;
14830 while (true) {
14831 QTotal.addConsistentQualifiers(qs: T.Quals);
14832 QualType NT = T.Ty->getLocallyUnqualifiedSingleStepDesugaredType();
14833 if (NT == QualType(T.Ty, 0))
14834 break;
14835 R.push_back(Elt: T);
14836 T = NT.split();
14837 }
14838 return R;
14839}
14840
/// Compute a type denoting the same entity as both \p X and \p Y, retaining
/// only the type sugar (typedefs, aliases, elaborations, qualifier spelling)
/// that the two share. \p X and \p Y must already be the same canonical type
/// (ignoring top-level qualifiers when \p Unqualified is set).
QualType ASTContext::getCommonSugaredType(QualType X, QualType Y,
                                          bool Unqualified) const {
  assert(Unqualified ? hasSameUnqualifiedType(X, Y) : hasSameType(X, Y));
  // Identical types trivially share all of their sugar.
  if (X == Y)
    return X;
  if (!Unqualified) {
    // A canonical type carries no sugar, so it is already the common type.
    if (X.isCanonical())
      return X;
    if (Y.isCanonical())
      return Y;
  }

  SplitQualType SX = X.split(), SY = Y.split();
  Qualifiers QX, QY;
  // Desugar SX and SY, setting the sugar and qualifiers aside into Xs and Ys,
  // until we reach their underlying "canonical nodes". Note these are not
  // necessarily canonical types, as they may still have sugared properties.
  // QX and QY will store the sum of all qualifiers in Xs and Ys respectively.
  auto Xs = ::unwrapSugar(T&: SX, QTotal&: QX), Ys = ::unwrapSugar(T&: SY, QTotal&: QY);

  // If this is an ArrayType, the element qualifiers are interchangeable with
  // the top level qualifiers.
  // * In case the canonical nodes are the same, the elements types are already
  //   the same.
  // * Otherwise, the element types will be made the same, and any different
  //   element qualifiers will be moved up to the top level qualifiers, per
  //   'getCommonArrayElementType'.
  // In both cases, this means there may be top level qualifiers which differ
  // between X and Y. If so, these differing qualifiers are redundant with the
  // element qualifiers, and can be removed without changing the canonical type.
  // The desired behaviour is the same as for the 'Unqualified' case here:
  // treat the redundant qualifiers as sugar, remove the ones which are not
  // common to both sides.
  bool KeepCommonQualifiers =
      Unqualified || isa<ArrayType, OverflowBehaviorType>(Val: SX.Ty);

  if (SX.Ty != SY.Ty) {
    // The canonical nodes differ. Build a common canonical node out of the two,
    // unifying their sugar. This may recurse back here.
    SX.Ty =
        ::getCommonNonSugarTypeNode(Ctx: *this, X: SX.Ty, QX, Y: SY.Ty, QY).getTypePtr();
  } else {
    // The canonical nodes were identical: We may have desugared too much.
    // Add any common sugar back in.
    while (!Xs.empty() && !Ys.empty() && Xs.back().Ty == Ys.back().Ty) {
      // Re-adding a shared sugar node: its qualifiers are no longer "stripped",
      // so remove them from the accumulated totals.
      QX -= SX.Quals;
      QY -= SY.Quals;
      SX = Xs.pop_back_val();
      SY = Ys.pop_back_val();
    }
  }
  if (KeepCommonQualifiers)
    QX = Qualifiers::removeCommonQualifiers(L&: QX, R&: QY);
  else
    assert(QX == QY);

  // Even though the remaining sugar nodes in Xs and Ys differ, some may be
  // related. Walk up these nodes, unifying them and adding the result.
  while (!Xs.empty() && !Ys.empty()) {
    auto Underlying = SplitQualType(
        SX.Ty, Qualifiers::removeCommonQualifiers(L&: SX.Quals, R&: SY.Quals));
    SX = Xs.pop_back_val();
    SY = Ys.pop_back_val();
    SX.Ty = ::getCommonSugarTypeNode(Ctx: *this, X: SX.Ty, Y: SY.Ty, Underlying)
                .getTypePtrOrNull();
    // Stop at the first pair which is unrelated.
    if (!SX.Ty) {
      SX.Ty = Underlying.Ty;
      break;
    }
    QX -= Underlying.Quals;
  };

  // Add back the missing accumulated qualifiers, which were stripped off
  // with the sugar nodes we could not unify.
  QualType R = getQualifiedType(T: SX.Ty, Qs: QX);
  assert(Unqualified ? hasSameUnqualifiedType(R, X) : hasSameType(R, X));
  return R;
}
14920
/// Map a fixed-point type to its non-saturating counterpart (e.g.
/// _Sat short _Accum -> short _Accum). Unsaturated types map to themselves.
QualType ASTContext::getCorrespondingUnsaturatedType(QualType Ty) const {
  assert(Ty->isFixedPointType());

  if (Ty->isUnsaturatedFixedPointType())
    return Ty;

  // One-to-one table from each Sat* builtin kind to the matching
  // unsaturated ASTContext type singleton.
  switch (Ty->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a saturated fixed point type!");
  case BuiltinType::SatShortAccum:
    return ShortAccumTy;
  case BuiltinType::SatAccum:
    return AccumTy;
  case BuiltinType::SatLongAccum:
    return LongAccumTy;
  case BuiltinType::SatUShortAccum:
    return UnsignedShortAccumTy;
  case BuiltinType::SatUAccum:
    return UnsignedAccumTy;
  case BuiltinType::SatULongAccum:
    return UnsignedLongAccumTy;
  case BuiltinType::SatShortFract:
    return ShortFractTy;
  case BuiltinType::SatFract:
    return FractTy;
  case BuiltinType::SatLongFract:
    return LongFractTy;
  case BuiltinType::SatUShortFract:
    return UnsignedShortFractTy;
  case BuiltinType::SatUFract:
    return UnsignedFractTy;
  case BuiltinType::SatULongFract:
    return UnsignedLongFractTy;
  }
}
14956
/// Map a fixed-point type to its saturating counterpart (e.g.
/// short _Accum -> _Sat short _Accum). Saturated types map to themselves.
QualType ASTContext::getCorrespondingSaturatedType(QualType Ty) const {
  assert(Ty->isFixedPointType());

  if (Ty->isSaturatedFixedPointType()) return Ty;

  // One-to-one table from each unsaturated builtin kind to the matching
  // Sat* ASTContext type singleton.
  switch (Ty->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a fixed point type!");
  case BuiltinType::ShortAccum:
    return SatShortAccumTy;
  case BuiltinType::Accum:
    return SatAccumTy;
  case BuiltinType::LongAccum:
    return SatLongAccumTy;
  case BuiltinType::UShortAccum:
    return SatUnsignedShortAccumTy;
  case BuiltinType::UAccum:
    return SatUnsignedAccumTy;
  case BuiltinType::ULongAccum:
    return SatUnsignedLongAccumTy;
  case BuiltinType::ShortFract:
    return SatShortFractTy;
  case BuiltinType::Fract:
    return SatFractTy;
  case BuiltinType::LongFract:
    return SatLongFractTy;
  case BuiltinType::UShortFract:
    return SatUnsignedShortFractTy;
  case BuiltinType::UFract:
    return SatUnsignedFractTy;
  case BuiltinType::ULongFract:
    return SatUnsignedLongFractTy;
  }
}
14991
14992LangAS ASTContext::getLangASForBuiltinAddressSpace(unsigned AS) const {
14993 if (LangOpts.OpenCL)
14994 return getTargetInfo().getOpenCLBuiltinAddressSpace(AS);
14995
14996 if (LangOpts.CUDA)
14997 return getTargetInfo().getCUDABuiltinAddressSpace(AS);
14998
14999 return getLangASFromTargetAS(TargetAS: AS);
15000}
15001
// Explicitly instantiate this in case a Redeclarable<T> is used from a TU that
// doesn't include ASTContext.h. This pins the definition of makeValue in this
// TU so such users still link.
template
clang::LazyGenerationalUpdatePtr<
    const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::ValueType
clang::LazyGenerationalUpdatePtr<
    const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::makeValue(
    const clang::ASTContext &Ctx, Decl *Value);
15010
/// Return the number of fractional bits (the scale) of the given fixed-point
/// type, as defined by the target ABI. Saturated and unsaturated variants of
/// the same type share a scale.
unsigned char ASTContext::getFixedPointScale(QualType Ty) const {
  assert(Ty->isFixedPointType());

  const TargetInfo &Target = getTargetInfo();
  switch (Ty->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a fixed point type!");
  case BuiltinType::ShortAccum:
  case BuiltinType::SatShortAccum:
    return Target.getShortAccumScale();
  case BuiltinType::Accum:
  case BuiltinType::SatAccum:
    return Target.getAccumScale();
  case BuiltinType::LongAccum:
  case BuiltinType::SatLongAccum:
    return Target.getLongAccumScale();
  case BuiltinType::UShortAccum:
  case BuiltinType::SatUShortAccum:
    return Target.getUnsignedShortAccumScale();
  case BuiltinType::UAccum:
  case BuiltinType::SatUAccum:
    return Target.getUnsignedAccumScale();
  case BuiltinType::ULongAccum:
  case BuiltinType::SatULongAccum:
    return Target.getUnsignedLongAccumScale();
  case BuiltinType::ShortFract:
  case BuiltinType::SatShortFract:
    return Target.getShortFractScale();
  case BuiltinType::Fract:
  case BuiltinType::SatFract:
    return Target.getFractScale();
  case BuiltinType::LongFract:
  case BuiltinType::SatLongFract:
    return Target.getLongFractScale();
  case BuiltinType::UShortFract:
  case BuiltinType::SatUShortFract:
    return Target.getUnsignedShortFractScale();
  case BuiltinType::UFract:
  case BuiltinType::SatUFract:
    return Target.getUnsignedFractScale();
  case BuiltinType::ULongFract:
  case BuiltinType::SatULongFract:
    return Target.getUnsignedLongFractScale();
  }
}
15056
/// Return the number of integral (non-fractional, non-sign) bits of the given
/// fixed-point type per the target ABI. _Fract types have no integral bits by
/// definition, so every *Fract kind returns 0.
unsigned char ASTContext::getFixedPointIBits(QualType Ty) const {
  assert(Ty->isFixedPointType());

  const TargetInfo &Target = getTargetInfo();
  switch (Ty->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a fixed point type!");
  case BuiltinType::ShortAccum:
  case BuiltinType::SatShortAccum:
    return Target.getShortAccumIBits();
  case BuiltinType::Accum:
  case BuiltinType::SatAccum:
    return Target.getAccumIBits();
  case BuiltinType::LongAccum:
  case BuiltinType::SatLongAccum:
    return Target.getLongAccumIBits();
  case BuiltinType::UShortAccum:
  case BuiltinType::SatUShortAccum:
    return Target.getUnsignedShortAccumIBits();
  case BuiltinType::UAccum:
  case BuiltinType::SatUAccum:
    return Target.getUnsignedAccumIBits();
  case BuiltinType::ULongAccum:
  case BuiltinType::SatULongAccum:
    return Target.getUnsignedLongAccumIBits();
  case BuiltinType::ShortFract:
  case BuiltinType::SatShortFract:
  case BuiltinType::Fract:
  case BuiltinType::SatFract:
  case BuiltinType::LongFract:
  case BuiltinType::SatLongFract:
  case BuiltinType::UShortFract:
  case BuiltinType::SatUShortFract:
  case BuiltinType::UFract:
  case BuiltinType::SatUFract:
  case BuiltinType::ULongFract:
  case BuiltinType::SatULongFract:
    return 0;
  }
}
15097
15098llvm::FixedPointSemantics
15099ASTContext::getFixedPointSemantics(QualType Ty) const {
15100 assert((Ty->isFixedPointType() || Ty->isIntegerType()) &&
15101 "Can only get the fixed point semantics for a "
15102 "fixed point or integer type.");
15103 if (Ty->isIntegerType())
15104 return llvm::FixedPointSemantics::GetIntegerSemantics(
15105 Width: getIntWidth(T: Ty), IsSigned: Ty->isSignedIntegerType());
15106
15107 bool isSigned = Ty->isSignedFixedPointType();
15108 return llvm::FixedPointSemantics(
15109 static_cast<unsigned>(getTypeSize(T: Ty)), getFixedPointScale(Ty), isSigned,
15110 Ty->isSaturatedFixedPointType(),
15111 !isSigned && getTargetInfo().doUnsignedFixedPointTypesHavePadding());
15112}
15113
/// Return the largest representable value of the fixed-point type \p Ty.
llvm::APFixedPoint ASTContext::getFixedPointMax(QualType Ty) const {
  assert(Ty->isFixedPointType());
  return llvm::APFixedPoint::getMax(Sema: getFixedPointSemantics(Ty));
}
15118
/// Return the smallest representable value of the fixed-point type \p Ty.
llvm::APFixedPoint ASTContext::getFixedPointMin(QualType Ty) const {
  assert(Ty->isFixedPointType());
  return llvm::APFixedPoint::getMin(Sema: getFixedPointSemantics(Ty));
}
15123
/// Map an unsigned fixed-point type to its signed counterpart with the same
/// rank and saturation (e.g. unsigned _Accum -> _Accum,
/// _Sat unsigned short _Fract -> _Sat short _Fract).
QualType ASTContext::getCorrespondingSignedFixedPointType(QualType Ty) const {
  assert(Ty->isUnsignedFixedPointType() &&
         "Expected unsigned fixed point type");

  switch (Ty->castAs<BuiltinType>()->getKind()) {
  case BuiltinType::UShortAccum:
    return ShortAccumTy;
  case BuiltinType::UAccum:
    return AccumTy;
  case BuiltinType::ULongAccum:
    return LongAccumTy;
  case BuiltinType::SatUShortAccum:
    return SatShortAccumTy;
  case BuiltinType::SatUAccum:
    return SatAccumTy;
  case BuiltinType::SatULongAccum:
    return SatLongAccumTy;
  case BuiltinType::UShortFract:
    return ShortFractTy;
  case BuiltinType::UFract:
    return FractTy;
  case BuiltinType::ULongFract:
    return LongFractTy;
  case BuiltinType::SatUShortFract:
    return SatShortFractTy;
  case BuiltinType::SatUFract:
    return SatFractTy;
  case BuiltinType::SatULongFract:
    return SatLongFractTy;
  default:
    llvm_unreachable("Unexpected unsigned fixed point type");
  }
}
15157
15158// Given a list of FMV features, return a concatenated list of the
15159// corresponding backend features (which may contain duplicates).
15160static std::vector<std::string> getFMVBackendFeaturesFor(
15161 const llvm::SmallVectorImpl<StringRef> &FMVFeatStrings) {
15162 std::vector<std::string> BackendFeats;
15163 llvm::AArch64::ExtensionSet FeatureBits;
15164 for (StringRef F : FMVFeatStrings)
15165 if (auto FMVExt = llvm::AArch64::parseFMVExtension(Extension: F))
15166 if (FMVExt->ID)
15167 FeatureBits.enable(E: *FMVExt->ID);
15168 FeatureBits.toLLVMFeatureList(Features&: BackendFeats);
15169 return BackendFeats;
15170}
15171
15172ParsedTargetAttr
15173ASTContext::filterFunctionTargetAttrs(const TargetAttr *TD) const {
15174 assert(TD != nullptr);
15175 ParsedTargetAttr ParsedAttr = Target->parseTargetAttr(Str: TD->getFeaturesStr());
15176
15177 llvm::erase_if(C&: ParsedAttr.Features, P: [&](const std::string &Feat) {
15178 return !Target->isValidFeatureName(Feature: StringRef{Feat}.substr(Start: 1));
15179 });
15180 return ParsedAttr;
15181}
15182
15183void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
15184 const FunctionDecl *FD) const {
15185 if (FD)
15186 getFunctionFeatureMap(FeatureMap, GD: GlobalDecl().getWithDecl(D: FD));
15187 else
15188 Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(),
15189 CPU: Target->getTargetOpts().CPU,
15190 FeatureVec: Target->getTargetOpts().Features);
15191}
15192
// Fills in the supplied string map with the set of target features for the
// passed in function. The decision ladder mirrors the function-multiversioning
// attributes: target, cpu_specific, target_clones, target_version; with no
// attribute the TU-wide feature map is copied as-is.
void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
                                       GlobalDecl GD) const {
  StringRef TargetCPU = Target->getTargetOpts().CPU;
  const FunctionDecl *FD = GD.getDecl()->getAsFunction();
  if (const auto *TD = FD->getAttr<TargetAttr>()) {
    ParsedTargetAttr ParsedAttr = filterFunctionTargetAttrs(TD);

    // Make a copy of the features as passed on the command line into the
    // beginning of the additional features from the function to override.
    // AArch64 handles command line option features in parseTargetAttr().
    if (!Target->getTriple().isAArch64())
      ParsedAttr.Features.insert(
          position: ParsedAttr.Features.begin(),
          first: Target->getTargetOpts().FeaturesAsWritten.begin(),
          last: Target->getTargetOpts().FeaturesAsWritten.end());

    // The attribute may also select a different CPU; honor it if valid.
    if (ParsedAttr.CPU != "" && Target->isValidCPUName(Name: ParsedAttr.CPU))
      TargetCPU = ParsedAttr.CPU;

    // Now populate the feature map, first with the TargetCPU which is either
    // the default or a new one from the target attribute string. Then we'll use
    // the passed in features (FeaturesAsWritten) along with the new ones from
    // the attribute.
    Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU,
                           FeatureVec: ParsedAttr.Features);
  } else if (const auto *SD = FD->getAttr<CPUSpecificAttr>()) {
    // cpu_specific: features come from the CPU named for this version index,
    // with command-line features prepended so later entries win.
    llvm::SmallVector<StringRef, 32> FeaturesTmp;
    Target->getCPUSpecificCPUDispatchFeatures(
        Name: SD->getCPUName(Index: GD.getMultiVersionIndex())->getName(), Features&: FeaturesTmp);
    std::vector<std::string> Features(FeaturesTmp.begin(), FeaturesTmp.end());
    Features.insert(position: Features.begin(),
                    first: Target->getTargetOpts().FeaturesAsWritten.begin(),
                    last: Target->getTargetOpts().FeaturesAsWritten.end());
    Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU, FeatureVec: Features);
  } else if (const auto *TC = FD->getAttr<TargetClonesAttr>()) {
    // target_clones: the per-clone feature string is interpreted differently
    // per target architecture.
    if (Target->getTriple().isAArch64()) {
      // AArch64 clones name FMV features; map them to backend features.
      llvm::SmallVector<StringRef, 8> Feats;
      TC->getFeatures(Out&: Feats, Index: GD.getMultiVersionIndex());
      std::vector<std::string> Features = getFMVBackendFeaturesFor(FMVFeatStrings: Feats);
      Features.insert(position: Features.begin(),
                      first: Target->getTargetOpts().FeaturesAsWritten.begin(),
                      last: Target->getTargetOpts().FeaturesAsWritten.end());
      Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU, FeatureVec: Features);
    } else if (Target->getTriple().isRISCV()) {
      // RISC-V clones use target-attribute syntax; "default" adds nothing.
      StringRef VersionStr = TC->getFeatureStr(Index: GD.getMultiVersionIndex());
      std::vector<std::string> Features;
      if (VersionStr != "default") {
        ParsedTargetAttr ParsedAttr = Target->parseTargetAttr(Str: VersionStr);
        Features.insert(position: Features.begin(), first: ParsedAttr.Features.begin(),
                        last: ParsedAttr.Features.end());
      }
      Features.insert(position: Features.begin(),
                      first: Target->getTargetOpts().FeaturesAsWritten.begin(),
                      last: Target->getTargetOpts().FeaturesAsWritten.end());
      Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU, FeatureVec: Features);
    } else {
      // Other targets (e.g. X86): "arch=<cpu>" selects a CPU, any other
      // non-default string is a single feature name.
      std::vector<std::string> Features;
      StringRef VersionStr = TC->getFeatureStr(Index: GD.getMultiVersionIndex());
      if (VersionStr.starts_with(Prefix: "arch="))
        TargetCPU = VersionStr.drop_front(N: sizeof("arch=") - 1);
      else if (VersionStr != "default")
        Features.push_back(x: (StringRef{"+"} + VersionStr).str());
      Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU, FeatureVec: Features);
    }
  } else if (const auto *TV = FD->getAttr<TargetVersionAttr>()) {
    // target_version: RISC-V uses target-attribute syntax, AArch64 uses FMV
    // feature names; command-line features are prepended in both cases.
    std::vector<std::string> Features;
    if (Target->getTriple().isRISCV()) {
      ParsedTargetAttr ParsedAttr = Target->parseTargetAttr(Str: TV->getName());
      Features.insert(position: Features.begin(), first: ParsedAttr.Features.begin(),
                      last: ParsedAttr.Features.end());
    } else {
      assert(Target->getTriple().isAArch64());
      llvm::SmallVector<StringRef, 8> Feats;
      TV->getFeatures(Out&: Feats);
      Features = getFMVBackendFeaturesFor(FMVFeatStrings: Feats);
    }
    Features.insert(position: Features.begin(),
                    first: Target->getTargetOpts().FeaturesAsWritten.begin(),
                    last: Target->getTargetOpts().FeaturesAsWritten.end());
    Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU, FeatureVec: Features);
  } else {
    // No multiversioning attribute: reuse the TU-wide feature map.
    FeatureMap = Target->getTargetOpts().FeatureMap;
  }
}
15279
15280static SYCLKernelInfo BuildSYCLKernelInfo(ASTContext &Context,
15281 CanQualType KernelNameType,
15282 const FunctionDecl *FD) {
15283 // Host and device compilation may use different ABIs and different ABIs
15284 // may allocate name mangling discriminators differently. A discriminator
15285 // override is used to ensure consistent discriminator allocation across
15286 // host and device compilation.
15287 auto DeviceDiscriminatorOverrider =
15288 [](ASTContext &Ctx, const NamedDecl *ND) -> UnsignedOrNone {
15289 if (const auto *RD = dyn_cast<CXXRecordDecl>(Val: ND))
15290 if (RD->isLambda())
15291 return RD->getDeviceLambdaManglingNumber();
15292 return std::nullopt;
15293 };
15294 std::unique_ptr<MangleContext> MC{ItaniumMangleContext::create(
15295 Context, Diags&: Context.getDiagnostics(), Discriminator: DeviceDiscriminatorOverrider)};
15296
15297 // Construct a mangled name for the SYCL kernel caller offload entry point.
15298 // FIXME: The Itanium typeinfo mangling (_ZTS<type>) is currently used to
15299 // name the SYCL kernel caller offload entry point function. This mangling
15300 // does not suffice to clearly identify symbols that correspond to SYCL
15301 // kernel caller functions, nor is this mangling natural for targets that
15302 // use a non-Itanium ABI.
15303 std::string Buffer;
15304 Buffer.reserve(res_arg: 128);
15305 llvm::raw_string_ostream Out(Buffer);
15306 MC->mangleCanonicalTypeName(T: KernelNameType, Out);
15307 std::string KernelName = Out.str();
15308
15309 return {KernelNameType, FD, KernelName};
15310}
15311
15312void ASTContext::registerSYCLEntryPointFunction(FunctionDecl *FD) {
15313 // If the function declaration to register is invalid or dependent, the
15314 // registration attempt is ignored.
15315 if (FD->isInvalidDecl() || FD->isTemplated())
15316 return;
15317
15318 const auto *SKEPAttr = FD->getAttr<SYCLKernelEntryPointAttr>();
15319 assert(SKEPAttr && "Missing sycl_kernel_entry_point attribute");
15320
15321 // Be tolerant of multiple registration attempts so long as each attempt
15322 // is for the same entity. Callers are obligated to detect and diagnose
15323 // conflicting kernel names prior to calling this function.
15324 CanQualType KernelNameType = getCanonicalType(T: SKEPAttr->getKernelName());
15325 auto IT = SYCLKernels.find(Val: KernelNameType);
15326 assert((IT == SYCLKernels.end() ||
15327 declaresSameEntity(FD, IT->second.getKernelEntryPointDecl())) &&
15328 "SYCL kernel name conflict");
15329 (void)IT;
15330 SYCLKernels.insert(KV: std::make_pair(
15331 x&: KernelNameType, y: BuildSYCLKernelInfo(Context&: *this, KernelNameType, FD)));
15332}
15333
15334const SYCLKernelInfo &ASTContext::getSYCLKernelInfo(QualType T) const {
15335 CanQualType KernelNameType = getCanonicalType(T);
15336 return SYCLKernels.at(Val: KernelNameType);
15337}
15338
15339const SYCLKernelInfo *ASTContext::findSYCLKernelInfo(QualType T) const {
15340 CanQualType KernelNameType = getCanonicalType(T);
15341 auto IT = SYCLKernels.find(Val: KernelNameType);
15342 if (IT != SYCLKernels.end())
15343 return &IT->second;
15344 return nullptr;
15345}
15346
/// Allocate a fresh OMPTraitInfo and hand out a reference to it.
OMPTraitInfo &ASTContext::getNewOMPTraitInfo() {
  // NOTE(review): OMPTraitInfoVector appears to take ownership of this raw
  // allocation (presumably via unique_ptr elements, freed with the
  // ASTContext) — confirm against the member's declared type.
  OMPTraitInfoVector.emplace_back(Args: new OMPTraitInfo());
  return *OMPTraitInfoVector.back();
}
15351
15352const StreamingDiagnostic &clang::
15353operator<<(const StreamingDiagnostic &DB,
15354 const ASTContext::SectionInfo &Section) {
15355 if (Section.Decl)
15356 return DB << Section.Decl;
15357 return DB << "a prior #pragma section";
15358}
15359
/// Whether \p D is a CUDA/HIP declaration that may need to be given external
/// linkage (externalized) despite having internal GVA linkage.
bool ASTContext::mayExternalize(const Decl *D) const {
  // Internal-linkage variable per the GVA rules.
  bool IsInternalVar =
      isa<VarDecl>(Val: D) &&
      basicGVALinkageForVariable(Context: *this, VD: cast<VarDecl>(Val: D)) == GVA_Internal;
  // __device__/__constant__ written by the user (not compiler-implied).
  bool IsExplicitDeviceVar = (D->hasAttr<CUDADeviceAttr>() &&
                              !D->getAttr<CUDADeviceAttr>()->isImplicit()) ||
                             (D->hasAttr<CUDAConstantAttr>() &&
                              !D->getAttr<CUDAConstantAttr>()->isImplicit());
  // CUDA/HIP: managed variables need to be externalized since it is
  // a declaration in IR, therefore cannot have internal linkage. Kernels in
  // anonymous name space needs to be externalized to avoid duplicate symbols.
  return (IsInternalVar &&
          (D->hasAttr<HIPManagedAttr>() || IsExplicitDeviceVar)) ||
         (D->hasAttr<CUDAGlobalAttr>() &&
          basicGVALinkageForFunction(Context: *this, FD: cast<FunctionDecl>(Val: D)) ==
              GVA_Internal);
}
15377
15378bool ASTContext::shouldExternalize(const Decl *D) const {
15379 return mayExternalize(D) &&
15380 (D->hasAttr<HIPManagedAttr>() || D->hasAttr<CUDAGlobalAttr>() ||
15381 CUDADeviceVarODRUsedByHost.count(V: cast<VarDecl>(Val: D)));
15382}
15383
15384StringRef ASTContext::getCUIDHash() const {
15385 if (!CUIDHash.empty())
15386 return CUIDHash;
15387 if (LangOpts.CUID.empty())
15388 return StringRef();
15389 CUIDHash = llvm::utohexstr(X: llvm::MD5Hash(Str: LangOpts.CUID), /*LowerCase=*/true);
15390 return CUIDHash;
15391}
15392
15393const CXXRecordDecl *
15394ASTContext::baseForVTableAuthentication(const CXXRecordDecl *ThisClass) const {
15395 assert(ThisClass);
15396 assert(ThisClass->isPolymorphic());
15397 const CXXRecordDecl *PrimaryBase = ThisClass;
15398 while (1) {
15399 assert(PrimaryBase);
15400 assert(PrimaryBase->isPolymorphic());
15401 auto &Layout = getASTRecordLayout(D: PrimaryBase);
15402 auto Base = Layout.getPrimaryBase();
15403 if (!Base || Base == PrimaryBase || !Base->isPolymorphic())
15404 break;
15405 PrimaryBase = Base;
15406 }
15407 return PrimaryBase;
15408}
15409
/// Decide whether the thunk with \p MangledName for \p VirtualMethodDecl may
/// use the abbreviated (override-info-elided) mangling. With pointer
/// authentication enabled, several thunks can collapse to the same elided
/// name; only one representative per elided name (the lexicographically
/// smallest full mangling) keeps the abbreviated form. Results are cached
/// per method in ThunksToBeAbbreviated.
bool ASTContext::useAbbreviatedThunkName(GlobalDecl VirtualMethodDecl,
                                         StringRef MangledName) {
  auto *Method = cast<CXXMethodDecl>(Val: VirtualMethodDecl.getDecl());
  assert(Method->isVirtual());
  bool DefaultIncludesPointerAuth =
      LangOpts.PointerAuthCalls || LangOpts.PointerAuthIntrinsics;

  // Without pointer auth the elided name is never ambiguous, so every thunk
  // may abbreviate.
  if (!DefaultIncludesPointerAuth)
    return true;

  // Serve from the per-method cache when this method was already analyzed.
  auto Existing = ThunksToBeAbbreviated.find(Val: VirtualMethodDecl);
  if (Existing != ThunksToBeAbbreviated.end())
    return Existing->second.contains(key: MangledName.str());

  // First query for this method: mangle every thunk both with and without
  // override info, grouping the full names by their elided form.
  std::unique_ptr<MangleContext> Mangler(createMangleContext());
  llvm::StringMap<llvm::SmallVector<std::string, 2>> Thunks;
  auto VtableContext = getVTableContext();
  if (const auto *ThunkInfos = VtableContext->getThunkInfo(GD: VirtualMethodDecl)) {
    auto *Destructor = dyn_cast<CXXDestructorDecl>(Val: Method);
    for (const auto &Thunk : *ThunkInfos) {
      SmallString<256> ElidedName;
      llvm::raw_svector_ostream ElidedNameStream(ElidedName);
      if (Destructor)
        Mangler->mangleCXXDtorThunk(DD: Destructor, Type: VirtualMethodDecl.getDtorType(),
                                    Thunk, /* elideOverrideInfo */ ElideOverrideInfo: true,
                                    ElidedNameStream);
      else
        Mangler->mangleThunk(MD: Method, Thunk, /* elideOverrideInfo */ ElideOverrideInfo: true,
                             ElidedNameStream);
      SmallString<256> MangledName;
      llvm::raw_svector_ostream mangledNameStream(MangledName);
      if (Destructor)
        Mangler->mangleCXXDtorThunk(DD: Destructor, Type: VirtualMethodDecl.getDtorType(),
                                    Thunk, /* elideOverrideInfo */ ElideOverrideInfo: false,
                                    mangledNameStream);
      else
        Mangler->mangleThunk(MD: Method, Thunk, /* elideOverrideInfo */ ElideOverrideInfo: false,
                             mangledNameStream);

      Thunks[ElidedName].push_back(Elt: std::string(MangledName));
    }
  }
  // Within each elided-name group, the smallest full mangling is the one
  // allowed to abbreviate; record those representatives.
  llvm::StringSet<> SimplifiedThunkNames;
  for (auto &ThunkList : Thunks) {
    llvm::sort(C&: ThunkList.second);
    SimplifiedThunkNames.insert(key: ThunkList.second[0]);
  }
  bool Result = SimplifiedThunkNames.contains(key: MangledName);
  // Cache for subsequent queries on the same method.
  ThunksToBeAbbreviated[VirtualMethodDecl] = std::move(SimplifiedThunkNames);
  return Result;
}
15461
15462bool ASTContext::arePFPFieldsTriviallyCopyable(const RecordDecl *RD) const {
15463 // Check for trivially-destructible here because non-trivially-destructible
15464 // types will always cause the type and any types derived from it to be
15465 // considered non-trivially-copyable. The same cannot be said for
15466 // trivially-copyable because deleting special members of a type derived from
15467 // a non-trivially-copyable type can cause the derived type to be considered
15468 // trivially copyable.
15469 if (getLangOpts().PointerFieldProtectionTagged)
15470 return !isa<CXXRecordDecl>(Val: RD) ||
15471 cast<CXXRecordDecl>(Val: RD)->hasTrivialDestructor();
15472 return true;
15473}
15474
15475static void findPFPFields(const ASTContext &Ctx, QualType Ty, CharUnits Offset,
15476 std::vector<PFPField> &Fields, bool IncludeVBases) {
15477 if (auto *AT = Ctx.getAsConstantArrayType(T: Ty)) {
15478 if (auto *ElemDecl = AT->getElementType()->getAsCXXRecordDecl()) {
15479 const ASTRecordLayout &ElemRL = Ctx.getASTRecordLayout(D: ElemDecl);
15480 for (unsigned i = 0; i != AT->getSize(); ++i)
15481 findPFPFields(Ctx, Ty: AT->getElementType(), Offset: Offset + i * ElemRL.getSize(),
15482 Fields, IncludeVBases: true);
15483 }
15484 }
15485 auto *Decl = Ty->getAsCXXRecordDecl();
15486 // isPFPType() is inherited from bases and members (including via arrays), so
15487 // we can early exit if it is false. Unions are excluded per the API
15488 // documentation.
15489 if (!Decl || !Decl->isPFPType() || Decl->isUnion())
15490 return;
15491 const ASTRecordLayout &RL = Ctx.getASTRecordLayout(D: Decl);
15492 for (FieldDecl *Field : Decl->fields()) {
15493 CharUnits FieldOffset =
15494 Offset +
15495 Ctx.toCharUnitsFromBits(BitSize: RL.getFieldOffset(FieldNo: Field->getFieldIndex()));
15496 if (Ctx.isPFPField(Field))
15497 Fields.push_back(x: {.Offset: FieldOffset, .Field: Field});
15498 findPFPFields(Ctx, Ty: Field->getType(), Offset: FieldOffset, Fields,
15499 /*IncludeVBases=*/true);
15500 }
15501 // Pass false for IncludeVBases below because vbases are only included in
15502 // layout for top-level types, i.e. not bases or vbases.
15503 for (CXXBaseSpecifier &Base : Decl->bases()) {
15504 if (Base.isVirtual())
15505 continue;
15506 CharUnits BaseOffset =
15507 Offset + RL.getBaseClassOffset(Base: Base.getType()->getAsCXXRecordDecl());
15508 findPFPFields(Ctx, Ty: Base.getType(), Offset: BaseOffset, Fields,
15509 /*IncludeVBases=*/false);
15510 }
15511 if (IncludeVBases) {
15512 for (CXXBaseSpecifier &Base : Decl->vbases()) {
15513 CharUnits BaseOffset =
15514 Offset + RL.getVBaseClassOffset(VBase: Base.getType()->getAsCXXRecordDecl());
15515 findPFPFields(Ctx, Ty: Base.getType(), Offset: BaseOffset, Fields,
15516 /*IncludeVBases=*/false);
15517 }
15518 }
15519}
15520
15521std::vector<PFPField> ASTContext::findPFPFields(QualType Ty) const {
15522 std::vector<PFPField> PFPFields;
15523 ::findPFPFields(Ctx: *this, Ty, Offset: CharUnits::Zero(), Fields&: PFPFields, IncludeVBases: true);
15524 return PFPFields;
15525}
15526
15527bool ASTContext::hasPFPFields(QualType Ty) const {
15528 return !findPFPFields(Ty).empty();
15529}
15530
15531bool ASTContext::isPFPField(const FieldDecl *FD) const {
15532 if (auto *RD = dyn_cast<CXXRecordDecl>(Val: FD->getParent()))
15533 return RD->isPFPType() && FD->getType()->isPointerType() &&
15534 !FD->hasAttr<NoFieldProtectionAttr>();
15535 return false;
15536}
15537
15538void ASTContext::recordMemberDataPointerEvaluation(const ValueDecl *VD) {
15539 auto *FD = dyn_cast<FieldDecl>(Val: VD);
15540 if (!FD)
15541 FD = cast<FieldDecl>(Val: cast<IndirectFieldDecl>(Val: VD)->chain().back());
15542 if (isPFPField(FD))
15543 PFPFieldsWithEvaluatedOffset.insert(X: FD);
15544}
15545
15546void ASTContext::recordOffsetOfEvaluation(const OffsetOfExpr *E) {
15547 if (E->getNumComponents() == 0)
15548 return;
15549 OffsetOfNode Comp = E->getComponent(Idx: E->getNumComponents() - 1);
15550 if (Comp.getKind() != OffsetOfNode::Field)
15551 return;
15552 if (FieldDecl *FD = Comp.getField(); isPFPField(FD))
15553 PFPFieldsWithEvaluatedOffset.insert(X: FD);
15554}
15555