1//===- ASTContext.cpp - Context to hold long-lived AST nodes --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the ASTContext interface.
10//
11//===----------------------------------------------------------------------===//
12
13#include "clang/AST/ASTContext.h"
14#include "ByteCode/Context.h"
15#include "CXXABI.h"
16#include "clang/AST/APValue.h"
17#include "clang/AST/ASTConcept.h"
18#include "clang/AST/ASTMutationListener.h"
19#include "clang/AST/ASTStructuralEquivalence.h"
20#include "clang/AST/ASTTypeTraits.h"
21#include "clang/AST/Attr.h"
22#include "clang/AST/AttrIterator.h"
23#include "clang/AST/CharUnits.h"
24#include "clang/AST/Comment.h"
25#include "clang/AST/Decl.h"
26#include "clang/AST/DeclBase.h"
27#include "clang/AST/DeclCXX.h"
28#include "clang/AST/DeclContextInternals.h"
29#include "clang/AST/DeclObjC.h"
30#include "clang/AST/DeclOpenMP.h"
31#include "clang/AST/DeclTemplate.h"
32#include "clang/AST/DeclarationName.h"
33#include "clang/AST/DependenceFlags.h"
34#include "clang/AST/Expr.h"
35#include "clang/AST/ExprCXX.h"
36#include "clang/AST/ExternalASTSource.h"
37#include "clang/AST/Mangle.h"
38#include "clang/AST/MangleNumberingContext.h"
39#include "clang/AST/NestedNameSpecifier.h"
40#include "clang/AST/ParentMapContext.h"
41#include "clang/AST/RawCommentList.h"
42#include "clang/AST/RecordLayout.h"
43#include "clang/AST/Stmt.h"
44#include "clang/AST/TemplateBase.h"
45#include "clang/AST/TemplateName.h"
46#include "clang/AST/Type.h"
47#include "clang/AST/TypeLoc.h"
48#include "clang/AST/UnresolvedSet.h"
49#include "clang/AST/VTableBuilder.h"
50#include "clang/Basic/AddressSpaces.h"
51#include "clang/Basic/Builtins.h"
52#include "clang/Basic/CommentOptions.h"
53#include "clang/Basic/DiagnosticAST.h"
54#include "clang/Basic/ExceptionSpecificationType.h"
55#include "clang/Basic/IdentifierTable.h"
56#include "clang/Basic/LLVM.h"
57#include "clang/Basic/LangOptions.h"
58#include "clang/Basic/Linkage.h"
59#include "clang/Basic/Module.h"
60#include "clang/Basic/NoSanitizeList.h"
61#include "clang/Basic/ObjCRuntime.h"
62#include "clang/Basic/ProfileList.h"
63#include "clang/Basic/SourceLocation.h"
64#include "clang/Basic/SourceManager.h"
65#include "clang/Basic/Specifiers.h"
66#include "clang/Basic/TargetCXXABI.h"
67#include "clang/Basic/TargetInfo.h"
68#include "clang/Basic/XRayLists.h"
69#include "llvm/ADT/APFixedPoint.h"
70#include "llvm/ADT/APInt.h"
71#include "llvm/ADT/APSInt.h"
72#include "llvm/ADT/ArrayRef.h"
73#include "llvm/ADT/DenseMap.h"
74#include "llvm/ADT/DenseSet.h"
75#include "llvm/ADT/FoldingSet.h"
76#include "llvm/ADT/PointerUnion.h"
77#include "llvm/ADT/STLExtras.h"
78#include "llvm/ADT/SmallPtrSet.h"
79#include "llvm/ADT/SmallVector.h"
80#include "llvm/ADT/StringExtras.h"
81#include "llvm/ADT/StringRef.h"
82#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
83#include "llvm/Support/Capacity.h"
84#include "llvm/Support/Casting.h"
85#include "llvm/Support/Compiler.h"
86#include "llvm/Support/ErrorHandling.h"
87#include "llvm/Support/MD5.h"
88#include "llvm/Support/MathExtras.h"
89#include "llvm/Support/SipHash.h"
90#include "llvm/Support/raw_ostream.h"
91#include "llvm/TargetParser/AArch64TargetParser.h"
92#include "llvm/TargetParser/Triple.h"
93#include <algorithm>
94#include <cassert>
95#include <cstddef>
96#include <cstdint>
97#include <cstdlib>
98#include <map>
99#include <memory>
100#include <optional>
101#include <string>
102#include <tuple>
103#include <utility>
104
105using namespace clang;
106
/// Relative rank of floating-point types, ordered from lowest rank to
/// highest. Enumerator order is significant: comparisons between ranks rely
/// on this ordering.
enum FloatingRank {
  BFloat16Rank,
  Float16Rank,
  HalfRank,
  FloatRank,
  DoubleRank,
  LongDoubleRank,
  Float128Rank,
  Ibm128Rank
};
117
/// \returns The locations that are relevant when searching for Doc comments
/// related to \p D.
///
/// Returns an empty vector for declarations that can never carry a doc
/// comment (implicit decls, implicit instantiations, parameters, template
/// parameters). Otherwise returns one location — or, for decls generated
/// from a macro, up to two candidate locations (expansion site first, then
/// the spelling location inside the macro).
static SmallVector<SourceLocation, 2>
getDeclLocsForCommentSearch(const Decl *D, SourceManager &SourceMgr) {
  assert(D);

  // User can not attach documentation to implicit declarations.
  if (D->isImplicit())
    return {};

  // User can not attach documentation to implicit instantiations.
  if (const auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
    if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  // Likewise for implicitly instantiated static data members.
  if (const auto *VD = dyn_cast<VarDecl>(Val: D)) {
    if (VD->isStaticDataMember() &&
        VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CRD = dyn_cast<CXXRecordDecl>(Val: D)) {
    if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  // Class template specializations that are implicit or not yet declared
  // cannot carry documentation either.
  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(Val: D)) {
    TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
    if (TSK == TSK_ImplicitInstantiation ||
        TSK == TSK_Undeclared)
      return {};
  }

  if (const auto *ED = dyn_cast<EnumDecl>(Val: D)) {
    if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }
  if (const auto *TD = dyn_cast<TagDecl>(Val: D)) {
    // When tag declaration (but not definition!) is part of the
    // decl-specifier-seq of some other declaration, it doesn't get comment
    if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
      return {};
  }
  // TODO: handle comments for function parameters properly.
  if (isa<ParmVarDecl>(Val: D))
    return {};

  // TODO: we could look up template parameter documentation in the template
  // documentation.
  if (isa<TemplateTypeParmDecl>(Val: D) ||
      isa<NonTypeTemplateParmDecl>(Val: D) ||
      isa<TemplateTemplateParmDecl>(Val: D))
    return {};

  SmallVector<SourceLocation, 2> Locations;
  // Find declaration location.
  // For Objective-C declarations we generally don't expect to have multiple
  // declarators, thus use declaration starting location as the "declaration
  // location".
  // For all other declarations multiple declarators are used quite frequently,
  // so we use the location of the identifier as the "declaration location".
  SourceLocation BaseLocation;
  if (isa<ObjCMethodDecl>(Val: D) || isa<ObjCContainerDecl>(Val: D) ||
      isa<ObjCPropertyDecl>(Val: D) || isa<RedeclarableTemplateDecl>(Val: D) ||
      isa<ClassTemplateSpecializationDecl>(Val: D) ||
      // Allow association with Y across {} in `typedef struct X {} Y`.
      isa<TypedefDecl>(Val: D))
    BaseLocation = D->getBeginLoc();
  else
    BaseLocation = D->getLocation();

  if (!D->getLocation().isMacroID()) {
    Locations.emplace_back(Args&: BaseLocation);
  } else {
    const auto *DeclCtx = D->getDeclContext();

    // When encountering definitions generated from a macro (that are not
    // contained by another declaration in the macro) we need to try and find
    // the comment at the location of the expansion but if there is no comment
    // there we should retry to see if there is a comment inside the macro as
    // well. To this end we return first BaseLocation to first look at the
    // expansion site, the second value is the spelling location of the
    // beginning of the declaration defined inside the macro.
    if (!(DeclCtx &&
          Decl::castFromDeclContext(DeclCtx)->getLocation().isMacroID())) {
      Locations.emplace_back(Args: SourceMgr.getExpansionLoc(Loc: BaseLocation));
    }

    // We use Decl::getBeginLoc() and not just BaseLocation here to ensure that
    // we don't refer to the macro argument location at the expansion site (this
    // can happen if the name's spelling is provided via macro argument), and
    // always to the declaration itself.
    Locations.emplace_back(Args: SourceMgr.getSpellingLoc(Loc: D->getBeginLoc()));
  }

  return Locations;
}
216
/// Search \p CommentsInTheFile (a map from file offset to raw comment) for a
/// doc comment attachable to \p D at \p RepresentativeLocForDecl.
///
/// First looks for a trailing comment on the same line as the declaration,
/// then for a comment immediately preceding it with no intervening
/// declarations or preprocessor directives. Returns nullptr if none matches.
RawComment *ASTContext::getRawCommentForDeclNoCacheImpl(
    const Decl *D, const SourceLocation RepresentativeLocForDecl,
    const std::map<unsigned, RawComment *> &CommentsInTheFile) const {
  // If the declaration doesn't map directly to a location in a file, we
  // can't find the comment.
  if (RepresentativeLocForDecl.isInvalid() ||
      !RepresentativeLocForDecl.isFileID())
    return nullptr;

  // If there are no comments anywhere, we won't find anything.
  if (CommentsInTheFile.empty())
    return nullptr;

  // Decompose the location for the declaration and find the beginning of the
  // file buffer.
  const FileIDAndOffset DeclLocDecomp =
      SourceMgr.getDecomposedLoc(Loc: RepresentativeLocForDecl);

  // Slow path.
  // First comment at or after the declaration's offset.
  auto OffsetCommentBehindDecl =
      CommentsInTheFile.lower_bound(x: DeclLocDecomp.second);

  // First check whether we have a trailing comment.
  if (OffsetCommentBehindDecl != CommentsInTheFile.end()) {
    RawComment *CommentBehindDecl = OffsetCommentBehindDecl->second;
    // Trailing comments only attach to a limited set of declaration kinds.
    if ((CommentBehindDecl->isDocumentation() ||
         LangOpts.CommentOpts.ParseAllComments) &&
        CommentBehindDecl->isTrailingComment() &&
        (isa<FieldDecl>(Val: D) || isa<EnumConstantDecl>(Val: D) || isa<VarDecl>(Val: D) ||
         isa<ObjCMethodDecl>(Val: D) || isa<ObjCPropertyDecl>(Val: D))) {

      // Check that Doxygen trailing comment comes after the declaration, starts
      // on the same line and in the same file as the declaration.
      if (SourceMgr.getLineNumber(FID: DeclLocDecomp.first, FilePos: DeclLocDecomp.second) ==
          Comments.getCommentBeginLine(C: CommentBehindDecl, File: DeclLocDecomp.first,
                                       Offset: OffsetCommentBehindDecl->first)) {
        return CommentBehindDecl;
      }
    }
  }

  // The comment just after the declaration was not a trailing comment.
  // Let's look at the previous comment.
  if (OffsetCommentBehindDecl == CommentsInTheFile.begin())
    return nullptr;

  auto OffsetCommentBeforeDecl = --OffsetCommentBehindDecl;
  RawComment *CommentBeforeDecl = OffsetCommentBeforeDecl->second;

  // Check that we actually have a non-member Doxygen comment.
  if (!(CommentBeforeDecl->isDocumentation() ||
        LangOpts.CommentOpts.ParseAllComments) ||
      CommentBeforeDecl->isTrailingComment())
    return nullptr;

  // Decompose the end of the comment.
  const unsigned CommentEndOffset =
      Comments.getCommentEndOffset(C: CommentBeforeDecl);

  // Get the corresponding buffer.
  bool Invalid = false;
  const char *Buffer = SourceMgr.getBufferData(FID: DeclLocDecomp.first,
                                               Invalid: &Invalid).data();
  if (Invalid)
    return nullptr;

  // Extract text between the comment and declaration.
  StringRef Text(Buffer + CommentEndOffset,
                 DeclLocDecomp.second - CommentEndOffset);

  // There should be no other declarations or preprocessor directives between
  // comment and declaration.
  if (Text.find_last_of(Chars: ";{}#@") != StringRef::npos)
    return nullptr;

  return CommentBeforeDecl;
}
294
/// Find the raw doc comment for \p D without consulting (or populating) the
/// per-decl comment cache. Tries each candidate location produced by
/// getDeclLocsForCommentSearch in order and returns the first match, loading
/// external comments on first use.
RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
  const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr);

  for (const auto DeclLoc : DeclLocs) {
    // If the declaration doesn't map directly to a location in a file, we
    // can't find the comment.
    if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
      continue;

    // Pull in comments from the external AST source (e.g. a PCH/module)
    // exactly once.
    if (ExternalSource && !CommentsLoaded) {
      ExternalSource->ReadComments();
      CommentsLoaded = true;
    }

    if (Comments.empty())
      continue;

    const FileID File = SourceMgr.getDecomposedLoc(Loc: DeclLoc).first;
    if (!File.isValid())
      continue;

    const auto CommentsInThisFile = Comments.getCommentsInFile(File);
    if (!CommentsInThisFile || CommentsInThisFile->empty())
      continue;

    if (RawComment *Comment =
            getRawCommentForDeclNoCacheImpl(D, RepresentativeLocForDecl: DeclLoc, CommentsInTheFile: *CommentsInThisFile))
      return Comment;
  }

  return nullptr;
}
327
/// Record a raw comment for later attachment to a declaration. Comments from
/// system headers are only expected when RetainCommentsFromSystemHeaders is
/// enabled.
void ASTContext::addComment(const RawComment &RC) {
  assert(LangOpts.RetainCommentsFromSystemHeaders ||
         !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin()));
  Comments.addComment(RC, CommentOpts: LangOpts.CommentOpts, Allocator&: BumpAlloc);
}
333
334const RawComment *ASTContext::getRawCommentForAnyRedecl(
335 const Decl *D,
336 const Decl **OriginalDecl) const {
337 if (!D) {
338 if (OriginalDecl)
339 OriginalDecl = nullptr;
340 return nullptr;
341 }
342
343 D = &adjustDeclToTemplate(D: *D);
344
345 // Any comment directly attached to D?
346 {
347 auto DeclComment = DeclRawComments.find(Val: D);
348 if (DeclComment != DeclRawComments.end()) {
349 if (OriginalDecl)
350 *OriginalDecl = D;
351 return DeclComment->second;
352 }
353 }
354
355 // Any comment attached to any redeclaration of D?
356 const Decl *CanonicalD = D->getCanonicalDecl();
357 if (!CanonicalD)
358 return nullptr;
359
360 {
361 auto RedeclComment = RedeclChainComments.find(Val: CanonicalD);
362 if (RedeclComment != RedeclChainComments.end()) {
363 if (OriginalDecl)
364 *OriginalDecl = RedeclComment->second;
365 auto CommentAtRedecl = DeclRawComments.find(Val: RedeclComment->second);
366 assert(CommentAtRedecl != DeclRawComments.end() &&
367 "This decl is supposed to have comment attached.");
368 return CommentAtRedecl->second;
369 }
370 }
371
372 // Any redeclarations of D that we haven't checked for comments yet?
373 const Decl *LastCheckedRedecl = [&]() {
374 const Decl *LastChecked = CommentlessRedeclChains.lookup(Val: CanonicalD);
375 bool CanUseCommentlessCache = false;
376 if (LastChecked) {
377 for (auto *Redecl : CanonicalD->redecls()) {
378 if (Redecl == D) {
379 CanUseCommentlessCache = true;
380 break;
381 }
382 if (Redecl == LastChecked)
383 break;
384 }
385 }
386 // FIXME: This could be improved so that even if CanUseCommentlessCache
387 // is false, once we've traversed past CanonicalD we still skip ahead
388 // LastChecked.
389 return CanUseCommentlessCache ? LastChecked : nullptr;
390 }();
391
392 for (const Decl *Redecl : D->redecls()) {
393 assert(Redecl);
394 // Skip all redeclarations that have been checked previously.
395 if (LastCheckedRedecl) {
396 if (LastCheckedRedecl == Redecl) {
397 LastCheckedRedecl = nullptr;
398 }
399 continue;
400 }
401 const RawComment *RedeclComment = getRawCommentForDeclNoCache(D: Redecl);
402 if (RedeclComment) {
403 cacheRawCommentForDecl(OriginalD: *Redecl, Comment: *RedeclComment);
404 if (OriginalDecl)
405 *OriginalDecl = Redecl;
406 return RedeclComment;
407 }
408 CommentlessRedeclChains[CanonicalD] = Redecl;
409 }
410
411 if (OriginalDecl)
412 *OriginalDecl = nullptr;
413 return nullptr;
414}
415
/// Cache \p Comment as the doc comment of \p OriginalD, both for the decl
/// itself and for its whole redeclaration chain (keyed by the canonical
/// declaration).
void ASTContext::cacheRawCommentForDecl(const Decl &OriginalD,
                                        const RawComment &Comment) const {
  assert(Comment.isDocumentation() || LangOpts.CommentOpts.ParseAllComments);
  DeclRawComments.try_emplace(Key: &OriginalD, Args: &Comment);
  const Decl *const CanonicalDecl = OriginalD.getCanonicalDecl();
  RedeclChainComments.try_emplace(Key: CanonicalDecl, Args: &OriginalD);
  // The chain now has a comment, so any "no comment found so far" entry for
  // it is stale.
  CommentlessRedeclChains.erase(Val: CanonicalDecl);
}
424
/// Append to \p Redeclared any redeclarations of \p ObjCMethod found in the
/// known class extensions of the interface whose implementation contains the
/// method. Does nothing when the method is not inside an @implementation.
static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
                   SmallVectorImpl<const NamedDecl *> &Redeclared) {
  const DeclContext *DC = ObjCMethod->getDeclContext();
  if (const auto *IMD = dyn_cast<ObjCImplDecl>(Val: DC)) {
    const ObjCInterfaceDecl *ID = IMD->getClassInterface();
    if (!ID)
      return;
    // Add redeclared method here.
    for (const auto *Ext : ID->known_extensions()) {
      // Match on selector and instance/class-ness.
      if (ObjCMethodDecl *RedeclaredMethod =
            Ext->getMethod(Sel: ObjCMethod->getSelector(),
                                  isInstance: ObjCMethod->isInstanceMethod()))
        Redeclared.push_back(Elt: RedeclaredMethod);
    }
  }
}
441
/// After a declaration group has been parsed, try to attach any still
/// unattached comments in the same file to those declarations, eagerly
/// parsing any comment that gets attached.
void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls,
                                                 const Preprocessor *PP) {
  if (Comments.empty() || Decls.empty())
    return;

  // Determine the file from the first decl with a valid location; the group
  // is expected to live in a single file.
  FileID File;
  for (const Decl *D : Decls) {
    if (D->isInvalidDecl())
      continue;

    D = &adjustDeclToTemplate(D: *D);
    SourceLocation Loc = D->getLocation();
    if (Loc.isValid()) {
      // See if there are any new comments that are not attached to a decl.
      // The location doesn't have to be precise - we care only about the file.
      File = SourceMgr.getDecomposedLoc(Loc).first;
      break;
    }
  }

  if (File.isInvalid())
    return;

  auto CommentsInThisFile = Comments.getCommentsInFile(File);
  if (!CommentsInThisFile || CommentsInThisFile->empty() ||
      CommentsInThisFile->rbegin()->second->isAttached())
    return;

  // There is at least one comment not attached to a decl.
  // Maybe it should be attached to one of Decls?
  //
  // Note that this way we pick up not only comments that precede the
  // declaration, but also comments that *follow* the declaration -- thanks to
  // the lookahead in the lexer: we've consumed the semicolon and looked
  // ahead through comments.
  for (const Decl *D : Decls) {
    assert(D);
    if (D->isInvalidDecl())
      continue;

    D = &adjustDeclToTemplate(D: *D);

    // Skip decls that already have a comment attached.
    if (DeclRawComments.count(Val: D) > 0)
      continue;

    const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr);

    for (const auto DeclLoc : DeclLocs) {
      if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
        continue;

      if (RawComment *const DocComment = getRawCommentForDeclNoCacheImpl(
              D, RepresentativeLocForDecl: DeclLoc, CommentsInTheFile: *CommentsInThisFile)) {
        cacheRawCommentForDecl(OriginalD: *D, Comment: *DocComment);
        // Parse immediately so the comment is available without a later
        // lazy-parse step.
        comments::FullComment *FC = DocComment->parse(Context: *this, PP, D);
        ParsedComments[D->getCanonicalDecl()] = FC;
        break;
      }
    }
  }
}
503
/// Clone the parsed comment \p FC so it can be attached to a different
/// declaration \p D: the DeclInfo is re-filled for \p D, while the comment's
/// original decl is kept as CommentDecl and (if the re-fill produced none)
/// the original template parameters are reused.
comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
                                                    const Decl *D) const {
  auto *ThisDeclInfo = new (*this) comments::DeclInfo;
  // Temporarily point at D so fill() computes D's properties.
  ThisDeclInfo->CommentDecl = D;
  ThisDeclInfo->IsFilled = false;
  ThisDeclInfo->fill();
  // Restore the decl the comment text was actually written for.
  ThisDeclInfo->CommentDecl = FC->getDecl();
  if (!ThisDeclInfo->TemplateParameters)
    ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
  comments::FullComment *CFC =
    new (*this) comments::FullComment(FC->getBlocks(),
                                      ThisDeclInfo);
  return CFC;
}
518
/// Parse the raw comment attached directly to \p D, without caching the
/// result and without consulting redeclarations. Returns null if \p D has no
/// attached comment.
comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const {
  const RawComment *RC = getRawCommentForDeclNoCache(D);
  return RC ? RC->parse(Context: *this, PP: nullptr, D) : nullptr;
}
523
/// Return the parsed doc comment for \p D, caching the result.
///
/// If neither \p D nor any redeclaration has an attached comment, fall back
/// to "inherited" documentation: the property for its accessor, overridden
/// methods, the underlying tag for a typedef, the superclass/category class
/// for ObjC, or public base classes for C++ records. Inherited comments are
/// cloned so the DeclInfo matches \p D.
comments::FullComment *ASTContext::getCommentForDecl(
                                              const Decl *D,
                                              const Preprocessor *PP) const {
  if (!D || D->isInvalidDecl())
    return nullptr;
  D = &adjustDeclToTemplate(D: *D);

  const Decl *Canonical = D->getCanonicalDecl();
  llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
      ParsedComments.find(Val: Canonical);

  if (Pos != ParsedComments.end()) {
    // Cache hit; clone if the cached comment was parsed for a different
    // redeclaration.
    if (Canonical != D) {
      comments::FullComment *FC = Pos->second;
      comments::FullComment *CFC = cloneFullComment(FC, D);
      return CFC;
    }
    return Pos->second;
  }

  const Decl *OriginalDecl = nullptr;

  const RawComment *RC = getRawCommentForAnyRedecl(D, OriginalDecl: &OriginalDecl);
  if (!RC) {
    // No comment on D or any redeclaration: try to inherit documentation.
    if (isa<ObjCMethodDecl>(Val: D) || isa<FunctionDecl>(Val: D)) {
      SmallVector<const NamedDecl*, 8> Overridden;
      const auto *OMD = dyn_cast<ObjCMethodDecl>(Val: D);
      if (OMD && OMD->isPropertyAccessor())
        if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
          if (comments::FullComment *FC = getCommentForDecl(D: PDecl, PP))
            return cloneFullComment(FC, D);
      if (OMD)
        addRedeclaredMethods(ObjCMethod: OMD, Redeclared&: Overridden);
      getOverriddenMethods(Method: dyn_cast<NamedDecl>(Val: D), Overridden);
      for (unsigned i = 0, e = Overridden.size(); i < e; i++)
        if (comments::FullComment *FC = getCommentForDecl(D: Overridden[i], PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *TD = dyn_cast<TypedefNameDecl>(Val: D)) {
      // Attach any tag type's documentation to its typedef if latter
      // does not have one of its own.
      QualType QT = TD->getUnderlyingType();
      if (const auto *TT = QT->getAs<TagType>())
        if (comments::FullComment *FC = getCommentForDecl(D: TT->getDecl(), PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(Val: D)) {
      // Walk up the superclass chain until a documented class is found.
      while (IC->getSuperClass()) {
        IC = IC->getSuperClass();
        if (comments::FullComment *FC = getCommentForDecl(D: IC, PP))
          return cloneFullComment(FC, D);
      }
    }
    else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(Val: D)) {
      if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
        if (comments::FullComment *FC = getCommentForDecl(D: IC, PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *RD = dyn_cast<CXXRecordDecl>(Val: D)) {
      if (!(RD = RD->getDefinition()))
        return nullptr;
      // Check non-virtual bases.
      for (const auto &I : RD->bases()) {
        if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(NonVirtualBase= NonVirtualBase->getDefinition()))
            continue;

          if (comments::FullComment *FC = getCommentForDecl(D: (NonVirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
      // Check virtual bases.
      for (const auto &I : RD->vbases()) {
        if (I.getAccessSpecifier() != AS_public)
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(VirtualBase= VirtualBase->getDefinition()))
            continue;
          if (comments::FullComment *FC = getCommentForDecl(D: (VirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
    }
    return nullptr;
  }

  // If the RawComment was attached to other redeclaration of this Decl, we
  // should parse the comment in context of that other Decl. This is important
  // because comments can contain references to parameter names which can be
  // different across redeclarations.
  if (D != OriginalDecl && OriginalDecl)
    return getCommentForDecl(D: OriginalDecl, PP);

  comments::FullComment *FC = RC->parse(Context: *this, PP, D);
  ParsedComments[Canonical] = FC;
  return FC;
}
629
/// Profile a template template parameter for canonicalization: its depth,
/// position, pack-ness, kind, and a structural description of its template
/// parameter list (tag 0 = type parameter, 1 = non-type parameter,
/// 2 = nested template template parameter, profiled recursively).
void ASTContext::CanonicalTemplateTemplateParm::Profile(
    llvm::FoldingSetNodeID &ID, const ASTContext &C,
    TemplateTemplateParmDecl *Parm) {
  ID.AddInteger(I: Parm->getDepth());
  ID.AddInteger(I: Parm->getPosition());
  ID.AddBoolean(B: Parm->isParameterPack());
  ID.AddInteger(I: Parm->templateParameterKind());

  TemplateParameterList *Params = Parm->getTemplateParameters();
  ID.AddInteger(I: Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                          PEnd = Params->end();
       P != PEnd; ++P) {
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Val: *P)) {
      ID.AddInteger(I: 0);
      ID.AddBoolean(B: TTP->isParameterPack());
      ID.AddInteger(
          I: TTP->getNumExpansionParameters().toInternalRepresentation());
      continue;
    }

    if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Val: *P)) {
      ID.AddInteger(I: 1);
      ID.AddBoolean(B: NTTP->isParameterPack());
      // Profile the canonical, constraint-stripped type.
      ID.AddPointer(Ptr: C.getUnconstrainedType(T: C.getCanonicalType(T: NTTP->getType()))
                        .getAsOpaquePtr());
      if (NTTP->isExpandedParameterPack()) {
        ID.AddBoolean(B: true);
        ID.AddInteger(I: NTTP->getNumExpansionTypes());
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          QualType T = NTTP->getExpansionType(I);
          ID.AddPointer(Ptr: T.getCanonicalType().getAsOpaquePtr());
        }
      } else
        ID.AddBoolean(B: false);
      continue;
    }

    auto *TTP = cast<TemplateTemplateParmDecl>(Val: *P);
    ID.AddInteger(I: 2);
    Profile(ID, C, Parm: TTP);
  }
}
673
/// Return the canonical template template parameter equivalent to \p TTP:
/// same depth/position/pack structure, but with names, default arguments and
/// constraints stripped and all parameter types canonicalized. Results are
/// memoized in CanonTemplateTemplateParms.
TemplateTemplateParmDecl *
ASTContext::getCanonicalTemplateTemplateParmDecl(
                                          TemplateTemplateParmDecl *TTP) const {
  // Check if we already have a canonical template template parameter.
  llvm::FoldingSetNodeID ID;
  CanonicalTemplateTemplateParm::Profile(ID, C: *this, Parm: TTP);
  void *InsertPos = nullptr;
  CanonicalTemplateTemplateParm *Canonical
    = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  if (Canonical)
    return Canonical->getParam();

  // Build a canonical template parameter list.
  TemplateParameterList *Params = TTP->getTemplateParameters();
  SmallVector<NamedDecl *, 4> CanonParams;
  CanonParams.reserve(N: Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                          PEnd = Params->end();
       P != PEnd; ++P) {
    // Note that, per C++20 [temp.over.link]/6, when determining whether
    // template-parameters are equivalent, constraints are ignored.
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Val: *P)) {
      TemplateTypeParmDecl *NewTTP = TemplateTypeParmDecl::Create(
          C: *this, DC: getTranslationUnitDecl(), KeyLoc: SourceLocation(), NameLoc: SourceLocation(),
          D: TTP->getDepth(), P: TTP->getIndex(), Id: nullptr, Typename: false,
          ParameterPack: TTP->isParameterPack(), /*HasTypeConstraint=*/false,
          NumExpanded: TTP->getNumExpansionParameters());
      CanonParams.push_back(Elt: NewTTP);
    } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Val: *P)) {
      // Canonicalize the parameter's type, dropping any constraints.
      QualType T = getUnconstrainedType(T: getCanonicalType(T: NTTP->getType()));
      TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
      NonTypeTemplateParmDecl *Param;
      if (NTTP->isExpandedParameterPack()) {
        SmallVector<QualType, 2> ExpandedTypes;
        SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          ExpandedTypes.push_back(Elt: getCanonicalType(T: NTTP->getExpansionType(I)));
          ExpandedTInfos.push_back(
              Elt: getTrivialTypeSourceInfo(T: ExpandedTypes.back()));
        }

        Param = NonTypeTemplateParmDecl::Create(C: *this, DC: getTranslationUnitDecl(),
                                                StartLoc: SourceLocation(),
                                                IdLoc: SourceLocation(),
                                                D: NTTP->getDepth(),
                                                P: NTTP->getPosition(), Id: nullptr,
                                                T,
                                                TInfo,
                                                ExpandedTypes,
                                                ExpandedTInfos);
      } else {
        Param = NonTypeTemplateParmDecl::Create(C: *this, DC: getTranslationUnitDecl(),
                                                StartLoc: SourceLocation(),
                                                IdLoc: SourceLocation(),
                                                D: NTTP->getDepth(),
                                                P: NTTP->getPosition(), Id: nullptr,
                                                T,
                                                ParameterPack: NTTP->isParameterPack(),
                                                TInfo);
      }
      CanonParams.push_back(Elt: Param);
    } else
      // Nested template template parameters are canonicalized recursively.
      CanonParams.push_back(Elt: getCanonicalTemplateTemplateParmDecl(
          TTP: cast<TemplateTemplateParmDecl>(Val: *P)));
  }

  TemplateTemplateParmDecl *CanonTTP = TemplateTemplateParmDecl::Create(
      C: *this, DC: getTranslationUnitDecl(), L: SourceLocation(), D: TTP->getDepth(),
      P: TTP->getPosition(), ParameterPack: TTP->isParameterPack(), Id: nullptr,
      ParameterKind: TTP->templateParameterKind(),
      /*Typename=*/false,
      Params: TemplateParameterList::Create(C: *this, TemplateLoc: SourceLocation(), LAngleLoc: SourceLocation(),
                                    Params: CanonParams, RAngleLoc: SourceLocation(),
                                    /*RequiresClause=*/nullptr));

  // Get the new insert position for the node we care about.
  Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  assert(!Canonical && "Shouldn't be in the map!");
  (void)Canonical;

  // Create the canonical template template parameter entry.
  Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
  CanonTemplateTemplateParms.InsertNode(N: Canonical, InsertPos);
  return CanonTTP;
}
759
760TemplateTemplateParmDecl *
761ASTContext::findCanonicalTemplateTemplateParmDeclInternal(
762 TemplateTemplateParmDecl *TTP) const {
763 llvm::FoldingSetNodeID ID;
764 CanonicalTemplateTemplateParm::Profile(ID, C: *this, Parm: TTP);
765 void *InsertPos = nullptr;
766 CanonicalTemplateTemplateParm *Canonical =
767 CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
768 return Canonical ? Canonical->getParam() : nullptr;
769}
770
771TemplateTemplateParmDecl *
772ASTContext::insertCanonicalTemplateTemplateParmDeclInternal(
773 TemplateTemplateParmDecl *CanonTTP) const {
774 llvm::FoldingSetNodeID ID;
775 CanonicalTemplateTemplateParm::Profile(ID, C: *this, Parm: CanonTTP);
776 void *InsertPos = nullptr;
777 if (auto *Existing =
778 CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos))
779 return Existing->getParam();
780 CanonTemplateTemplateParms.InsertNode(
781 N: new (*this) CanonicalTemplateTemplateParm(CanonTTP), InsertPos);
782 return CanonTTP;
783}
784
785/// For the purposes of overflow pattern exclusion, does this match the
786/// while(i--) pattern?
787static bool matchesPostDecrInWhile(const UnaryOperator *UO, ASTContext &Ctx) {
788 if (UO->getOpcode() != UO_PostDec)
789 return false;
790
791 if (!UO->getType()->isUnsignedIntegerType())
792 return false;
793
794 // -fsanitize-undefined-ignore-overflow-pattern=unsigned-post-decr-while
795 if (!Ctx.getLangOpts().isOverflowPatternExcluded(
796 Kind: LangOptions::OverflowPatternExclusionKind::PostDecrInWhile))
797 return false;
798
799 // all Parents (usually just one) must be a WhileStmt
800 return llvm::all_of(
801 Range: Ctx.getParentMapContext().getParents(Node: *UO),
802 P: [](const DynTypedNode &P) { return P.get<WhileStmt>() != nullptr; });
803}
804
805bool ASTContext::isUnaryOverflowPatternExcluded(const UnaryOperator *UO) {
806 // -fsanitize-undefined-ignore-overflow-pattern=negated-unsigned-const
807 // ... like -1UL;
808 if (UO->getOpcode() == UO_Minus &&
809 getLangOpts().isOverflowPatternExcluded(
810 Kind: LangOptions::OverflowPatternExclusionKind::NegUnsignedConst) &&
811 UO->isIntegerConstantExpr(Ctx: *this)) {
812 return true;
813 }
814
815 if (matchesPostDecrInWhile(UO, Ctx&: *this))
816 return true;
817
818 return false;
819}
820
821/// Check if a type can have its sanitizer instrumentation elided based on its
822/// presence within an ignorelist.
823bool ASTContext::isTypeIgnoredBySanitizer(const SanitizerMask &Mask,
824 const QualType &Ty) const {
825 std::string TyName = Ty.getUnqualifiedType().getAsString(Policy: getPrintingPolicy());
826 return NoSanitizeL->containsType(Mask, MangledTypeName: TyName);
827}
828
829TargetCXXABI::Kind ASTContext::getCXXABIKind() const {
830 auto Kind = getTargetInfo().getCXXABI().getKind();
831 return getLangOpts().CXXABI.value_or(u&: Kind);
832}
833
/// Create the C++ ABI object for this context, or null when not compiling
/// C++. All Itanium-family ABIs share one implementation; Microsoft gets its
/// own.
CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
  if (!LangOpts.CPlusPlus) return nullptr;

  switch (getCXXABIKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericARM: // Same as Itanium at this level
  case TargetCXXABI::iOS:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::XL:
    return CreateItaniumCXXABI(Ctx&: *this);
  case TargetCXXABI::Microsoft:
    return CreateMicrosoftCXXABI(Ctx&: *this);
  }
  llvm_unreachable("Invalid CXXABI type!");
}
854
855interp::Context &ASTContext::getInterpContext() const {
856 if (!InterpContext) {
857 InterpContext.reset(p: new interp::Context(const_cast<ASTContext &>(*this)));
858 }
859 return *InterpContext;
860}
861
862ParentMapContext &ASTContext::getParentMapContext() {
863 if (!ParentMapCtx)
864 ParentMapCtx.reset(p: new ParentMapContext(*this));
865 return *ParentMapCtx;
866}
867
868static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
869 const LangOptions &LangOpts) {
870 switch (LangOpts.getAddressSpaceMapMangling()) {
871 case LangOptions::ASMM_Target:
872 return TI.useAddressSpaceMapMangling();
873 case LangOptions::ASMM_On:
874 return true;
875 case LangOptions::ASMM_Off:
876 return false;
877 }
878 llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
879}
880
/// Construct an ASTContext.
///
/// Most members initialized here are folding sets and side tables that are
/// keyed back to this context (via the this_() helper, which presumably
/// yields the object under construction — see its declaration). The only AST
/// node created eagerly is the translation unit declaration.
ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
                       IdentifierTable &idents, SelectorTable &sels,
                       Builtin::Context &builtins, TranslationUnitKind TUKind)
    : ConstantArrayTypes(this_(), ConstantArrayTypesLog2InitSize),
      DependentSizedArrayTypes(this_()), DependentSizedExtVectorTypes(this_()),
      DependentAddressSpaceTypes(this_()), DependentVectorTypes(this_()),
      DependentSizedMatrixTypes(this_()),
      FunctionProtoTypes(this_(), FunctionProtoTypesLog2InitSize),
      DependentTypeOfExprTypes(this_()), DependentDecltypeTypes(this_()),
      DependentPackIndexingTypes(this_()), TemplateSpecializationTypes(this_()),
      DependentBitIntTypes(this_()), SubstTemplateTemplateParmPacks(this_()),
      DeducedTemplates(this_()), ArrayParameterTypes(this_()),
      CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts),
      // Ignorelist / filter helpers are built from file lists in LangOpts.
      NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)),
      XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
                                        LangOpts.XRayNeverInstrumentFiles,
                                        LangOpts.XRayAttrListFiles, SM)),
      ProfList(new ProfileList(LangOpts.ProfileListFiles, SM)),
      PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
      BuiltinInfo(builtins), TUKind(TUKind), DeclarationNames(*this),
      Comments(SM), CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
      CompCategories(this_()), LastSDM(nullptr, 0) {
  // Create the one implicit top-level decl every translation unit has.
  addTranslationUnitDecl();
}
905
/// Tear down all side tables owned by the context. Also invoked by the
/// destructor; safe ordering between the steps below matters because several
/// of the destroyed objects contain DenseMaps of their own.
void ASTContext::cleanup() {
  // Release the DenseMaps associated with DeclContext objects.
  // FIXME: Is this the ideal solution?
  ReleaseDeclContextMaps();

  // Call all of the deallocation functions on all of their targets.
  for (auto &Pair : Deallocations)
    (Pair.first)(Pair.second);
  Deallocations.clear();

  // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
  // because they can contain DenseMaps.
  for (llvm::DenseMap<const ObjCInterfaceDecl *,
                      const ASTRecordLayout *>::iterator
           I = ObjCLayouts.begin(),
           E = ObjCLayouts.end();
       I != E;)
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(Ctx&: *this);
  ObjCLayouts.clear();

  // Same treatment for record layouts of C/C++ records.
  for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
       I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(Ctx&: *this);
  }
  ASTRecordLayouts.clear();

  // AttrVecs live in bump-allocated memory, so only run their destructors;
  // the storage itself is reclaimed with the allocator.
  for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
                                                    AEnd = DeclAttrs.end();
       A != AEnd; ++A)
    A->second->~AttrVec();
  DeclAttrs.clear();

  // PerModuleInitializers are likewise bump-allocated (see
  // addModuleInitializer): destroy without freeing.
  for (const auto &Value : ModuleInitializers)
    Value.second->~PerModuleInitializers();
  ModuleInitializers.clear();

  // Drop heap-owned filter/ignorelist helpers.
  XRayFilter.reset();
  NoSanitizeL.reset();
}
949
950ASTContext::~ASTContext() { cleanup(); }
951
952void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
953 TraversalScope = TopLevelDecls;
954 getParentMapContext().clear();
955}
956
957void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const {
958 Deallocations.push_back(Elt: {Callback, Data});
959}
960
961void
962ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) {
963 ExternalSource = std::move(Source);
964}
965
/// Dump statistics about the AST to stderr: a per-class count and byte total
/// for all type nodes, counts of implicitly declared special member
/// functions, any external-source stats, and allocator usage.
void ASTContext::PrintStats() const {
  llvm::errs() << "\n*** AST Context Stats:\n";
  llvm::errs() << "  " << Types.size() << " types total.\n";

  // One counter slot per concrete type class, generated from TypeNodes.inc,
  // plus one trailing unused slot.
  unsigned counts[] = {
#define TYPE(Name, Parent) 0,
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"
    0 // Extra
  };

  // Tally every type node by its dynamic type class.
  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
    Type *T = Types[i];
    counts[(unsigned)T->getTypeClass()]++;
  }

  // Walk the counters in the same TypeNodes.inc order to print and sum them.
  unsigned Idx = 0;
  unsigned TotalBytes = 0;
#define TYPE(Name, Parent)                                              \
  if (counts[Idx])                                                      \
    llvm::errs() << "    " << counts[Idx] << " " << #Name               \
                 << " types, " << sizeof(Name##Type) << " each "        \
                 << "(" << counts[Idx] * sizeof(Name##Type)             \
                 << " bytes)\n";                                        \
  TotalBytes += counts[Idx] * sizeof(Name##Type);                       \
  ++Idx;
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"

  llvm::errs() << "Total bytes = " << TotalBytes << "\n";

  // Implicit special member functions (declared vs. potentially needed).
  llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
               << NumImplicitDefaultConstructors
               << " implicit default constructors created\n";
  llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
               << NumImplicitCopyConstructors
               << " implicit copy constructors created\n";
  // Move operations only exist in C++.
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
                 << NumImplicitMoveConstructors
                 << " implicit move constructors created\n";
  llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
               << NumImplicitCopyAssignmentOperators
               << " implicit copy assignment operators created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
                 << NumImplicitMoveAssignmentOperators
                 << " implicit move assignment operators created\n";
  llvm::errs() << NumImplicitDestructorsDeclared << "/"
               << NumImplicitDestructors
               << " implicit destructors created\n";

  // Let the external AST source (if any) report its own statistics.
  if (ExternalSource) {
    llvm::errs() << "\n";
    ExternalSource->PrintStats();
  }

  BumpAlloc.PrintStats();
}
1026
1027void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M,
1028 bool NotifyListeners) {
1029 if (NotifyListeners)
1030 if (auto *Listener = getASTMutationListener();
1031 Listener && !ND->isUnconditionallyVisible())
1032 Listener->RedefinedHiddenDefinition(D: ND, M);
1033
1034 MergedDefModules[cast<NamedDecl>(Val: ND->getCanonicalDecl())].push_back(NewVal: M);
1035}
1036
1037void ASTContext::deduplicateMergedDefinitionsFor(NamedDecl *ND) {
1038 auto It = MergedDefModules.find(Val: cast<NamedDecl>(Val: ND->getCanonicalDecl()));
1039 if (It == MergedDefModules.end())
1040 return;
1041
1042 auto &Merged = It->second;
1043 llvm::DenseSet<Module*> Found;
1044 for (Module *&M : Merged)
1045 if (!Found.insert(V: M).second)
1046 M = nullptr;
1047 llvm::erase(C&: Merged, V: nullptr);
1048}
1049
1050ArrayRef<Module *>
1051ASTContext::getModulesWithMergedDefinition(const NamedDecl *Def) {
1052 auto MergedIt =
1053 MergedDefModules.find(Val: cast<NamedDecl>(Val: Def->getCanonicalDecl()));
1054 if (MergedIt == MergedDefModules.end())
1055 return {};
1056 return MergedIt->second;
1057}
1058
1059void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
1060 if (LazyInitializers.empty())
1061 return;
1062
1063 auto *Source = Ctx.getExternalSource();
1064 assert(Source && "lazy initializers but no external source");
1065
1066 auto LazyInits = std::move(LazyInitializers);
1067 LazyInitializers.clear();
1068
1069 for (auto ID : LazyInits)
1070 Initializers.push_back(Elt: Source->GetExternalDecl(ID));
1071
1072 assert(LazyInitializers.empty() &&
1073 "GetExternalDecl for lazy module initializer added more inits");
1074}
1075
/// Record \p D as an initializer for module \p M.
///
/// ImportDecls receive special treatment: an import whose imported module has
/// no initializers is dropped entirely, and an import whose imported module's
/// only initializer is itself an ImportDecl is collapsed to that inner import.
void ASTContext::addModuleInitializer(Module *M, Decl *D) {
  // One special case: if we add a module initializer that imports another
  // module, and that module's only initializer is an ImportDecl, simplify.
  if (const auto *ID = dyn_cast<ImportDecl>(Val: D)) {
    auto It = ModuleInitializers.find(Val: ID->getImportedModule());

    // Maybe the ImportDecl does nothing at all. (Common case.)
    if (It == ModuleInitializers.end())
      return;

    // Maybe the ImportDecl only imports another ImportDecl.
    auto &Imported = *It->second;
    if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) {
      // Force lazy initializers to be materialized so we can inspect the one
      // initializer directly.
      Imported.resolve(Ctx&: *this);
      auto *OnlyDecl = Imported.Initializers.front();
      if (isa<ImportDecl>(Val: OnlyDecl))
        D = OnlyDecl;
    }
  }

  // Per-module initializer lists are bump-allocated on the context and
  // destroyed (not freed) in cleanup().
  auto *&Inits = ModuleInitializers[M];
  if (!Inits)
    Inits = new (*this) PerModuleInitializers;
  Inits->Initializers.push_back(Elt: D);
}
1101
1102void ASTContext::addLazyModuleInitializers(Module *M,
1103 ArrayRef<GlobalDeclID> IDs) {
1104 auto *&Inits = ModuleInitializers[M];
1105 if (!Inits)
1106 Inits = new (*this) PerModuleInitializers;
1107 Inits->LazyInitializers.insert(I: Inits->LazyInitializers.end(),
1108 From: IDs.begin(), To: IDs.end());
1109}
1110
1111ArrayRef<Decl *> ASTContext::getModuleInitializers(Module *M) {
1112 auto It = ModuleInitializers.find(Val: M);
1113 if (It == ModuleInitializers.end())
1114 return {};
1115
1116 auto *Inits = It->second;
1117 Inits->resolve(Ctx&: *this);
1118 return Inits->Initializers;
1119}
1120
1121void ASTContext::setCurrentNamedModule(Module *M) {
1122 assert(M->isNamedModule());
1123 assert(!CurrentCXXNamedModule &&
1124 "We should set named module for ASTContext for only once");
1125 CurrentCXXNamedModule = M;
1126}
1127
/// Whether two modules are part of the same top-level (primary) module.
/// Comparison is done through cached "representative" modules so the
/// expensive primary-module-name string lookup happens at most once per
/// module. NOTE(review): callers must not pass two null modules — the assert
/// below fires in that case.
bool ASTContext::isInSameModule(const Module *M1, const Module *M2) const {
  // Exactly one side null => definitely different modules.
  if (!M1 != !M2)
    return false;

  /// Get the representative module for M. The representative module is the
  /// first module unit for a specific primary module name. So that the module
  /// units have the same representative module belongs to the same module.
  ///
  /// The process is helpful to reduce the expensive string operations.
  auto GetRepresentativeModule = [this](const Module *M) {
    // Fast path: representative already cached for this module unit.
    auto Iter = SameModuleLookupSet.find(Val: M);
    if (Iter != SameModuleLookupSet.end())
      return Iter->second;

    // First unit seen for a primary module name becomes the representative.
    const Module *RepresentativeModule =
        PrimaryModuleNameMap.try_emplace(Key: M->getPrimaryModuleInterfaceName(), Args&: M)
            .first->second;
    SameModuleLookupSet[M] = RepresentativeModule;
    return RepresentativeModule;
  };

  assert(M1 && "Shouldn't call `isInSameModule` if both M1 and M2 are none.");
  return GetRepresentativeModule(M1) == GetRepresentativeModule(M2);
}
1152
1153ExternCContextDecl *ASTContext::getExternCContextDecl() const {
1154 if (!ExternCContext)
1155 ExternCContext = ExternCContextDecl::Create(C: *this, TU: getTranslationUnitDecl());
1156
1157 return ExternCContext;
1158}
1159
1160BuiltinTemplateDecl *
1161ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK,
1162 const IdentifierInfo *II) const {
1163 auto *BuiltinTemplate =
1164 BuiltinTemplateDecl::Create(C: *this, DC: getTranslationUnitDecl(), Name: II, BTK);
1165 BuiltinTemplate->setImplicit();
1166 getTranslationUnitDecl()->addDecl(D: BuiltinTemplate);
1167
1168 return BuiltinTemplate;
1169}
1170
// X-macro: for every builtin template listed in BuiltinTemplates.inc, emit a
// lazy accessor ASTContext::get<Name>Decl() that builds (and caches in
// Decl<Name>) the corresponding implicit declaration on first use.
#define BuiltinTemplate(BTName)                                                \
  BuiltinTemplateDecl *ASTContext::get##BTName##Decl() const {                 \
    if (!Decl##BTName)                                                         \
      Decl##BTName =                                                           \
          buildBuiltinTemplateDecl(BTK##BTName, get##BTName##Name());          \
    return Decl##BTName;                                                       \
  }
#include "clang/Basic/BuiltinTemplates.inc"
1179
1180RecordDecl *ASTContext::buildImplicitRecord(StringRef Name,
1181 RecordDecl::TagKind TK) const {
1182 SourceLocation Loc;
1183 RecordDecl *NewDecl;
1184 if (getLangOpts().CPlusPlus)
1185 NewDecl = CXXRecordDecl::Create(C: *this, TK, DC: getTranslationUnitDecl(), StartLoc: Loc,
1186 IdLoc: Loc, Id: &Idents.get(Name));
1187 else
1188 NewDecl = RecordDecl::Create(C: *this, TK, DC: getTranslationUnitDecl(), StartLoc: Loc, IdLoc: Loc,
1189 Id: &Idents.get(Name));
1190 NewDecl->setImplicit();
1191 NewDecl->addAttr(A: TypeVisibilityAttr::CreateImplicit(
1192 Ctx&: const_cast<ASTContext &>(*this), Visibility: TypeVisibilityAttr::Default));
1193 return NewDecl;
1194}
1195
1196TypedefDecl *ASTContext::buildImplicitTypedef(QualType T,
1197 StringRef Name) const {
1198 TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
1199 TypedefDecl *NewDecl = TypedefDecl::Create(
1200 C&: const_cast<ASTContext &>(*this), DC: getTranslationUnitDecl(),
1201 StartLoc: SourceLocation(), IdLoc: SourceLocation(), Id: &Idents.get(Name), TInfo);
1202 NewDecl->setImplicit();
1203 return NewDecl;
1204}
1205
1206TypedefDecl *ASTContext::getInt128Decl() const {
1207 if (!Int128Decl)
1208 Int128Decl = buildImplicitTypedef(T: Int128Ty, Name: "__int128_t");
1209 return Int128Decl;
1210}
1211
1212TypedefDecl *ASTContext::getUInt128Decl() const {
1213 if (!UInt128Decl)
1214 UInt128Decl = buildImplicitTypedef(T: UnsignedInt128Ty, Name: "__uint128_t");
1215 return UInt128Decl;
1216}
1217
1218void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
1219 auto *Ty = new (*this, alignof(BuiltinType)) BuiltinType(K);
1220 R = CanQualType::CreateUnsafe(Other: QualType(Ty, 0));
1221 Types.push_back(Elt: Ty);
1222}
1223
/// Initialize all builtin type singletons for the given target (and optional
/// auxiliary target, e.g. the host target in device compilations). Must run
/// exactly once per context; language options gate which families of types
/// are created.
void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
                                  const TargetInfo *AuxTarget) {
  assert((!this->Target || this->Target == &Target) &&
         "Incorrect target reinitialization");
  assert(VoidTy.isNull() && "Context reinitialized?");

  this->Target = &Target;
  this->AuxTarget = AuxTarget;

  // The C++ ABI object and mangling mode depend on the target.
  ABI.reset(p: createCXXABI(T: Target));
  AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(TI: Target, LangOpts);

  // C99 6.2.5p19.
  InitBuiltinType(R&: VoidTy, K: BuiltinType::Void);

  // C99 6.2.5p2.
  InitBuiltinType(R&: BoolTy, K: BuiltinType::Bool);
  // C99 6.2.5p3.
  if (LangOpts.CharIsSigned)
    InitBuiltinType(R&: CharTy, K: BuiltinType::Char_S);
  else
    InitBuiltinType(R&: CharTy, K: BuiltinType::Char_U);
  // C99 6.2.5p4.
  InitBuiltinType(R&: SignedCharTy, K: BuiltinType::SChar);
  InitBuiltinType(R&: ShortTy, K: BuiltinType::Short);
  InitBuiltinType(R&: IntTy, K: BuiltinType::Int);
  InitBuiltinType(R&: LongTy, K: BuiltinType::Long);
  InitBuiltinType(R&: LongLongTy, K: BuiltinType::LongLong);

  // C99 6.2.5p6.
  InitBuiltinType(R&: UnsignedCharTy, K: BuiltinType::UChar);
  InitBuiltinType(R&: UnsignedShortTy, K: BuiltinType::UShort);
  InitBuiltinType(R&: UnsignedIntTy, K: BuiltinType::UInt);
  InitBuiltinType(R&: UnsignedLongTy, K: BuiltinType::ULong);
  InitBuiltinType(R&: UnsignedLongLongTy, K: BuiltinType::ULongLong);

  // C99 6.2.5p10.
  InitBuiltinType(R&: FloatTy, K: BuiltinType::Float);
  InitBuiltinType(R&: DoubleTy, K: BuiltinType::Double);
  InitBuiltinType(R&: LongDoubleTy, K: BuiltinType::LongDouble);

  // GNU extension, __float128 for IEEE quadruple precision
  InitBuiltinType(R&: Float128Ty, K: BuiltinType::Float128);

  // __ibm128 for IBM extended precision
  InitBuiltinType(R&: Ibm128Ty, K: BuiltinType::Ibm128);

  // C11 extension ISO/IEC TS 18661-3
  InitBuiltinType(R&: Float16Ty, K: BuiltinType::Float16);

  // ISO/IEC JTC1 SC22 WG14 N1169 Extension (fixed-point types).
  InitBuiltinType(R&: ShortAccumTy, K: BuiltinType::ShortAccum);
  InitBuiltinType(R&: AccumTy, K: BuiltinType::Accum);
  InitBuiltinType(R&: LongAccumTy, K: BuiltinType::LongAccum);
  InitBuiltinType(R&: UnsignedShortAccumTy, K: BuiltinType::UShortAccum);
  InitBuiltinType(R&: UnsignedAccumTy, K: BuiltinType::UAccum);
  InitBuiltinType(R&: UnsignedLongAccumTy, K: BuiltinType::ULongAccum);
  InitBuiltinType(R&: ShortFractTy, K: BuiltinType::ShortFract);
  InitBuiltinType(R&: FractTy, K: BuiltinType::Fract);
  InitBuiltinType(R&: LongFractTy, K: BuiltinType::LongFract);
  InitBuiltinType(R&: UnsignedShortFractTy, K: BuiltinType::UShortFract);
  InitBuiltinType(R&: UnsignedFractTy, K: BuiltinType::UFract);
  InitBuiltinType(R&: UnsignedLongFractTy, K: BuiltinType::ULongFract);
  InitBuiltinType(R&: SatShortAccumTy, K: BuiltinType::SatShortAccum);
  InitBuiltinType(R&: SatAccumTy, K: BuiltinType::SatAccum);
  InitBuiltinType(R&: SatLongAccumTy, K: BuiltinType::SatLongAccum);
  InitBuiltinType(R&: SatUnsignedShortAccumTy, K: BuiltinType::SatUShortAccum);
  InitBuiltinType(R&: SatUnsignedAccumTy, K: BuiltinType::SatUAccum);
  InitBuiltinType(R&: SatUnsignedLongAccumTy, K: BuiltinType::SatULongAccum);
  InitBuiltinType(R&: SatShortFractTy, K: BuiltinType::SatShortFract);
  InitBuiltinType(R&: SatFractTy, K: BuiltinType::SatFract);
  InitBuiltinType(R&: SatLongFractTy, K: BuiltinType::SatLongFract);
  InitBuiltinType(R&: SatUnsignedShortFractTy, K: BuiltinType::SatUShortFract);
  InitBuiltinType(R&: SatUnsignedFractTy, K: BuiltinType::SatUFract);
  InitBuiltinType(R&: SatUnsignedLongFractTy, K: BuiltinType::SatULongFract);

  // GNU extension, 128-bit integers.
  InitBuiltinType(R&: Int128Ty, K: BuiltinType::Int128);
  InitBuiltinType(R&: UnsignedInt128Ty, K: BuiltinType::UInt128);

  // C++ 3.9.1p5
  if (TargetInfo::isTypeSigned(T: Target.getWCharType()))
    InitBuiltinType(R&: WCharTy, K: BuiltinType::WChar_S);
  else  // -fshort-wchar makes wchar_t be unsigned.
    InitBuiltinType(R&: WCharTy, K: BuiltinType::WChar_U);
  if (LangOpts.CPlusPlus && LangOpts.WChar)
    WideCharTy = WCharTy;
  else {
    // C99 (or C++ using -fno-wchar).
    WideCharTy = getFromTargetType(Type: Target.getWCharType());
  }

  WIntTy = getFromTargetType(Type: Target.getWIntType());

  // C++20 (proposed)
  InitBuiltinType(R&: Char8Ty, K: BuiltinType::Char8);

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(R&: Char16Ty, K: BuiltinType::Char16);
  else // C99
    Char16Ty = getFromTargetType(Type: Target.getChar16Type());

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(R&: Char32Ty, K: BuiltinType::Char32);
  else // C99
    Char32Ty = getFromTargetType(Type: Target.getChar32Type());

  // Placeholder type for type-dependent expressions whose type is
  // completely unknown. No code should ever check a type against
  // DependentTy and users should never see it; however, it is here to
  // help diagnose failures to properly check for type-dependent
  // expressions.
  InitBuiltinType(R&: DependentTy, K: BuiltinType::Dependent);

  // Placeholder type for functions.
  InitBuiltinType(R&: OverloadTy, K: BuiltinType::Overload);

  // Placeholder type for bound members.
  InitBuiltinType(R&: BoundMemberTy, K: BuiltinType::BoundMember);

  // Placeholder type for unresolved templates.
  InitBuiltinType(R&: UnresolvedTemplateTy, K: BuiltinType::UnresolvedTemplate);

  // Placeholder type for pseudo-objects.
  InitBuiltinType(R&: PseudoObjectTy, K: BuiltinType::PseudoObject);

  // "any" type; useful for debugger-like clients.
  InitBuiltinType(R&: UnknownAnyTy, K: BuiltinType::UnknownAny);

  // Placeholder type for unbridged ARC casts.
  InitBuiltinType(R&: ARCUnbridgedCastTy, K: BuiltinType::ARCUnbridgedCast);

  // Placeholder type for builtin functions.
  InitBuiltinType(R&: BuiltinFnTy, K: BuiltinType::BuiltinFn);

  // Placeholder type for OMP array sections.
  if (LangOpts.OpenMP) {
    InitBuiltinType(R&: ArraySectionTy, K: BuiltinType::ArraySection);
    InitBuiltinType(R&: OMPArrayShapingTy, K: BuiltinType::OMPArrayShaping);
    InitBuiltinType(R&: OMPIteratorTy, K: BuiltinType::OMPIterator);
  }
  // Placeholder type for OpenACC array sections, if we are ALSO in OMP mode,
  // don't bother, as we're just using the same type as OMP.
  if (LangOpts.OpenACC && !LangOpts.OpenMP) {
    InitBuiltinType(R&: ArraySectionTy, K: BuiltinType::ArraySection);
  }
  if (LangOpts.MatrixTypes)
    InitBuiltinType(R&: IncompleteMatrixIdxTy, K: BuiltinType::IncompleteMatrixIdx);

  // Builtin types for 'id', 'Class', and 'SEL'.
  InitBuiltinType(R&: ObjCBuiltinIdTy, K: BuiltinType::ObjCId);
  InitBuiltinType(R&: ObjCBuiltinClassTy, K: BuiltinType::ObjCClass);
  InitBuiltinType(R&: ObjCBuiltinSelTy, K: BuiltinType::ObjCSel);

  // OpenCL image, sampler, event, pipe etc. types, driven by .def x-macros.
  if (LangOpts.OpenCL) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/OpenCLImageTypes.def"

    InitBuiltinType(R&: OCLSamplerTy, K: BuiltinType::OCLSampler);
    InitBuiltinType(R&: OCLEventTy, K: BuiltinType::OCLEvent);
    InitBuiltinType(R&: OCLClkEventTy, K: BuiltinType::OCLClkEvent);
    InitBuiltinType(R&: OCLQueueTy, K: BuiltinType::OCLQueue);
    InitBuiltinType(R&: OCLReserveIDTy, K: BuiltinType::OCLReserveID);

#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/OpenCLExtensionTypes.def"
  }

  // HLSL resource handle types.
  if (LangOpts.HLSL) {
#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/HLSLIntangibleTypes.def"
  }

  // AArch64 ACLE (SVE etc.) types — on either the primary or aux target.
  if (Target.hasAArch64ACLETypes() ||
      (AuxTarget && AuxTarget->hasAArch64ACLETypes())) {
#define SVE_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/AArch64ACLETypes.def"
  }

  // PowerPC MMA / VSX vector types.
  if (Target.getTriple().isPPC64()) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
      InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
  }

  // RISC-V vector (RVV) types.
  if (Target.hasRISCVVTypes()) {
#define RVV_TYPE(Name, Id, SingletonId)                                        \
  InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/RISCVVTypes.def"
  }

  // WebAssembly reference types (behind the reference-types feature).
  if (Target.getTriple().isWasm() && Target.hasFeature(Feature: "reference-types")) {
#define WASM_TYPE(Name, Id, SingletonId)                                       \
  InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/WebAssemblyReferenceTypes.def"
  }

  // AMDGPU builtin types — also for AMD-vendored SPIR-V, on either target.
  if (Target.getTriple().isAMDGPU() ||
      (Target.getTriple().isSPIRV() &&
       Target.getTriple().getVendor() == llvm::Triple::AMD) ||
      (AuxTarget &&
       (AuxTarget->getTriple().isAMDGPU() ||
        ((AuxTarget->getTriple().isSPIRV() &&
          AuxTarget->getTriple().getVendor() == llvm::Triple::AMD))))) {
#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align)                       \
  InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/AMDGPUTypes.def"
  }

  // Builtin type for __objc_yes and __objc_no
  ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
                       SignedCharTy : BoolTy);

  ObjCConstantStringType = QualType();

  ObjCSuperType = QualType();

  // void * type
  if (LangOpts.OpenCLGenericAddressSpace) {
    auto Q = VoidTy.getQualifiers();
    Q.setAddressSpace(LangAS::opencl_generic);
    VoidPtrTy = getPointerType(T: getCanonicalType(
        T: getQualifiedType(T: VoidTy.getUnqualifiedType(), Qs: Q)));
  } else {
    VoidPtrTy = getPointerType(T: VoidTy);
  }

  // nullptr type (C++0x 2.14.7)
  InitBuiltinType(R&: NullPtrTy, K: BuiltinType::NullPtr);

  // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
  InitBuiltinType(R&: HalfTy, K: BuiltinType::Half);

  InitBuiltinType(R&: BFloat16Ty, K: BuiltinType::BFloat16);

  // Builtin type used to help define __builtin_va_list.
  VaListTagDecl = nullptr;

  // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls.
  if (LangOpts.MicrosoftExt || LangOpts.Borland) {
    MSGuidTagDecl = buildImplicitRecord(Name: "_GUID");
    getTranslationUnitDecl()->addDecl(D: MSGuidTagDecl);
  }
}
1475
1476DiagnosticsEngine &ASTContext::getDiagnostics() const {
1477 return SourceMgr.getDiagnostics();
1478}
1479
1480AttrVec& ASTContext::getDeclAttrs(const Decl *D) {
1481 AttrVec *&Result = DeclAttrs[D];
1482 if (!Result) {
1483 void *Mem = Allocate(Size: sizeof(AttrVec));
1484 Result = new (Mem) AttrVec;
1485 }
1486
1487 return *Result;
1488}
1489
1490/// Erase the attributes corresponding to the given declaration.
1491void ASTContext::eraseDeclAttrs(const Decl *D) {
1492 llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(Val: D);
1493 if (Pos != DeclAttrs.end()) {
1494 Pos->second->~AttrVec();
1495 DeclAttrs.erase(I: Pos);
1496 }
1497}
1498
1499// FIXME: Remove ?
1500MemberSpecializationInfo *
1501ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
1502 assert(Var->isStaticDataMember() && "Not a static data member");
1503 return getTemplateOrSpecializationInfo(Var)
1504 .dyn_cast<MemberSpecializationInfo *>();
1505}
1506
1507ASTContext::TemplateOrSpecializationInfo
1508ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) {
1509 llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
1510 TemplateOrInstantiation.find(Val: Var);
1511 if (Pos == TemplateOrInstantiation.end())
1512 return {};
1513
1514 return Pos->second;
1515}
1516
1517void
1518ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
1519 TemplateSpecializationKind TSK,
1520 SourceLocation PointOfInstantiation) {
1521 assert(Inst->isStaticDataMember() && "Not a static data member");
1522 assert(Tmpl->isStaticDataMember() && "Not a static data member");
1523 setTemplateOrSpecializationInfo(Inst, TSI: new (*this) MemberSpecializationInfo(
1524 Tmpl, TSK, PointOfInstantiation));
1525}
1526
1527void
1528ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst,
1529 TemplateOrSpecializationInfo TSI) {
1530 assert(!TemplateOrInstantiation[Inst] &&
1531 "Already noted what the variable was instantiated from");
1532 TemplateOrInstantiation[Inst] = TSI;
1533}
1534
1535NamedDecl *
1536ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) {
1537 return InstantiatedFromUsingDecl.lookup(Val: UUD);
1538}
1539
1540void
1541ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) {
1542 assert((isa<UsingDecl>(Pattern) ||
1543 isa<UnresolvedUsingValueDecl>(Pattern) ||
1544 isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
1545 "pattern decl is not a using decl");
1546 assert((isa<UsingDecl>(Inst) ||
1547 isa<UnresolvedUsingValueDecl>(Inst) ||
1548 isa<UnresolvedUsingTypenameDecl>(Inst)) &&
1549 "instantiation did not produce a using decl");
1550 assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
1551 InstantiatedFromUsingDecl[Inst] = Pattern;
1552}
1553
1554UsingEnumDecl *
1555ASTContext::getInstantiatedFromUsingEnumDecl(UsingEnumDecl *UUD) {
1556 return InstantiatedFromUsingEnumDecl.lookup(Val: UUD);
1557}
1558
1559void ASTContext::setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst,
1560 UsingEnumDecl *Pattern) {
1561 assert(!InstantiatedFromUsingEnumDecl[Inst] && "pattern already exists");
1562 InstantiatedFromUsingEnumDecl[Inst] = Pattern;
1563}
1564
1565UsingShadowDecl *
1566ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) {
1567 return InstantiatedFromUsingShadowDecl.lookup(Val: Inst);
1568}
1569
1570void
1571ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
1572 UsingShadowDecl *Pattern) {
1573 assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
1574 InstantiatedFromUsingShadowDecl[Inst] = Pattern;
1575}
1576
1577FieldDecl *
1578ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) const {
1579 return InstantiatedFromUnnamedFieldDecl.lookup(Val: Field);
1580}
1581
1582void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst,
1583 FieldDecl *Tmpl) {
1584 assert((!Inst->getDeclName() || Inst->isPlaceholderVar(getLangOpts())) &&
1585 "Instantiated field decl is not unnamed");
1586 assert((!Inst->getDeclName() || Inst->isPlaceholderVar(getLangOpts())) &&
1587 "Template field decl is not unnamed");
1588 assert(!InstantiatedFromUnnamedFieldDecl[Inst] &&
1589 "Already noted what unnamed field was instantiated from");
1590
1591 InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl;
1592}
1593
1594ASTContext::overridden_cxx_method_iterator
1595ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const {
1596 return overridden_methods(Method).begin();
1597}
1598
1599ASTContext::overridden_cxx_method_iterator
1600ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const {
1601 return overridden_methods(Method).end();
1602}
1603
1604unsigned
1605ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const {
1606 auto Range = overridden_methods(Method);
1607 return Range.end() - Range.begin();
1608}
1609
1610ASTContext::overridden_method_range
1611ASTContext::overridden_methods(const CXXMethodDecl *Method) const {
1612 llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos =
1613 OverriddenMethods.find(Val: Method->getCanonicalDecl());
1614 if (Pos == OverriddenMethods.end())
1615 return overridden_method_range(nullptr, nullptr);
1616 return overridden_method_range(Pos->second.begin(), Pos->second.end());
1617}
1618
1619void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method,
1620 const CXXMethodDecl *Overridden) {
1621 assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl());
1622 OverriddenMethods[Method].push_back(NewVal: Overridden);
1623}
1624
1625void ASTContext::getOverriddenMethods(
1626 const NamedDecl *D,
1627 SmallVectorImpl<const NamedDecl *> &Overridden) const {
1628 assert(D);
1629
1630 if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(Val: D)) {
1631 Overridden.append(in_start: overridden_methods_begin(Method: CXXMethod),
1632 in_end: overridden_methods_end(Method: CXXMethod));
1633 return;
1634 }
1635
1636 const auto *Method = dyn_cast<ObjCMethodDecl>(Val: D);
1637 if (!Method)
1638 return;
1639
1640 SmallVector<const ObjCMethodDecl *, 8> OverDecls;
1641 Method->getOverriddenMethods(Overridden&: OverDecls);
1642 Overridden.append(in_start: OverDecls.begin(), in_end: OverDecls.end());
1643}
1644
1645std::optional<ASTContext::CXXRecordDeclRelocationInfo>
1646ASTContext::getRelocationInfoForCXXRecord(const CXXRecordDecl *RD) const {
1647 assert(RD);
1648 CXXRecordDecl *D = RD->getDefinition();
1649 auto it = RelocatableClasses.find(Val: D);
1650 if (it != RelocatableClasses.end())
1651 return it->getSecond();
1652 return std::nullopt;
1653}
1654
1655void ASTContext::setRelocationInfoForCXXRecord(
1656 const CXXRecordDecl *RD, CXXRecordDeclRelocationInfo Info) {
1657 assert(RD);
1658 CXXRecordDecl *D = RD->getDefinition();
1659 assert(RelocatableClasses.find(D) == RelocatableClasses.end());
1660 RelocatableClasses.insert(KV: {D, Info});
1661}
1662
1663static bool primaryBaseHaseAddressDiscriminatedVTableAuthentication(
1664 const ASTContext &Context, const CXXRecordDecl *Class) {
1665 if (!Class->isPolymorphic())
1666 return false;
1667 const CXXRecordDecl *BaseType = Context.baseForVTableAuthentication(ThisClass: Class);
1668 using AuthAttr = VTablePointerAuthenticationAttr;
1669 const AuthAttr *ExplicitAuth = BaseType->getAttr<AuthAttr>();
1670 if (!ExplicitAuth)
1671 return Context.getLangOpts().PointerAuthVTPtrAddressDiscrimination;
1672 AuthAttr::AddressDiscriminationMode AddressDiscrimination =
1673 ExplicitAuth->getAddressDiscrimination();
1674 if (AddressDiscrimination == AuthAttr::DefaultAddressDiscrimination)
1675 return Context.getLangOpts().PointerAuthVTPtrAddressDiscrimination;
1676 return AddressDiscrimination == AuthAttr::AddressDiscrimination;
1677}
1678
/// Determine whether type \p T contains address-discriminated pointer
/// authentication: directly, via an address-discriminated vtable pointer, or
/// transitively through bases and fields. Results for record types are
/// memoized in RecordContainsAddressDiscriminatedPointerAuth.
ASTContext::PointerAuthContent
ASTContext::findPointerAuthContent(QualType T) const {
  assert(isPointerAuthenticationAvailable());

  T = T.getCanonicalType();
  // Dependent types cannot be answered definitively.
  if (T->isDependentType())
    return PointerAuthContent::None;

  // The type itself being address-discriminated is the strongest answer.
  if (T.hasAddressDiscriminatedPointerAuth())
    return PointerAuthContent::AddressDiscriminatedData;
  const RecordDecl *RD = T->getAsRecordDecl();
  if (!RD)
    return PointerAuthContent::None;

  if (RD->isInvalidDecl())
    return PointerAuthContent::None;

  // Memoized fast path.
  if (auto Existing = RecordContainsAddressDiscriminatedPointerAuth.find(Val: RD);
      Existing != RecordContainsAddressDiscriminatedPointerAuth.end())
    return Existing->second;

  PointerAuthContent Result = PointerAuthContent::None;

  // Cache the accumulated Result for RD before returning it.
  auto SaveResultAndReturn = [&]() -> PointerAuthContent {
    auto [ResultIter, DidAdd] =
        RecordContainsAddressDiscriminatedPointerAuth.try_emplace(Key: RD, Args&: Result);
    (void)ResultIter;
    (void)DidAdd;
    assert(DidAdd);
    return Result;
  };
  // Raise Result to NewResult if it is stronger; stop walking once the
  // maximum (AddressDiscriminatedData) is reached. The static_asserts pin
  // the lattice ordering the max relies on.
  auto ShouldContinueAfterUpdate = [&](PointerAuthContent NewResult) {
    static_assert(PointerAuthContent::None <
                  PointerAuthContent::AddressDiscriminatedVTable);
    static_assert(PointerAuthContent::AddressDiscriminatedVTable <
                  PointerAuthContent::AddressDiscriminatedData);
    if (NewResult > Result)
      Result = NewResult;
    return Result != PointerAuthContent::AddressDiscriminatedData;
  };
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD)) {
    // Account for an address-discriminated vtable pointer, then recurse into
    // base classes.
    if (primaryBaseHaseAddressDiscriminatedVTableAuthentication(Context: *this, Class: CXXRD) &&
        !ShouldContinueAfterUpdate(
            PointerAuthContent::AddressDiscriminatedVTable))
      return SaveResultAndReturn();
    for (auto Base : CXXRD->bases()) {
      if (!ShouldContinueAfterUpdate(findPointerAuthContent(T: Base.getType())))
        return SaveResultAndReturn();
    }
  }
  // Recurse into all fields of the record.
  for (auto *FieldDecl : RD->fields()) {
    if (!ShouldContinueAfterUpdate(
            findPointerAuthContent(T: FieldDecl->getType())))
      return SaveResultAndReturn();
  }
  return SaveResultAndReturn();
}
1736
1737void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
1738 assert(!Import->getNextLocalImport() &&
1739 "Import declaration already in the chain");
1740 assert(!Import->isFromASTFile() && "Non-local import declaration");
1741 if (!FirstLocalImport) {
1742 FirstLocalImport = Import;
1743 LastLocalImport = Import;
1744 return;
1745 }
1746
1747 LastLocalImport->setNextLocalImport(Import);
1748 LastLocalImport = Import;
1749}
1750
1751//===----------------------------------------------------------------------===//
1752// Type Sizing and Analysis
1753//===----------------------------------------------------------------------===//
1754
1755/// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
1756/// scalar floating point type.
1757const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
1758 switch (T->castAs<BuiltinType>()->getKind()) {
1759 default:
1760 llvm_unreachable("Not a floating point type!");
1761 case BuiltinType::BFloat16:
1762 return Target->getBFloat16Format();
1763 case BuiltinType::Float16:
1764 return Target->getHalfFormat();
1765 case BuiltinType::Half:
1766 return Target->getHalfFormat();
1767 case BuiltinType::Float: return Target->getFloatFormat();
1768 case BuiltinType::Double: return Target->getDoubleFormat();
1769 case BuiltinType::Ibm128:
1770 return Target->getIbm128Format();
1771 case BuiltinType::LongDouble:
1772 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
1773 return AuxTarget->getLongDoubleFormat();
1774 return Target->getLongDoubleFormat();
1775 case BuiltinType::Float128:
1776 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
1777 return AuxTarget->getFloat128Format();
1778 return Target->getFloat128Format();
1779 }
1780}
1781
/// Return the alignment of the declaration \p D, in characters.
///
/// \param ForAlignof When true, compute the value reported by
///        alignof/_Alignof on the declaration (references use the referenced
///        type, and global-variable / large-array alignment bumps are
///        skipped); when false, compute the alignment actually used to lay
///        the declaration out.
CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
  unsigned Align = Target->getCharWidth();

  const unsigned AlignFromAttr = D->getMaxAlignment();
  if (AlignFromAttr)
    Align = AlignFromAttr;

  // __attribute__((aligned)) can increase or decrease alignment
  // *except* on a struct or struct member, where it only increases
  // alignment unless 'packed' is also specified.
  //
  // It is an error for alignas to decrease alignment, so we can
  // ignore that possibility;  Sema should diagnose it.
  bool UseAlignAttrOnly;
  if (const FieldDecl *FD = dyn_cast<FieldDecl>(Val: D))
    UseAlignAttrOnly =
        FD->hasAttr<PackedAttr>() || FD->getParent()->hasAttr<PackedAttr>();
  else
    UseAlignAttrOnly = AlignFromAttr != 0;
  // If we're using the align attribute only, just ignore everything
  // else about the declaration and its type.
  if (UseAlignAttrOnly) {
    // do nothing
  } else if (const auto *VD = dyn_cast<ValueDecl>(Val: D)) {
    QualType T = VD->getType();
    // For references: alignof looks through to the referenced type, while
    // layout treats the reference like a pointer.
    if (const auto *RT = T->getAs<ReferenceType>()) {
      if (ForAlignof)
        T = RT->getPointeeType();
      else
        T = getPointerType(T: RT->getPointeeType());
    }
    QualType BaseT = getBaseElementType(QT: T);
    if (T->isFunctionType())
      Align = getTypeInfoImpl(T: T.getTypePtr()).Align;
    else if (!BaseT->isIncompleteType()) {
      // Adjust alignments of declarations with array type by the
      // large-array alignment on the target.
      if (const ArrayType *arrayType = getAsArrayType(T)) {
        unsigned MinWidth = Target->getLargeArrayMinWidth();
        if (!ForAlignof && MinWidth) {
          if (isa<VariableArrayType>(Val: arrayType))
            Align = std::max(a: Align, b: Target->getLargeArrayAlign());
          else if (isa<ConstantArrayType>(Val: arrayType) &&
                   MinWidth <= getTypeSize(T: cast<ConstantArrayType>(Val: arrayType)))
            Align = std::max(a: Align, b: Target->getLargeArrayAlign());
        }
      }
      Align = std::max(a: Align, b: getPreferredTypeAlign(T: T.getTypePtr()));
      // The MS 'unaligned' qualifier forces byte alignment.
      if (BaseT.getQualifiers().hasUnaligned())
        Align = Target->getCharWidth();
    }

    // Ensure minimum alignment for global variables.
    if (const auto *VD = dyn_cast<VarDecl>(Val: D))
      if (VD->hasGlobalStorage() && !ForAlignof) {
        uint64_t TypeSize =
            !BaseT->isIncompleteType() ? getTypeSize(T: T.getTypePtr()) : 0;
        Align = std::max(a: Align, b: getMinGlobalAlignOfVar(Size: TypeSize, VD));
      }

    // Fields can be subject to extra alignment constraints, like if
    // the field is packed, the struct is packed, or the struct has a
    // a max-field-alignment constraint (#pragma pack).  So calculate
    // the actual alignment of the field within the struct, and then
    // (as we're expected to) constrain that by the alignment of the type.
    if (const auto *Field = dyn_cast<FieldDecl>(Val: VD)) {
      const RecordDecl *Parent = Field->getParent();
      // We can only produce a sensible answer if the record is valid.
      if (!Parent->isInvalidDecl()) {
        const ASTRecordLayout &Layout = getASTRecordLayout(D: Parent);

        // Start with the record's overall alignment.
        unsigned FieldAlign = toBits(CharSize: Layout.getAlignment());

        // Use the GCD of that and the offset within the record.
        uint64_t Offset = Layout.getFieldOffset(FieldNo: Field->getFieldIndex());
        if (Offset > 0) {
          // Alignment is always a power of 2, so the GCD will be a power of 2,
          // which means we get to do this crazy thing instead of Euclid's.
          uint64_t LowBitOfOffset = Offset & (~Offset + 1);
          if (LowBitOfOffset < FieldAlign)
            FieldAlign = static_cast<unsigned>(LowBitOfOffset);
        }

        Align = std::min(a: Align, b: FieldAlign);
      }
    }
  }

  // Some targets have hard limitation on the maximum requestable alignment in
  // aligned attribute for static variables.
  const unsigned MaxAlignedAttr = getTargetInfo().getMaxAlignedAttribute();
  const auto *VD = dyn_cast<VarDecl>(Val: D);
  if (MaxAlignedAttr && VD && VD->getStorageClass() == SC_Static)
    Align = std::min(a: Align, b: MaxAlignedAttr);

  return toCharUnitsFromBits(BitSize: Align);
}
1880
1881CharUnits ASTContext::getExnObjectAlignment() const {
1882 return toCharUnitsFromBits(BitSize: Target->getExnObjectAlignment());
1883}
1884
1885// getTypeInfoDataSizeInChars - Return the size of a type, in
1886// chars. If the type is a record, its data size is returned. This is
1887// the size of the memcpy that's performed when assigning this type
1888// using a trivial copy/move assignment operator.
1889TypeInfoChars ASTContext::getTypeInfoDataSizeInChars(QualType T) const {
1890 TypeInfoChars Info = getTypeInfoInChars(T);
1891
1892 // In C++, objects can sometimes be allocated into the tail padding
1893 // of a base-class subobject. We decide whether that's possible
1894 // during class layout, so here we can just trust the layout results.
1895 if (getLangOpts().CPlusPlus) {
1896 if (const auto *RD = T->getAsCXXRecordDecl(); RD && !RD->isInvalidDecl()) {
1897 const ASTRecordLayout &layout = getASTRecordLayout(D: RD);
1898 Info.Width = layout.getDataSize();
1899 }
1900 }
1901
1902 return Info;
1903}
1904
1905/// getConstantArrayInfoInChars - Performing the computation in CharUnits
1906/// instead of in bits prevents overflowing the uint64_t for some large arrays.
1907TypeInfoChars
1908static getConstantArrayInfoInChars(const ASTContext &Context,
1909 const ConstantArrayType *CAT) {
1910 TypeInfoChars EltInfo = Context.getTypeInfoInChars(T: CAT->getElementType());
1911 uint64_t Size = CAT->getZExtSize();
1912 assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <=
1913 (uint64_t)(-1)/Size) &&
1914 "Overflow in array type char size evaluation");
1915 uint64_t Width = EltInfo.Width.getQuantity() * Size;
1916 unsigned Align = EltInfo.Align.getQuantity();
1917 if (!Context.getTargetInfo().getCXXABI().isMicrosoft() ||
1918 Context.getTargetInfo().getPointerWidth(AddrSpace: LangAS::Default) == 64)
1919 Width = llvm::alignTo(Value: Width, Align);
1920 return TypeInfoChars(CharUnits::fromQuantity(Quantity: Width),
1921 CharUnits::fromQuantity(Quantity: Align),
1922 EltInfo.AlignRequirement);
1923}
1924
1925TypeInfoChars ASTContext::getTypeInfoInChars(const Type *T) const {
1926 if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: T))
1927 return getConstantArrayInfoInChars(Context: *this, CAT);
1928 TypeInfo Info = getTypeInfo(T);
1929 return TypeInfoChars(toCharUnitsFromBits(BitSize: Info.Width),
1930 toCharUnitsFromBits(BitSize: Info.Align), Info.AlignRequirement);
1931}
1932
1933TypeInfoChars ASTContext::getTypeInfoInChars(QualType T) const {
1934 return getTypeInfoInChars(T: T.getTypePtr());
1935}
1936
/// Return true if \p T is an integer type that is promoted to 'int' by the
/// integer promotions (C99 6.3.1.1p2, C++ [conv.prom]).
bool ASTContext::isPromotableIntegerType(QualType T) const {
  // HLSL doesn't promote all small integer types to int, it
  // just uses the rank-based promotion rules for all types.
  if (getLangOpts().HLSL)
    return false;

  // Builtin integer types narrower than 'int' (and the char/wchar family)
  // are promotable.
  if (const auto *BT = T->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Bool:
    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Char8:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
      return true;
    default:
      return false;
    }

  // Enumerated types are promotable to their compatible integer types
  // (C99 6.3.1.1) a.k.a. its underlying type (C++ [conv.prom]p2).
  if (const auto *ED = T->getAsEnumDecl()) {
    // Scoped enums never promote; a null promotion type means the enum is
    // incomplete (or dependent), so no promotion applies either.
    if (T->isDependentType() || ED->getPromotionType().isNull() ||
        ED->isScoped())
      return false;

    return true;
  }

  // OverflowBehaviorTypes are promotable if their underlying type is promotable
  if (const auto *OBT = T->getAs<OverflowBehaviorType>()) {
    return isPromotableIntegerType(T: OBT->getUnderlyingType());
  }

  return false;
}
1979
1980bool ASTContext::isAlignmentRequired(const Type *T) const {
1981 return getTypeInfo(T).AlignRequirement != AlignRequirementKind::None;
1982}
1983
1984bool ASTContext::isAlignmentRequired(QualType T) const {
1985 return isAlignmentRequired(T: T.getTypePtr());
1986}
1987
/// Return the alignment of \p T if it can be determined without completing
/// the type, or 0 if it cannot. Checks typedef alignment attributes both
/// before and after stripping array types, since either level may carry one.
unsigned ASTContext::getTypeAlignIfKnown(QualType T,
                                         bool NeedsPreferredAlignment) const {
  // An alignment on a typedef overrides anything else.
  if (const auto *TT = T->getAs<TypedefType>())
    if (unsigned Align = TT->getDecl()->getMaxAlignment())
      return Align;

  // If we have an (array of) complete type, we're done.
  T = getBaseElementType(QT: T);
  if (!T->isIncompleteType())
    return NeedsPreferredAlignment ? getPreferredTypeAlign(T) : getTypeAlign(T);

  // If we had an array type, its element type might be a typedef
  // type with an alignment attribute.
  if (const auto *TT = T->getAs<TypedefType>())
    if (unsigned Align = TT->getDecl()->getMaxAlignment())
      return Align;

  // Otherwise, see if the declaration of the type had an attribute.
  if (const auto *TD = T->getAsTagDecl())
    return TD->getMaxAlignment();

  // Unknown: the type is incomplete with no alignment attribute anywhere.
  return 0;
}
2012
/// Get the size/alignment info for \p T, memoized in MemoizedTypeInfo.
TypeInfo ASTContext::getTypeInfo(const Type *T) const {
  TypeInfoMap::iterator I = MemoizedTypeInfo.find(Val: T);
  if (I != MemoizedTypeInfo.end())
    return I->second;

  // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup.
  // (getTypeInfoImpl may recurse and insert other entries, rehashing the map,
  // which is why we can't reuse iterator I here.)
  TypeInfo TI = getTypeInfoImpl(T);
  MemoizedTypeInfo[T] = TI;
  return TI;
}
2023
/// getTypeInfoImpl - Return the size of the specified type, in bits.  This
/// method does not work on incomplete types.
///
/// Width is reported in bits and Align must end up a power of 2 (asserted at
/// the bottom); sugar types recurse on their desugared form so only canonical
/// type classes compute sizes directly.
///
/// FIXME: Pointers into different addr spaces could have different sizes and
/// alignment requirements: getPointerInfo should take an AddrSpace, this
/// should take a QualType, &c.
TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
  uint64_t Width = 0;
  unsigned Align = 8;
  AlignRequirementKind AlignRequirement = AlignRequirementKind::None;
  LangAS AS = LangAS::Default;
  switch (T->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \
  case Type::Class: \
  assert(!T->isDependentType() && "should not see dependent types here"); \
  return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr());
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Should not see dependent types");

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // GCC extension: alignof(function) = 32 bits
    Width = 0;
    Align = 32;
    break;

  case Type::IncompleteArray:
  case Type::VariableArray:
  case Type::ConstantArray:
  case Type::ArrayParameter: {
    // Model non-constant sized arrays as size zero, but track the alignment.
    uint64_t Size = 0;
    if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: T))
      Size = CAT->getZExtSize();

    TypeInfo EltInfo = getTypeInfo(T: cast<ArrayType>(Val: T)->getElementType());
    assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) &&
           "Overflow in array type bit size evaluation");
    Width = EltInfo.Width * Size;
    Align = EltInfo.Align;
    AlignRequirement = EltInfo.AlignRequirement;
    // Round up to a multiple of the element alignment, except on 32-bit
    // Microsoft ABI targets, which do not round.
    if (!getTargetInfo().getCXXABI().isMicrosoft() ||
        getTargetInfo().getPointerWidth(AddrSpace: LangAS::Default) == 64)
      Width = llvm::alignTo(Value: Width, Align);
    break;
  }

  case Type::ExtVector:
  case Type::Vector: {
    const auto *VT = cast<VectorType>(Val: T);
    TypeInfo EltInfo = getTypeInfo(T: VT->getElementType());
    // Packed bool vectors use one bit per element rather than a full
    // element width.
    Width = VT->isPackedVectorBoolType(ctx: *this)
                ? VT->getNumElements()
                : EltInfo.Width * VT->getNumElements();
    // Enforce at least byte size and alignment.
    Width = std::max<unsigned>(a: 8, b: Width);
    Align = std::max<unsigned>(
        a: 8, b: Target->vectorsAreElementAligned() ? EltInfo.Width : Width);

    // If the alignment is not a power of 2, round up to the next power of 2.
    // This happens for non-power-of-2 length vectors.
    if (Align & (Align-1)) {
      Align = llvm::bit_ceil(Value: Align);
      Width = llvm::alignTo(Value: Width, Align);
    }
    // Adjust the alignment based on the target max.
    uint64_t TargetVectorAlign = Target->getMaxVectorAlign();
    if (TargetVectorAlign && TargetVectorAlign < Align)
      Align = TargetVectorAlign;
    if (VT->getVectorKind() == VectorKind::SveFixedLengthData)
      // Adjust the alignment for fixed-length SVE vectors. This is important
      // for non-power-of-2 vector lengths.
      Align = 128;
    else if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
      // Adjust the alignment for fixed-length SVE predicates.
      Align = 16;
    else if (VT->getVectorKind() == VectorKind::RVVFixedLengthData ||
             VT->getVectorKind() == VectorKind::RVVFixedLengthMask ||
             VT->getVectorKind() == VectorKind::RVVFixedLengthMask_1 ||
             VT->getVectorKind() == VectorKind::RVVFixedLengthMask_2 ||
             VT->getVectorKind() == VectorKind::RVVFixedLengthMask_4)
      // Adjust the alignment for fixed-length RVV vectors.
      Align = std::min<unsigned>(a: 64, b: Width);
    break;
  }

  case Type::ConstantMatrix: {
    const auto *MT = cast<ConstantMatrixType>(Val: T);
    TypeInfo ElementInfo = getTypeInfo(T: MT->getElementType());
    // The internal layout of a matrix value is implementation defined.
    // Initially be ABI compatible with arrays with respect to alignment and
    // size.
    Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns();
    Align = ElementInfo.Align;
    break;
  }

  case Type::Builtin:
    // Builtin types take their widths/alignments from TargetInfo.
    switch (cast<BuiltinType>(Val: T)->getKind()) {
    default: llvm_unreachable("Unknown builtin type!");
    case BuiltinType::Void:
      // GCC extension: alignof(void) = 8 bits.
      Width = 0;
      Align = 8;
      break;
    case BuiltinType::Bool:
      Width = Target->getBoolWidth();
      Align = Target->getBoolAlign();
      break;
    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::UChar:
    case BuiltinType::SChar:
    case BuiltinType::Char8:
      Width = Target->getCharWidth();
      Align = Target->getCharAlign();
      break;
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
      Width = Target->getWCharWidth();
      Align = Target->getWCharAlign();
      break;
    case BuiltinType::Char16:
      Width = Target->getChar16Width();
      Align = Target->getChar16Align();
      break;
    case BuiltinType::Char32:
      Width = Target->getChar32Width();
      Align = Target->getChar32Align();
      break;
    case BuiltinType::UShort:
    case BuiltinType::Short:
      Width = Target->getShortWidth();
      Align = Target->getShortAlign();
      break;
    case BuiltinType::UInt:
    case BuiltinType::Int:
      Width = Target->getIntWidth();
      Align = Target->getIntAlign();
      break;
    case BuiltinType::ULong:
    case BuiltinType::Long:
      Width = Target->getLongWidth();
      Align = Target->getLongAlign();
      break;
    case BuiltinType::ULongLong:
    case BuiltinType::LongLong:
      Width = Target->getLongLongWidth();
      Align = Target->getLongLongAlign();
      break;
    case BuiltinType::Int128:
    case BuiltinType::UInt128:
      Width = 128;
      Align = Target->getInt128Align();
      break;
    case BuiltinType::ShortAccum:
    case BuiltinType::UShortAccum:
    case BuiltinType::SatShortAccum:
    case BuiltinType::SatUShortAccum:
      Width = Target->getShortAccumWidth();
      Align = Target->getShortAccumAlign();
      break;
    case BuiltinType::Accum:
    case BuiltinType::UAccum:
    case BuiltinType::SatAccum:
    case BuiltinType::SatUAccum:
      Width = Target->getAccumWidth();
      Align = Target->getAccumAlign();
      break;
    case BuiltinType::LongAccum:
    case BuiltinType::ULongAccum:
    case BuiltinType::SatLongAccum:
    case BuiltinType::SatULongAccum:
      Width = Target->getLongAccumWidth();
      Align = Target->getLongAccumAlign();
      break;
    case BuiltinType::ShortFract:
    case BuiltinType::UShortFract:
    case BuiltinType::SatShortFract:
    case BuiltinType::SatUShortFract:
      Width = Target->getShortFractWidth();
      Align = Target->getShortFractAlign();
      break;
    case BuiltinType::Fract:
    case BuiltinType::UFract:
    case BuiltinType::SatFract:
    case BuiltinType::SatUFract:
      Width = Target->getFractWidth();
      Align = Target->getFractAlign();
      break;
    case BuiltinType::LongFract:
    case BuiltinType::ULongFract:
    case BuiltinType::SatLongFract:
    case BuiltinType::SatULongFract:
      Width = Target->getLongFractWidth();
      Align = Target->getLongFractAlign();
      break;
    case BuiltinType::BFloat16:
      // If the main target lacks __bf16, SYCL/OpenMP device compilation can
      // fall back to the aux (host) target's format.
      if (Target->hasBFloat16Type()) {
        Width = Target->getBFloat16Width();
        Align = Target->getBFloat16Align();
      } else if ((getLangOpts().SYCLIsDevice ||
                  (getLangOpts().OpenMP &&
                   getLangOpts().OpenMPIsTargetDevice)) &&
                 AuxTarget->hasBFloat16Type()) {
        Width = AuxTarget->getBFloat16Width();
        Align = AuxTarget->getBFloat16Align();
      }
      break;
    case BuiltinType::Float16:
    case BuiltinType::Half:
      if (Target->hasFloat16Type() || !getLangOpts().OpenMP ||
          !getLangOpts().OpenMPIsTargetDevice) {
        Width = Target->getHalfWidth();
        Align = Target->getHalfAlign();
      } else {
        assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
               "Expected OpenMP device compilation.");
        Width = AuxTarget->getHalfWidth();
        Align = AuxTarget->getHalfAlign();
      }
      break;
    case BuiltinType::Float:
      Width = Target->getFloatWidth();
      Align = Target->getFloatAlign();
      break;
    case BuiltinType::Double:
      Width = Target->getDoubleWidth();
      Align = Target->getDoubleAlign();
      break;
    case BuiltinType::Ibm128:
      Width = Target->getIbm128Width();
      Align = Target->getIbm128Align();
      break;
    case BuiltinType::LongDouble:
      // For OpenMP device compilation, use the host's long double layout
      // when it differs, so host and device agree.
      if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
          (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() ||
           Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) {
        Width = AuxTarget->getLongDoubleWidth();
        Align = AuxTarget->getLongDoubleAlign();
      } else {
        Width = Target->getLongDoubleWidth();
        Align = Target->getLongDoubleAlign();
      }
      break;
    case BuiltinType::Float128:
      if (Target->hasFloat128Type() || !getLangOpts().OpenMP ||
          !getLangOpts().OpenMPIsTargetDevice) {
        Width = Target->getFloat128Width();
        Align = Target->getFloat128Align();
      } else {
        assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
               "Expected OpenMP device compilation.");
        Width = AuxTarget->getFloat128Width();
        Align = AuxTarget->getFloat128Align();
      }
      break;
    case BuiltinType::NullPtr:
      // C++ 3.9.1p11: sizeof(nullptr_t) == sizeof(void*)
      Width = Target->getPointerWidth(AddrSpace: LangAS::Default);
      Align = Target->getPointerAlign(AddrSpace: LangAS::Default);
      break;
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      Width = Target->getPointerWidth(AddrSpace: LangAS::Default);
      Align = Target->getPointerAlign(AddrSpace: LangAS::Default);
      break;
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
  case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
  case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
      // OpenCL opaque types are laid out as pointers in a target-chosen
      // address space.
      AS = Target->getOpenCLTypeAddrSpace(TK: getOpenCLTypeKind(T));
      Width = Target->getPointerWidth(AddrSpace: AS);
      Align = Target->getPointerAlign(AddrSpace: AS);
      break;
    // The SVE types are effectively target-specific.  The length of an
    // SVE_VECTOR_TYPE is only known at runtime, but it is always a multiple
    // of 128 bits.  There is one predicate bit for each vector byte, so the
    // length of an SVE_PREDICATE_TYPE is always a multiple of 16 bits.
    //
    // Because the length is only known at runtime, we use a dummy value
    // of 0 for the static length.  The alignment values are those defined
    // by the Procedure Call Standard for the Arm Architecture.
#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId) \
  case BuiltinType::Id: \
    Width = 0; \
    Align = 128; \
    break;
#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId) \
  case BuiltinType::Id: \
    Width = 0; \
    Align = 16; \
    break;
#define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingletonId) \
  case BuiltinType::Id: \
    Width = 0; \
    Align = 16; \
    break;
#define SVE_SCALAR_TYPE(Name, MangledName, Id, SingletonId, Bits) \
  case BuiltinType::Id: \
    Width = Bits; \
    Align = Bits; \
    break;
#include "clang/Basic/AArch64ACLETypes.def"
#define PPC_VECTOR_TYPE(Name, Id, Size) \
  case BuiltinType::Id: \
    Width = Size; \
    Align = Size; \
    break;
#include "clang/Basic/PPCTypes.def"
#define RVV_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, NF, IsSigned, \
                        IsFP, IsBF) \
  case BuiltinType::Id: \
    Width = 0; \
    Align = ElBits; \
    break;
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, ElKind) \
  case BuiltinType::Id: \
    Width = 0; \
    Align = 8; \
    break;
#include "clang/Basic/RISCVVTypes.def"
#define WASM_TYPE(Name, Id, SingletonId) \
  case BuiltinType::Id: \
    Width = 0; \
    Align = 8; \
    break;
#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define AMDGPU_TYPE(NAME, ID, SINGLETONID, WIDTH, ALIGN) \
  case BuiltinType::ID: \
    Width = WIDTH; \
    Align = ALIGN; \
    break;
#include "clang/Basic/AMDGPUTypes.def"
#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/HLSLIntangibleTypes.def"
      Width = Target->getPointerWidth(AddrSpace: LangAS::Default);
      Align = Target->getPointerAlign(AddrSpace: LangAS::Default);
      break;
    }
    break;
  case Type::ObjCObjectPointer:
    Width = Target->getPointerWidth(AddrSpace: LangAS::Default);
    Align = Target->getPointerAlign(AddrSpace: LangAS::Default);
    break;
  case Type::BlockPointer:
    AS = cast<BlockPointerType>(Val: T)->getPointeeType().getAddressSpace();
    Width = Target->getPointerWidth(AddrSpace: AS);
    Align = Target->getPointerAlign(AddrSpace: AS);
    break;
  case Type::LValueReference:
  case Type::RValueReference:
    // alignof and sizeof should never enter this code path here, so we go
    // the pointer route.
    AS = cast<ReferenceType>(Val: T)->getPointeeType().getAddressSpace();
    Width = Target->getPointerWidth(AddrSpace: AS);
    Align = Target->getPointerAlign(AddrSpace: AS);
    break;
  case Type::Pointer:
    AS = cast<PointerType>(Val: T)->getPointeeType().getAddressSpace();
    Width = Target->getPointerWidth(AddrSpace: AS);
    Align = Target->getPointerAlign(AddrSpace: AS);
    break;
  case Type::MemberPointer: {
    // Member pointer layout is ABI-specific (e.g. Itanium vs Microsoft).
    const auto *MPT = cast<MemberPointerType>(Val: T);
    CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT);
    Width = MPI.Width;
    Align = MPI.Align;
    break;
  }
  case Type::Complex: {
    // Complex types have the same alignment as their elements, but twice the
    // size.
    TypeInfo EltInfo = getTypeInfo(T: cast<ComplexType>(Val: T)->getElementType());
    Width = EltInfo.Width * 2;
    Align = EltInfo.Align;
    break;
  }
  case Type::ObjCObject:
    return getTypeInfo(T: cast<ObjCObjectType>(Val: T)->getBaseType().getTypePtr());
  case Type::Adjusted:
  case Type::Decayed:
    return getTypeInfo(T: cast<AdjustedType>(Val: T)->getAdjustedType().getTypePtr());
  case Type::ObjCInterface: {
    const auto *ObjCI = cast<ObjCInterfaceType>(Val: T);
    // Invalid decls get a conservative byte size/alignment to avoid crashing.
    if (ObjCI->getDecl()->isInvalidDecl()) {
      Width = 8;
      Align = 8;
      break;
    }
    const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(D: ObjCI->getDecl());
    Width = toBits(CharSize: Layout.getSize());
    Align = toBits(CharSize: Layout.getAlignment());
    break;
  }
  case Type::BitInt: {
    const auto *EIT = cast<BitIntType>(Val: T);
    Align = Target->getBitIntAlign(NumBits: EIT->getNumBits());
    Width = Target->getBitIntWidth(NumBits: EIT->getNumBits());
    break;
  }
  case Type::Record:
  case Type::Enum: {
    const auto *TT = cast<TagType>(Val: T);
    const TagDecl *TD = TT->getDecl()->getDefinitionOrSelf();

    // Invalid decls get a conservative byte size/alignment to avoid crashing.
    if (TD->isInvalidDecl()) {
      Width = 8;
      Align = 8;
      break;
    }

    if (isa<EnumType>(Val: TT)) {
      // Enums use their underlying integer type's layout, unless an aligned
      // attribute overrides the alignment.
      const EnumDecl *ED = cast<EnumDecl>(Val: TD);
      TypeInfo Info =
          getTypeInfo(T: ED->getIntegerType()->getUnqualifiedDesugaredType());
      if (unsigned AttrAlign = ED->getMaxAlignment()) {
        Info.Align = AttrAlign;
        Info.AlignRequirement = AlignRequirementKind::RequiredByEnum;
      }
      return Info;
    }

    const auto *RD = cast<RecordDecl>(Val: TD);
    const ASTRecordLayout &Layout = getASTRecordLayout(D: RD);
    Width = toBits(CharSize: Layout.getSize());
    Align = toBits(CharSize: Layout.getAlignment());
    AlignRequirement = RD->hasAttr<AlignedAttr>()
                           ? AlignRequirementKind::RequiredByRecord
                           : AlignRequirementKind::None;
    break;
  }

  case Type::SubstTemplateTypeParm:
    return getTypeInfo(T: cast<SubstTemplateTypeParmType>(Val: T)->
                       getReplacementType().getTypePtr());

  case Type::Auto:
  case Type::DeducedTemplateSpecialization: {
    const auto *A = cast<DeducedType>(Val: T);
    assert(!A->getDeducedType().isNull() &&
           "cannot request the size of an undeduced or dependent auto type");
    return getTypeInfo(T: A->getDeducedType().getTypePtr());
  }

  case Type::Paren:
    return getTypeInfo(T: cast<ParenType>(Val: T)->getInnerType().getTypePtr());

  case Type::MacroQualified:
    return getTypeInfo(
        T: cast<MacroQualifiedType>(Val: T)->getUnderlyingType().getTypePtr());

  case Type::ObjCTypeParam:
    return getTypeInfo(T: cast<ObjCTypeParamType>(Val: T)->desugar().getTypePtr());

  case Type::Using:
    return getTypeInfo(T: cast<UsingType>(Val: T)->desugar().getTypePtr());

  case Type::Typedef: {
    const auto *TT = cast<TypedefType>(Val: T);
    TypeInfo Info = getTypeInfo(T: TT->desugar().getTypePtr());
    // If the typedef has an aligned attribute on it, it overrides any computed
    // alignment we have.  This violates the GCC documentation (which says that
    // attribute(aligned) can only round up) but matches its implementation.
    if (unsigned AttrAlign = TT->getDecl()->getMaxAlignment()) {
      Align = AttrAlign;
      AlignRequirement = AlignRequirementKind::RequiredByTypedef;
    } else {
      Align = Info.Align;
      AlignRequirement = Info.AlignRequirement;
    }
    Width = Info.Width;
    break;
  }

  case Type::Attributed:
    return getTypeInfo(
        T: cast<AttributedType>(Val: T)->getEquivalentType().getTypePtr());

  case Type::CountAttributed:
    return getTypeInfo(T: cast<CountAttributedType>(Val: T)->desugar().getTypePtr());

  case Type::BTFTagAttributed:
    return getTypeInfo(
        T: cast<BTFTagAttributedType>(Val: T)->getWrappedType().getTypePtr());

  case Type::OverflowBehavior:
    return getTypeInfo(
        T: cast<OverflowBehaviorType>(Val: T)->getUnderlyingType().getTypePtr());

  case Type::HLSLAttributedResource:
    return getTypeInfo(
        T: cast<HLSLAttributedResourceType>(Val: T)->getWrappedType().getTypePtr());

  case Type::HLSLInlineSpirv: {
    const auto *ST = cast<HLSLInlineSpirvType>(Val: T);
    // Size is specified in bytes, convert to bits
    Width = ST->getSize() * 8;
    Align = ST->getAlignment();
    if (Width == 0 && Align == 0) {
      // We are defaulting to laying out opaque SPIR-V types as 32-bit ints.
      Width = 32;
      Align = 32;
    }
    break;
  }

  case Type::Atomic: {
    // Start with the base type information.
    TypeInfo Info = getTypeInfo(T: cast<AtomicType>(Val: T)->getValueType());
    Width = Info.Width;
    Align = Info.Align;

    if (!Width) {
      // An otherwise zero-sized type should still generate an
      // atomic operation.
      Width = Target->getCharWidth();
      assert(Align);
    } else if (Width <= Target->getMaxAtomicPromoteWidth()) {
      // If the size of the type doesn't exceed the platform's max
      // atomic promotion width, make the size and alignment more
      // favorable to atomic operations:

      // Round the size up to a power of 2.
      Width = llvm::bit_ceil(Value: Width);

      // Set the alignment equal to the size.
      Align = static_cast<unsigned>(Width);
    }
  }
  break;

  case Type::PredefinedSugar:
    return getTypeInfo(T: cast<PredefinedSugarType>(Val: T)->desugar().getTypePtr());

  case Type::Pipe:
    Width = Target->getPointerWidth(AddrSpace: LangAS::opencl_global);
    Align = Target->getPointerAlign(AddrSpace: LangAS::opencl_global);
    break;
  }

  assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
  return TypeInfo(Width, Align, AlignRequirement);
}
2580
unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
  // Return the alignment of T before any target alignment adjustments are
  // applied (see getTypeUnadjustedAlignInChars). Results are memoized per
  // Type* since record layout computation is not cheap.
  UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(Val: T);
  if (I != MemoizedUnadjustedAlign.end())
    return I->second;

  unsigned UnadjustedAlign;
  if (const auto *RT = T->getAsCanonical<RecordType>()) {
    // Records carry an explicit unadjusted alignment in their layout.
    const ASTRecordLayout &Layout = getASTRecordLayout(D: RT->getDecl());
    UnadjustedAlign = toBits(CharSize: Layout.getUnadjustedAlignment());
  } else if (const auto *ObjCI = T->getAsCanonical<ObjCInterfaceType>()) {
    // Objective-C interfaces use their interface layout instead.
    const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(D: ObjCI->getDecl());
    UnadjustedAlign = toBits(CharSize: Layout.getUnadjustedAlignment());
  } else {
    // For every other type, the ordinary alignment of the fully desugared,
    // unqualified type is already "unadjusted".
    UnadjustedAlign = getTypeAlign(T: T->getUnqualifiedDesugaredType());
  }

  MemoizedUnadjustedAlign[T] = UnadjustedAlign;
  return UnadjustedAlign;
}
2600
2601unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const {
2602 unsigned SimdAlign = llvm::OpenMPIRBuilder::getOpenMPDefaultSimdAlign(
2603 TargetTriple: getTargetInfo().getTriple(), Features: Target->getTargetOpts().FeatureMap);
2604 return SimdAlign;
2605}
2606
2607/// toCharUnitsFromBits - Convert a size in bits to a size in characters.
2608CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
2609 return CharUnits::fromQuantity(Quantity: BitSize / getCharWidth());
2610}
2611
/// toBits - Convert a size in characters to a size in bits.
int64_t ASTContext::toBits(CharUnits CharSize) const {
  return CharSize.getQuantity() * getCharWidth();
}
2616
2617/// getTypeSizeInChars - Return the size of the specified type, in characters.
2618/// This method does not work on incomplete types.
2619CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
2620 return getTypeInfoInChars(T).Width;
2621}
2622CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
2623 return getTypeInfoInChars(T).Width;
2624}
2625
2626/// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
2627/// characters. This method does not work on incomplete types.
2628CharUnits ASTContext::getTypeAlignInChars(QualType T) const {
2629 return toCharUnitsFromBits(BitSize: getTypeAlign(T));
2630}
2631CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
2632 return toCharUnitsFromBits(BitSize: getTypeAlign(T));
2633}
2634
2635/// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a
2636/// type, in characters, before alignment adjustments. This method does
2637/// not work on incomplete types.
2638CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const {
2639 return toCharUnitsFromBits(BitSize: getTypeUnadjustedAlign(T));
2640}
2641CharUnits ASTContext::getTypeUnadjustedAlignInChars(const Type *T) const {
2642 return toCharUnitsFromBits(BitSize: getTypeUnadjustedAlign(T));
2643}
2644
/// getPreferredTypeAlign - Return the "preferred" alignment of the specified
/// type for the current target in bits. This can be different than the ABI
/// alignment in cases where it is beneficial for performance or backwards
/// compatibility preserving to overalign a data type. (Note: despite the name,
/// the preferred alignment is ABI-impacting, and not an optimization.)
unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
  TypeInfo TI = getTypeInfo(T);
  unsigned ABIAlign = TI.Align;

  // Arrays take the preferred alignment of their element type.
  T = T->getBaseElementTypeUnsafe();

  // The preferred alignment of member pointers is that of a pointer.
  if (T->isMemberPointerType())
    return getPreferredTypeAlign(T: getPointerDiffType().getTypePtr());

  if (!Target->allowsLargerPreferedTypeAlignment())
    return ABIAlign;

  if (const auto *RD = T->getAsRecordDecl()) {
    // When used as part of a typedef, or together with a 'packed' attribute,
    // the 'aligned' attribute can be used to decrease alignment. Note that the
    // 'packed' case is already taken into consideration when computing the
    // alignment, we only need to handle the typedef case here.
    if (TI.AlignRequirement == AlignRequirementKind::RequiredByTypedef ||
        RD->isInvalidDecl())
      return ABIAlign;

    unsigned PreferredAlign = static_cast<unsigned>(
        toBits(CharSize: getASTRecordLayout(D: RD).PreferredAlignment));
    assert(PreferredAlign >= ABIAlign &&
           "PreferredAlign should be at least as large as ABIAlign.");
    return PreferredAlign;
  }

  // Double (and, for targets supporting AIX `power` alignment, long double) and
  // long long should be naturally aligned (despite requiring less alignment) if
  // possible.
  // Complex and enum types are examined via their element / underlying
  // integer type respectively.
  if (const auto *CT = T->getAs<ComplexType>())
    T = CT->getElementType().getTypePtr();
  if (const auto *ED = T->getAsEnumDecl())
    T = ED->getIntegerType().getTypePtr();
  if (T->isSpecificBuiltinType(K: BuiltinType::Double) ||
      T->isSpecificBuiltinType(K: BuiltinType::LongLong) ||
      T->isSpecificBuiltinType(K: BuiltinType::ULongLong) ||
      (T->isSpecificBuiltinType(K: BuiltinType::LongDouble) &&
       Target->defaultsToAIXPowerAlignment()))
    // Don't increase the alignment if an alignment attribute was specified on a
    // typedef declaration.
    if (!TI.isAlignRequired())
      return std::max(a: ABIAlign, b: (unsigned)getTypeSize(T));

  return ABIAlign;
}
2698
2699/// getTargetDefaultAlignForAttributeAligned - Return the default alignment
2700/// for __attribute__((aligned)) on this target, to be used if no alignment
2701/// value is specified.
2702unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const {
2703 return getTargetInfo().getDefaultAlignForAttributeAligned();
2704}
2705
2706/// getAlignOfGlobalVar - Return the alignment in bits that should be given
2707/// to a global variable of the specified type.
2708unsigned ASTContext::getAlignOfGlobalVar(QualType T, const VarDecl *VD) const {
2709 uint64_t TypeSize = getTypeSize(T: T.getTypePtr());
2710 return std::max(a: getPreferredTypeAlign(T),
2711 b: getMinGlobalAlignOfVar(Size: TypeSize, VD));
2712}
2713
2714/// getAlignOfGlobalVarInChars - Return the alignment in characters that
2715/// should be given to a global variable of the specified type.
2716CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T,
2717 const VarDecl *VD) const {
2718 return toCharUnitsFromBits(BitSize: getAlignOfGlobalVar(T, VD));
2719}
2720
2721unsigned ASTContext::getMinGlobalAlignOfVar(uint64_t Size,
2722 const VarDecl *VD) const {
2723 // Make the default handling as that of a non-weak definition in the
2724 // current translation unit.
2725 bool HasNonWeakDef = !VD || (VD->hasDefinition() && !VD->isWeak());
2726 return getTargetInfo().getMinGlobalAlign(Size, HasNonWeakDef);
2727}
2728
// Return the offset (from RD) of the base subobject that actually owns the
// vbptr, found by following the chain of bases that share their vbptr and
// accumulating each base-class offset along the way.
CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const {
  CharUnits Offset = CharUnits::Zero();
  const ASTRecordLayout *Layout = &getASTRecordLayout(D: RD);
  while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) {
    Offset += Layout->getBaseClassOffset(Base);
    Layout = &getASTRecordLayout(D: Base);
  }
  return Offset;
}
2738
// Compute the 'this'-pointer adjustment implied by the inheritance path
// stored in a member-pointer constant: the sum of base-class offsets along
// the path, negated when the pointer refers to a member of a derived class.
CharUnits ASTContext::getMemberPointerPathAdjustment(const APValue &MP) const {
  const ValueDecl *MPD = MP.getMemberPointerDecl();
  CharUnits ThisAdjustment = CharUnits::Zero();
  ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath();
  bool DerivedMember = MP.isMemberPointerToDerivedMember();
  const CXXRecordDecl *RD = cast<CXXRecordDecl>(Val: MPD->getDeclContext());
  for (unsigned I = 0, N = Path.size(); I != N; ++I) {
    const CXXRecordDecl *Base = RD;
    const CXXRecordDecl *Derived = Path[I];
    // For a pointer to a member of a derived class, each step is walked in
    // the opposite direction, so swap the roles.
    if (DerivedMember)
      std::swap(a&: Base, b&: Derived);
    ThisAdjustment += getASTRecordLayout(D: Derived).getBaseClassOffset(Base);
    RD = Path[I];
  }
  // Derived-member adjustments shift 'this' in the opposite direction.
  if (DerivedMember)
    ThisAdjustment = -ThisAdjustment;
  return ThisAdjustment;
}
2757
/// DeepCollectObjCIvars -
/// This routine first collects all declared, but not synthesized, ivars in
/// super class and then collects all ivars, including those synthesized for
/// current class. This routine is used for implementation of current class
/// when all ivars, declared and synthesized are known.
///
/// \param leafClass true only for the class originally asked about; only the
///        leaf class contributes its synthesized ivars.
void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI,
                                      bool leafClass,
                          SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const {
  // Recurse into superclasses first so ivars come out base-first.
  if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
    DeepCollectObjCIvars(OI: SuperClass, leafClass: false, Ivars);
  if (!leafClass) {
    // Superclass level: only the explicitly declared ivars.
    llvm::append_range(C&: Ivars, R: OI->ivars());
  } else {
    // Leaf class: walk the full declared-ivar list, which includes
    // synthesized ivars.
    auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
    for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
         Iv= Iv->getNextIvar())
      Ivars.push_back(Elt: Iv);
  }
}
2777
/// CollectInheritedProtocols - Collect all protocols in current class and
/// those inherited by it. Handles interfaces (including their categories and
/// superclass chain), categories, and protocols (recursing into inherited
/// protocols).
void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
                          llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) {
  if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(Val: CDecl)) {
    // We can use protocol_iterator here instead of
    // all_referenced_protocol_iterator since we are walking all categories.
    for (auto *Proto : OI->all_referenced_protocols()) {
      CollectInheritedProtocols(CDecl: Proto, Protocols);
    }

    // Categories of this Interface.
    for (const auto *Cat : OI->visible_categories())
      CollectInheritedProtocols(CDecl: Cat, Protocols);

    // Walk the entire superclass chain.
    if (ObjCInterfaceDecl *SD = OI->getSuperClass())
      while (SD) {
        CollectInheritedProtocols(CDecl: SD, Protocols);
        SD = SD->getSuperClass();
      }
  } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(Val: CDecl)) {
    for (auto *Proto : OC->protocols()) {
      CollectInheritedProtocols(CDecl: Proto, Protocols);
    }
  } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(Val: CDecl)) {
    // Insert the protocol.
    if (!Protocols.insert(
          Ptr: const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second)
      // Already visited; stop so we don't re-walk (protocols can be
      // referenced from multiple places).
      return;

    for (auto *Proto : OP->protocols())
      CollectInheritedProtocols(CDecl: Proto, Protocols);
  }
}
2812
// A union has unique object representations iff it is non-empty, every
// member has unique representations, and every member exactly fills the
// union (so no byte can ever be padding).
static bool unionHasUniqueObjectRepresentations(const ASTContext &Context,
                                                const RecordDecl *RD,
                                                bool CheckIfTriviallyCopyable) {
  assert(RD->isUnion() && "Must be union type");
  CharUnits UnionSize =
      Context.getTypeSizeInChars(T: Context.getCanonicalTagType(TD: RD));

  for (const auto *Field : RD->fields()) {
    if (!Context.hasUniqueObjectRepresentations(Ty: Field->getType(),
                                                CheckIfTriviallyCopyable))
      return false;
    // A member smaller than the union leaves trailing padding bytes.
    CharUnits FieldSize = Context.getTypeSizeInChars(T: Field->getType());
    if (FieldSize != UnionSize)
      return false;
  }
  // An empty union has no members to represent a value with.
  return !RD->field_empty();
}
2830
2831static int64_t getSubobjectOffset(const FieldDecl *Field,
2832 const ASTContext &Context,
2833 const clang::ASTRecordLayout & /*Layout*/) {
2834 return Context.getFieldOffset(FD: Field);
2835}
2836
2837static int64_t getSubobjectOffset(const CXXRecordDecl *RD,
2838 const ASTContext &Context,
2839 const clang::ASTRecordLayout &Layout) {
2840 return Context.toBits(CharSize: Layout.getBaseClassOffset(Base: RD));
2841}
2842
// Forward declared: this function and getSubobjectSizeInBits (below) recurse
// into each other for nested record types.
static std::optional<int64_t>
structHasUniqueObjectRepresentations(const ASTContext &Context,
                                     const RecordDecl *RD,
                                     bool CheckIfTriviallyCopyable);
2847
/// Return the number of value bits a field contributes to its containing
/// struct, or std::nullopt if the field rules out unique object
/// representations (non-unique type, padded _BitInt, oversized bit-field).
static std::optional<int64_t>
getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context,
                       bool CheckIfTriviallyCopyable) {
  // Non-union record members are analyzed recursively, subobject by
  // subobject.
  if (const auto *RD = Field->getType()->getAsRecordDecl();
      RD && !RD->isUnion())
    return structHasUniqueObjectRepresentations(Context, RD,
                                                CheckIfTriviallyCopyable);

  // A _BitInt type may not be unique if it has padding bits
  // but if it is a bitfield the padding bits are not used.
  bool IsBitIntType = Field->getType()->isBitIntType();
  if (!Field->getType()->isReferenceType() && !IsBitIntType &&
      !Context.hasUniqueObjectRepresentations(Ty: Field->getType(),
                                              CheckIfTriviallyCopyable))
    return std::nullopt;

  int64_t FieldSizeInBits =
      Context.toBits(CharSize: Context.getTypeSizeInChars(T: Field->getType()));
  if (Field->isBitField()) {
    // If we have explicit padding bits, they don't contribute bits
    // to the actual object representation, so return 0.
    if (Field->isUnnamedBitField())
      return 0;

    int64_t BitfieldSize = Field->getBitWidthValue();
    if (IsBitIntType) {
      // A bit-field wider than the _BitInt's value bits implies padding.
      if ((unsigned)BitfieldSize >
          cast<BitIntType>(Val: Field->getType())->getNumBits())
        return std::nullopt;
    } else if (BitfieldSize > FieldSizeInBits) {
      // Oversized bit-field: the excess width is padding.
      return std::nullopt;
    }
    FieldSizeInBits = BitfieldSize;
  } else if (IsBitIntType && !Context.hasUniqueObjectRepresentations(
                                 Ty: Field->getType(), CheckIfTriviallyCopyable)) {
    // Non-bit-field _BitInt: any padding bits make it non-unique.
    return std::nullopt;
  }
  return FieldSizeInBits;
}
2887
/// Base-class subobject variant: a base contributes bits exactly when its
/// struct layout has unique object representations.
static std::optional<int64_t>
getSubobjectSizeInBits(const CXXRecordDecl *RD, const ASTContext &Context,
                       bool CheckIfTriviallyCopyable) {
  return structHasUniqueObjectRepresentations(Context, RD,
                                              CheckIfTriviallyCopyable);
}
2894
/// Check that a sequence of subobjects (base classes or fields) tiles the
/// layout contiguously starting at CurOffsetInBits, with no gaps. Returns
/// the offset just past the last subobject, or std::nullopt if a subobject
/// is non-unique or a padding gap is found.
template <typename RangeT>
static std::optional<int64_t> structSubobjectsHaveUniqueObjectRepresentations(
    const RangeT &Subobjects, int64_t CurOffsetInBits,
    const ASTContext &Context, const clang::ASTRecordLayout &Layout,
    bool CheckIfTriviallyCopyable) {
  for (const auto *Subobject : Subobjects) {
    std::optional<int64_t> SizeInBits =
        getSubobjectSizeInBits(Subobject, Context, CheckIfTriviallyCopyable);
    if (!SizeInBits)
      return std::nullopt;
    // Zero-sized subobjects occupy no bits and are skipped; anything else
    // must start exactly where the previous subobject ended.
    if (*SizeInBits != 0) {
      int64_t Offset = getSubobjectOffset(Subobject, Context, Layout);
      if (Offset != CurOffsetInBits)
        return std::nullopt;
      CurOffsetInBits += *SizeInBits;
    }
  }
  return CurOffsetInBits;
}
2914
/// Return the total number of value bits in the struct/class RD if every
/// base and field has unique object representations and there is no internal
/// padding; std::nullopt otherwise. The caller compares the result against
/// the full type size to detect tail padding.
static std::optional<int64_t>
structHasUniqueObjectRepresentations(const ASTContext &Context,
                                     const RecordDecl *RD,
                                     bool CheckIfTriviallyCopyable) {
  assert(!RD->isUnion() && "Must be struct/class type");
  const auto &Layout = Context.getASTRecordLayout(D: RD);

  int64_t CurOffsetInBits = 0;
  if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(Val: RD)) {
    // Dynamic classes carry vtable machinery that is not part of the value
    // representation.
    if (ClassDecl->isDynamicClass())
      return std::nullopt;

    SmallVector<CXXRecordDecl *, 4> Bases;
    for (const auto &Base : ClassDecl->bases()) {
      // Empty types can be inherited from, and non-empty types can potentially
      // have tail padding, so just make sure there isn't an error.
      Bases.emplace_back(Args: Base.getType()->getAsCXXRecordDecl());
    }

    // Visit bases in layout order, which need not match declaration order.
    llvm::sort(C&: Bases, Comp: [&](const CXXRecordDecl *L, const CXXRecordDecl *R) {
      return Layout.getBaseClassOffset(Base: L) < Layout.getBaseClassOffset(Base: R);
    });

    std::optional<int64_t> OffsetAfterBases =
        structSubobjectsHaveUniqueObjectRepresentations(
            Subobjects: Bases, CurOffsetInBits, Context, Layout, CheckIfTriviallyCopyable);
    if (!OffsetAfterBases)
      return std::nullopt;
    CurOffsetInBits = *OffsetAfterBases;
  }

  // Fields must continue contiguously right after the bases.
  std::optional<int64_t> OffsetAfterFields =
      structSubobjectsHaveUniqueObjectRepresentations(
          Subobjects: RD->fields(), CurOffsetInBits, Context, Layout,
          CheckIfTriviallyCopyable);
  if (!OffsetAfterFields)
    return std::nullopt;
  CurOffsetInBits = *OffsetAfterFields;

  return CurOffsetInBits;
}
2956
bool ASTContext::hasUniqueObjectRepresentations(
    QualType Ty, bool CheckIfTriviallyCopyable) const {
  // C++17 [meta.unary.prop]:
  //   The predicate condition for a template specialization
  //   has_unique_object_representations<T> shall be satisfied if and only if:
  //     (9.1) - T is trivially copyable, and
  //     (9.2) - any two objects of type T with the same value have the same
  //     object representation, where:
  //     - two objects of array or non-union class type are considered to have
  //       the same value if their respective sequences of direct subobjects
  //       have the same values, and
  //     - two objects of union type are considered to have the same value if
  //       they have the same active member and the corresponding members have
  //       the same value.
  //   The set of scalar types for which this condition holds is
  //   implementation-defined. [ Note: If a type has padding bits, the condition
  //   does not hold; otherwise, the condition holds true for unsigned integral
  //   types. -- end note ]
  assert(!Ty.isNull() && "Null QualType sent to unique object rep check");

  // Arrays are unique only if their element type is unique.
  if (Ty->isArrayType())
    return hasUniqueObjectRepresentations(Ty: getBaseElementType(QT: Ty),
                                          CheckIfTriviallyCopyable);

  assert((Ty->isVoidType() || !Ty->isIncompleteType()) &&
         "hasUniqueObjectRepresentations should not be called with an "
         "incomplete type");

  // (9.1) - T is trivially copyable...
  if (CheckIfTriviallyCopyable && !Ty.isTriviallyCopyableType(Context: *this))
    return false;

  // All integrals and enums are unique.
  if (Ty->isIntegralOrEnumerationType()) {
    // Address discriminated integer types are not unique.
    if (Ty.hasAddressDiscriminatedPointerAuth())
      return false;
    // Except _BitInt types that have padding bits.
    if (const auto *BIT = Ty->getAs<BitIntType>())
      return getTypeSize(T: BIT) == BIT->getNumBits();

    return true;
  }

  // All other pointers are unique.
  if (Ty->isPointerType())
    return !Ty.hasAddressDiscriminatedPointerAuth();

  // Member pointers are unique unless the ABI says their representation
  // contains padding.
  if (const auto *MPT = Ty->getAs<MemberPointerType>())
    return !ABI->getMemberPointerInfo(MPT).HasPadding;

  // Record types: check member-by-member, then compare the accumulated bit
  // count against the full object size to catch tail padding.
  if (const auto *Record = Ty->getAsRecordDecl()) {
    if (Record->isInvalidDecl())
      return false;

    if (Record->isUnion())
      return unionHasUniqueObjectRepresentations(Context: *this, RD: Record,
                                                 CheckIfTriviallyCopyable);

    std::optional<int64_t> StructSize = structHasUniqueObjectRepresentations(
        Context: *this, RD: Record, CheckIfTriviallyCopyable);

    return StructSize && *StructSize == static_cast<int64_t>(getTypeSize(T: Ty));
  }

  // FIXME: More cases to handle here (list by rsmith):
  // vectors (careful about, eg, vector of 3 foo)
  // _Complex int and friends
  // _Atomic T
  // Obj-C block pointers
  // Obj-C object pointers
  // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t,
  // clk_event_t, queue_t, reserve_id_t)
  // There're also Obj-C class types and the Obj-C selector type, but I think it
  // makes sense for those to return false here.

  return false;
}
3036
3037unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const {
3038 unsigned count = 0;
3039 // Count ivars declared in class extension.
3040 for (const auto *Ext : OI->known_extensions())
3041 count += Ext->ivar_size();
3042
3043 // Count ivar defined in this class's implementation. This
3044 // includes synthesized ivars.
3045 if (ObjCImplementationDecl *ImplDecl = OI->getImplementation())
3046 count += ImplDecl->ivar_size();
3047
3048 return count;
3049}
3050
/// Returns true if E is a recognized "null" sentinel expression: an
/// expression of nullptr_t, a pointer-typed null pointer constant, or
/// GNU __null.
bool ASTContext::isSentinelNullExpr(const Expr *E) {
  if (!E)
    return false;

  // nullptr_t is always treated as null.
  if (E->getType()->isNullPtrType()) return true;

  // A pointer-typed expression that evaluates to a null pointer constant
  // (casts and parentheses are looked through).
  if (E->getType()->isAnyPointerType() &&
      E->IgnoreParenCasts()->isNullPointerConstant(Ctx&: *this,
                                                NPC: Expr::NPC_ValueDependentIsNull))
    return true;

  // Unfortunately, __null has type 'int'.
  if (isa<GNUNullExpr>(Val: E)) return true;

  return false;
}
3068
3069/// Get the implementation of ObjCInterfaceDecl, or nullptr if none
3070/// exists.
3071ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) {
3072 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
3073 I = ObjCImpls.find(Val: D);
3074 if (I != ObjCImpls.end())
3075 return cast<ObjCImplementationDecl>(Val: I->second);
3076 return nullptr;
3077}
3078
3079/// Get the implementation of ObjCCategoryDecl, or nullptr if none
3080/// exists.
3081ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) {
3082 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
3083 I = ObjCImpls.find(Val: D);
3084 if (I != ObjCImpls.end())
3085 return cast<ObjCCategoryImplDecl>(Val: I->second);
3086 return nullptr;
3087}
3088
/// Set the implementation of ObjCInterfaceDecl.
void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD,
                           ObjCImplementationDecl *ImplD) {
  assert(IFaceD && ImplD && "Passed null params");
  // Overwrites any previously registered implementation for this interface.
  ObjCImpls[IFaceD] = ImplD;
}
3095
/// Set the implementation of ObjCCategoryDecl.
void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD,
                           ObjCCategoryImplDecl *ImplD) {
  assert(CatD && ImplD && "Passed null params");
  // Overwrites any previously registered implementation for this category.
  ObjCImpls[CatD] = ImplD;
}
3102
/// Return the redeclaration previously recorded for MD via
/// setObjCMethodRedeclaration, or null if none was recorded.
const ObjCMethodDecl *
ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const {
  return ObjCMethodRedecls.lookup(Val: MD);
}
3107
/// Record Redecl as the redeclaration of MD. A method may have at most one
/// recorded redeclaration.
void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD,
                                            const ObjCMethodDecl *Redecl) {
  assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration");
  ObjCMethodRedecls[MD] = Redecl;
}
3113
3114const ObjCInterfaceDecl *ASTContext::getObjContainingInterface(
3115 const NamedDecl *ND) const {
3116 if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(Val: ND->getDeclContext()))
3117 return ID;
3118 if (const auto *CD = dyn_cast<ObjCCategoryDecl>(Val: ND->getDeclContext()))
3119 return CD->getClassInterface();
3120 if (const auto *IMD = dyn_cast<ObjCImplDecl>(Val: ND->getDeclContext()))
3121 return IMD->getClassInterface();
3122
3123 return nullptr;
3124}
3125
3126/// Get the copy initialization expression of VarDecl, or nullptr if
3127/// none exists.
3128BlockVarCopyInit ASTContext::getBlockVarCopyInit(const VarDecl *VD) const {
3129 assert(VD && "Passed null params");
3130 assert(VD->hasAttr<BlocksAttr>() &&
3131 "getBlockVarCopyInits - not __block var");
3132 auto I = BlockVarCopyInits.find(Val: VD);
3133 if (I != BlockVarCopyInits.end())
3134 return I->second;
3135 return {nullptr, false};
3136}
3137
/// Set the copy initialization expression of a block var decl.
/// \param CanThrow whether the copy expression can throw; stored alongside
///        the expression.
void ASTContext::setBlockVarCopyInit(const VarDecl*VD, Expr *CopyExpr,
                                     bool CanThrow) {
  assert(VD && CopyExpr && "Passed null params");
  assert(VD->hasAttr<BlocksAttr>() &&
         "setBlockVarCopyInits - not __block var");
  BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow);
}
3146
// Allocate a TypeSourceInfo for T from the context's bump allocator, with
// trailing storage for the TypeLoc data. Passing DataSize == 0 means
// "compute the full data size for T".
TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T,
                                                 unsigned DataSize) const {
  if (!DataSize)
    DataSize = TypeLoc::getFullDataSizeForType(Ty: T);
  else
    assert(DataSize == TypeLoc::getFullDataSizeForType(T) &&
           "incorrect data size provided to CreateTypeSourceInfo!");

  // Placement-new into bump storage; the trailing bytes hold the TypeLoc.
  auto *TInfo =
    (TypeSourceInfo*)BumpAlloc.Allocate(Size: sizeof(TypeSourceInfo) + DataSize, Alignment: 8);
  new (TInfo) TypeSourceInfo(T, DataSize);
  return TInfo;
}
3160
// Create "trivial" source info for T: a TypeSourceInfo whose every location
// is initialized to the single location L.
TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T,
                                                     SourceLocation L) const {
  TypeSourceInfo *TSI = CreateTypeSourceInfo(T);
  TSI->getTypeLoc().initialize(Context&: const_cast<ASTContext &>(*this), Loc: L);
  return TSI;
}
3167
// Thin public wrapper over the internal getObjCLayout helper.
const ASTRecordLayout &
ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const {
  return getObjCLayout(D);
}
3172
3173static auto getCanonicalTemplateArguments(const ASTContext &C,
3174 ArrayRef<TemplateArgument> Args,
3175 bool &AnyNonCanonArgs) {
3176 SmallVector<TemplateArgument, 16> CanonArgs(Args);
3177 AnyNonCanonArgs |= C.canonicalizeTemplateArguments(Args: CanonArgs);
3178 return CanonArgs;
3179}
3180
3181bool ASTContext::canonicalizeTemplateArguments(
3182 MutableArrayRef<TemplateArgument> Args) const {
3183 bool AnyNonCanonArgs = false;
3184 for (auto &Arg : Args) {
3185 TemplateArgument OrigArg = Arg;
3186 Arg = getCanonicalTemplateArgument(Arg);
3187 AnyNonCanonArgs |= !Arg.structurallyEquals(Other: OrigArg);
3188 }
3189 return AnyNonCanonArgs;
3190}
3191
3192//===----------------------------------------------------------------------===//
3193// Type creation/memoization methods
3194//===----------------------------------------------------------------------===//
3195
// Return T = baseType with the given extended qualifiers applied, uniqued
// through the ExtQualNodes folding set.
QualType
ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const {
  // Fast qualifiers (cvr) are carried in the QualType's low bits, not in the
  // ExtQuals node, so peel them off before profiling.
  unsigned fastQuals = quals.getFastQualifiers();
  quals.removeFastQualifiers();

  // Check if we've already instantiated this type.
  llvm::FoldingSetNodeID ID;
  ExtQuals::Profile(ID, BaseType: baseType, Quals: quals);
  void *insertPos = nullptr;
  if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, InsertPos&: insertPos)) {
    assert(eq->getQualifiers() == quals);
    return QualType(eq, fastQuals);
  }

  // If the base type is not canonical, make the appropriate canonical type.
  QualType canon;
  if (!baseType->isCanonicalUnqualified()) {
    SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split();
    canonSplit.Quals.addConsistentQualifiers(qs: quals);
    canon = getExtQualType(baseType: canonSplit.Ty, quals: canonSplit.Quals);

    // Re-find the insert position: the recursive call above may have mutated
    // the folding set and invalidated insertPos.
    (void) ExtQualNodes.FindNodeOrInsertPos(ID, InsertPos&: insertPos);
  }

  auto *eq = new (*this, alignof(ExtQuals)) ExtQuals(baseType, canon, quals);
  ExtQualNodes.InsertNode(N: eq, InsertPos: insertPos);
  return QualType(eq, fastQuals);
}
3225
// Return T qualified with the given address space. T must not already carry
// a (different) address space.
QualType ASTContext::getAddrSpaceQualType(QualType T,
                                          LangAS AddressSpace) const {
  // Already in the requested address space: nothing to do.
  QualType CanT = getCanonicalType(T);
  if (CanT.getAddressSpace() == AddressSpace)
    return T;

  // If we are composing extended qualifiers together, merge together
  // into one ExtQuals node.
  QualifierCollector Quals;
  const Type *TypeNode = Quals.strip(type: T);

  // If this type already has an address space specified, it cannot get
  // another one.
  assert(!Quals.hasAddressSpace() &&
         "Type cannot be in multiple addr spaces!");
  Quals.addAddressSpace(space: AddressSpace);

  return getExtQualType(baseType: TypeNode, quals: Quals);
}
3245
// Return T with its address-space qualifier removed, preserving all other
// qualifiers (and, for arrays, rebuilding the array over the unqualified
// element type).
QualType ASTContext::removeAddrSpaceQualType(QualType T) const {
  // If the type is not qualified with an address space, just return it
  // immediately.
  if (!T.hasAddressSpace())
    return T;

  QualifierCollector Quals;
  const Type *TypeNode;
  // For arrays, strip the qualifier off the element type, then reconstruct the
  // array type
  if (T.getTypePtr()->isArrayType()) {
    T = getUnqualifiedArrayType(T, Quals);
    TypeNode = T.getTypePtr();
  } else {
    // If we are composing extended qualifiers together, merge together
    // into one ExtQuals node.
    while (T.hasAddressSpace()) {
      TypeNode = Quals.strip(type: T);

      // If the type no longer has an address space after stripping qualifiers,
      // jump out.
      if (!QualType(TypeNode, 0).hasAddressSpace())
        break;

      // There might be sugar in the way. Strip it and try again.
      T = T.getSingleStepDesugaredType(Context: *this);
    }
  }

  Quals.removeAddressSpace();

  // Removal of the address space can mean there are no longer any
  // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts)
  // or required.
  if (Quals.hasNonFastQualifiers())
    return getExtQualType(baseType: TypeNode, quals: Quals);
  else
    return QualType(TypeNode, Quals.getFastQualifiers());
}
3285
// Compute the pointer-auth discriminator for RD's vtable pointer by hashing
// the mangled vtable name with the stable SipHash used for pointer
// authentication. Only valid for polymorphic classes.
uint16_t
ASTContext::getPointerAuthVTablePointerDiscriminator(const CXXRecordDecl *RD) {
  assert(RD->isPolymorphic() &&
         "Attempted to get vtable pointer discriminator on a monomorphic type");
  std::unique_ptr<MangleContext> MC(createMangleContext());
  SmallString<256> Str;
  llvm::raw_svector_ostream Out(Str);
  MC->mangleCXXVTable(RD, Out);
  return llvm::getPointerAuthStableSipHash(S: Str);
}
3296
/// Encode a function type for use in the discriminator of a function pointer
/// type. We can't use the itanium scheme for this since C has quite permissive
/// rules for type compatibility that we need to be compatible with.
///
/// Formally, this function associates every function pointer type T with an
/// encoded string E(T). Let the equivalence relation T1 ~ T2 be defined as
/// E(T1) == E(T2). E(T) is part of the ABI of values of type T. C type
/// compatibility requires equivalent treatment under the ABI, so
/// CCompatible(T1, T2) must imply E(T1) == E(T2), that is, CCompatible must be
/// a subset of ~. Crucially, however, it must be a proper subset because
/// CCompatible is not an equivalence relation: for example, int[] is compatible
/// with both int[1] and int[2], but the latter are not compatible with each
/// other. Therefore this encoding function must be careful to only distinguish
/// types if there is no third type with which they are both required to be
/// compatible.
static void encodeTypeForFunctionPointerAuth(const ASTContext &Ctx,
                                             raw_ostream &OS, QualType QT) {
  // FIXME: Consider address space qualifiers.
  const Type *T = QT.getCanonicalType().getTypePtr();

  // FIXME: Consider using the C++ type mangling when we encounter a construct
  // that is incompatible with C.

  switch (T->getTypeClass()) {
  case Type::Atomic:
    // _Atomic itself contributes nothing: only the value type is encoded.
    return encodeTypeForFunctionPointerAuth(
        Ctx, OS, QT: cast<AtomicType>(Val: T)->getValueType());

  case Type::LValueReference:
    OS << "R";
    encodeTypeForFunctionPointerAuth(Ctx, OS,
                                     QT: cast<ReferenceType>(Val: T)->getPointeeType());
    return;
  case Type::RValueReference:
    OS << "O";
    encodeTypeForFunctionPointerAuth(Ctx, OS,
                                     QT: cast<ReferenceType>(Val: T)->getPointeeType());
    return;

  case Type::Pointer:
    // C11 6.7.6.1p2:
    //   For two pointer types to be compatible, both shall be identically
    //   qualified and both shall be pointers to compatible types.
    // FIXME: we should also consider pointee types.
    OS << "P";
    return;

  case Type::ObjCObjectPointer:
  case Type::BlockPointer:
    // All object/block pointers hash like plain data pointers.
    OS << "P";
    return;

  case Type::Complex:
    OS << "C";
    return encodeTypeForFunctionPointerAuth(
        Ctx, OS, QT: cast<ComplexType>(Val: T)->getElementType());

  case Type::VariableArray:
  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::ArrayParameter:
    // C11 6.7.6.2p6:
    //   For two array types to be compatible, both shall have compatible
    //   element types, and if both size specifiers are present, and are
    //   integer constant expressions, then both size specifiers shall have
    //   the same constant value [...]
    //
    // So since ElemType[N] has to be compatible with ElemType[], we can't
    // encode the width of the array.
    OS << "A";
    return encodeTypeForFunctionPointerAuth(
        Ctx, OS, QT: cast<ArrayType>(Val: T)->getElementType());

  case Type::ObjCInterface:
  case Type::ObjCObject:
    OS << "<objc_object>";
    return;

  case Type::Enum: {
    // C11 6.7.2.2p4:
    //   Each enumerated type shall be compatible with char, a signed integer
    //   type, or an unsigned integer type.
    //
    // So we have to treat enum types as integers.
    QualType UnderlyingType = T->castAsEnumDecl()->getIntegerType();
    // A null underlying type (e.g. an incomplete enum) falls back to int.
    return encodeTypeForFunctionPointerAuth(
        Ctx, OS, QT: UnderlyingType.isNull() ? Ctx.IntTy : UnderlyingType);
  }

  case Type::FunctionNoProto:
  case Type::FunctionProto: {
    // C11 6.7.6.3p15:
    //   For two function types to be compatible, both shall specify compatible
    //   return types. Moreover, the parameter type lists, if both are present,
    //   shall agree in the number of parameters and in the use of the ellipsis
    //   terminator; corresponding parameters shall have compatible types.
    //
    // That paragraph goes on to describe how unprototyped functions are to be
    // handled, which we ignore here. Unprototyped function pointers are hashed
    // as though they were prototyped nullary functions since that's probably
    // what the user meant. This behavior is non-conforming.
    // FIXME: If we add a "custom discriminator" function type attribute we
    // should encode functions as their discriminators.
    OS << "F";
    const auto *FuncType = cast<FunctionType>(Val: T);
    encodeTypeForFunctionPointerAuth(Ctx, OS, QT: FuncType->getReturnType());
    if (const auto *FPT = dyn_cast<FunctionProtoType>(Val: FuncType)) {
      for (QualType Param : FPT->param_types()) {
        // Map each parameter to its signature form before encoding it.
        Param = Ctx.getSignatureParameterType(T: Param);
        encodeTypeForFunctionPointerAuth(Ctx, OS, QT: Param);
      }
      if (FPT->isVariadic())
        OS << "z";
    }
    OS << "E";
    return;
  }

  case Type::MemberPointer: {
    // Encode both the class qualifier and the pointee type.
    OS << "M";
    const auto *MPT = T->castAs<MemberPointerType>();
    encodeTypeForFunctionPointerAuth(
        Ctx, OS, QT: QualType(MPT->getQualifier().getAsType(), 0));
    encodeTypeForFunctionPointerAuth(Ctx, OS, QT: MPT->getPointeeType());
    return;
  }
  case Type::ExtVector:
  case Type::Vector:
    // Vectors are discriminated only by their total size in bytes.
    OS << "Dv" << Ctx.getTypeSizeInChars(T).getQuantity();
    break;

  // Don't bother discriminating based on these types.
  case Type::Pipe:
  case Type::BitInt:
  case Type::ConstantMatrix:
    OS << "?";
    return;

  case Type::Builtin: {
    const auto *BTy = T->castAs<BuiltinType>();
    switch (BTy->getKind()) {
    // All integer types (signed or unsigned) hash identically, matching C's
    // permissive integer compatibility treatment.
#define SIGNED_TYPE(Id, SingletonId)                                           \
  case BuiltinType::Id:                                                        \
    OS << "i";                                                                 \
    return;
#define UNSIGNED_TYPE(Id, SingletonId)                                         \
  case BuiltinType::Id:                                                        \
    OS << "i";                                                                 \
    return;
#define PLACEHOLDER_TYPE(Id, SingletonId) case BuiltinType::Id:
#define BUILTIN_TYPE(Id, SingletonId)
#include "clang/AST/BuiltinTypes.def"
      llvm_unreachable("placeholder types should not appear here.");

    case BuiltinType::Half:
      OS << "Dh";
      return;
    case BuiltinType::Float:
      OS << "f";
      return;
    case BuiltinType::Double:
      OS << "d";
      return;
    case BuiltinType::LongDouble:
      OS << "e";
      return;
    case BuiltinType::Float16:
      OS << "DF16_";
      return;
    case BuiltinType::Float128:
      OS << "g";
      return;

    case BuiltinType::Void:
      OS << "v";
      return;

    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
    case BuiltinType::NullPtr:
      OS << "P";
      return;

    // Don't bother discriminating based on OpenCL types.
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
    case BuiltinType::BFloat16:
    case BuiltinType::VectorQuad:
    case BuiltinType::VectorPair:
    case BuiltinType::DMR1024:
    case BuiltinType::DMR2048:
      OS << "?";
      return;

    // Don't bother discriminating based on these seldom-used types.
    case BuiltinType::Ibm128:
      return;
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix)                   \
  case BuiltinType::Id:                                                        \
    return;
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext)                                      \
  case BuiltinType::Id:                                                        \
    return;
#include "clang/Basic/OpenCLExtensionTypes.def"
#define SVE_TYPE(Name, Id, SingletonId)                                        \
  case BuiltinType::Id:                                                        \
    return;
#include "clang/Basic/AArch64ACLETypes.def"
#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId)                            \
  case BuiltinType::Id:                                                        \
    return;
#include "clang/Basic/HLSLIntangibleTypes.def"
    case BuiltinType::Dependent:
      llvm_unreachable("should never get here");
#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) case BuiltinType::Id:
#include "clang/Basic/AMDGPUTypes.def"
    case BuiltinType::WasmExternRef:
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
      llvm_unreachable("not yet implemented");
    }
    llvm_unreachable("should never get here");
  }
  case Type::Record: {
    const RecordDecl *RD = T->castAsCanonical<RecordType>()->getDecl();
    const IdentifierInfo *II = RD->getIdentifier();

    // In C++, an immediate typedef of an anonymous struct or union
    // is considered to name it for ODR purposes, but C's specification
    // of type compatibility does not have a similar rule. Using the typedef
    // name in function type discriminators anyway, as we do here,
    // therefore technically violates the C standard: two function pointer
    // types defined in terms of two typedef'd anonymous structs with
    // different names are formally still compatible, but we are assigning
    // them different discriminators and therefore incompatible ABIs.
    //
    // This is a relatively minor violation that significantly improves
    // discrimination in some cases and has not caused problems in
    // practice. Regardless, it is now part of the ABI in places where
    // function type discrimination is used, and it can no longer be
    // changed except on new platforms.

    if (!II)
      if (const TypedefNameDecl *Typedef = RD->getTypedefNameForAnonDecl())
        II = Typedef->getDeclName().getAsIdentifierInfo();

    if (!II) {
      OS << "<anonymous_record>";
      return;
    }
    // Length-prefix the name so concatenated encodings stay unambiguous.
    OS << II->getLength() << II->getName();
    return;
  }
  case Type::HLSLAttributedResource:
  case Type::HLSLInlineSpirv:
    llvm_unreachable("should never get here");
    break;
  case Type::OverflowBehavior:
    llvm_unreachable("should never get here");
    break;
  case Type::DeducedTemplateSpecialization:
  case Type::Auto:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define ABSTRACT_TYPE(Class, Base)
#define TYPE(Class, Base)
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("unexpected non-canonical or dependent type!");
    return;
  }
}
3574
/// Compute the 16-bit pointer-authentication type discriminator for \p T.
/// Function (pointer/reference) types use the permissive C-compatibility
/// encoding above; all other types are hashed from their canonical mangling.
uint16_t ASTContext::getPointerAuthTypeDiscriminator(QualType T) {
  assert(!T->isDependentType() &&
         "cannot compute type discriminator of a dependent type");
  SmallString<256> Str;
  llvm::raw_svector_ostream Out(Str);

  // Pointers/references to functions are discriminated by the function type
  // itself.
  if (T->isFunctionPointerType() || T->isFunctionReferenceType())
    T = T->getPointeeType();

  if (T->isFunctionType()) {
    encodeTypeForFunctionPointerAuth(Ctx: *this, OS&: Out, QT: T);
  } else {
    T = T.getUnqualifiedType();
    // Calls to member function pointers don't need to worry about
    // language interop or the laxness of the C type compatibility rules.
    // We just mangle the member pointer type directly, which is
    // implicitly much stricter about type matching. However, we do
    // strip any top-level exception specification before this mangling.
    // C++23 requires calls to work when the function type is convertible
    // to the pointer type by a function pointer conversion, which can
    // change the exception specification. This does not technically
    // require the exception specification to not affect representation,
    // because the function pointer conversion is still always a direct
    // value conversion and therefore an opportunity to resign the
    // pointer. (This is in contrast to e.g. qualification conversions,
    // which can be applied in nested pointer positions, effectively
    // requiring qualified and unqualified representations to match.)
    // However, it is pragmatic to ignore exception specifications
    // because it allows a certain amount of `noexcept` mismatching
    // to not become a visible ODR problem. This also leaves some
    // room for the committee to add laxness to function pointer
    // conversions in future standards.
    if (auto *MPT = T->getAs<MemberPointerType>())
      if (MPT->isMemberFunctionPointer()) {
        QualType PointeeType = MPT->getPointeeType();
        if (PointeeType->castAs<FunctionProtoType>()->getExceptionSpecType() !=
            EST_None) {
          // Rebuild the member pointer without the exception specification.
          QualType FT = getFunctionTypeWithExceptionSpec(Orig: PointeeType, ESI: EST_None);
          T = getMemberPointerType(T: FT, Qualifier: MPT->getQualifier(),
                                   Cls: MPT->getMostRecentCXXRecordDecl());
        }
      }
    std::unique_ptr<MangleContext> MC(createMangleContext());
    MC->mangleCanonicalTypeName(T, Out);
  }

  return llvm::getPointerAuthStableSipHash(S: Str);
}
3623
3624QualType ASTContext::getObjCGCQualType(QualType T,
3625 Qualifiers::GC GCAttr) const {
3626 QualType CanT = getCanonicalType(T);
3627 if (CanT.getObjCGCAttr() == GCAttr)
3628 return T;
3629
3630 if (const auto *ptr = T->getAs<PointerType>()) {
3631 QualType Pointee = ptr->getPointeeType();
3632 if (Pointee->isAnyPointerType()) {
3633 QualType ResultType = getObjCGCQualType(T: Pointee, GCAttr);
3634 return getPointerType(T: ResultType);
3635 }
3636 }
3637
3638 // If we are composing extended qualifiers together, merge together
3639 // into one ExtQuals node.
3640 QualifierCollector Quals;
3641 const Type *TypeNode = Quals.strip(type: T);
3642
3643 // If this type already has an ObjCGC specified, it cannot get
3644 // another one.
3645 assert(!Quals.hasObjCGCAttr() &&
3646 "Type cannot have multiple ObjCGCs!");
3647 Quals.addObjCGCAttr(type: GCAttr);
3648
3649 return getExtQualType(baseType: TypeNode, quals: Quals);
3650}
3651
3652QualType ASTContext::removePtrSizeAddrSpace(QualType T) const {
3653 if (const PointerType *Ptr = T->getAs<PointerType>()) {
3654 QualType Pointee = Ptr->getPointeeType();
3655 if (isPtrSizeAddressSpace(AS: Pointee.getAddressSpace())) {
3656 return getPointerType(T: removeAddrSpaceQualType(T: Pointee));
3657 }
3658 }
3659 return T;
3660}
3661
/// Return the uniqued count-attributed variant of \p WrappedTy.
///
/// \param WrappedTy      the pointer or array type being annotated.
/// \param CountExpr      the count expression attached to the type.
/// \param CountInBytes   true if the count is measured in bytes.
/// \param OrNull         true for the "or null" attribute variants.
/// \param DependentDecls declarations referenced by the count expression;
///                       stored in the node's trailing storage.
QualType ASTContext::getCountAttributedType(
    QualType WrappedTy, Expr *CountExpr, bool CountInBytes, bool OrNull,
    ArrayRef<TypeCoupledDeclRefInfo> DependentDecls) const {
  assert(WrappedTy->isPointerType() || WrappedTy->isArrayType());

  // Unique on (wrapped type, count expr, flags). DependentDecls don't
  // participate in the profile.
  llvm::FoldingSetNodeID ID;
  CountAttributedType::Profile(ID, WrappedTy, CountExpr, CountInBytes, Nullable: OrNull);

  void *InsertPos = nullptr;
  CountAttributedType *CATy =
      CountAttributedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (CATy)
    return QualType(CATy, 0);

  QualType CanonTy = getCanonicalType(T: WrappedTy);
  // The node tail-allocates space for its TypeCoupledDeclRefInfo entries.
  size_t Size = CountAttributedType::totalSizeToAlloc<TypeCoupledDeclRefInfo>(
      Counts: DependentDecls.size());
  CATy = (CountAttributedType *)Allocate(Size, Align: TypeAlignment);
  new (CATy) CountAttributedType(WrappedTy, CanonTy, CountExpr, CountInBytes,
                                 OrNull, DependentDecls);
  Types.push_back(Elt: CATy);
  CountAttributedTypes.InsertNode(N: CATy, InsertPos);

  return QualType(CATy, 0);
}
3687
3688QualType
3689ASTContext::adjustType(QualType Orig,
3690 llvm::function_ref<QualType(QualType)> Adjust) const {
3691 switch (Orig->getTypeClass()) {
3692 case Type::Attributed: {
3693 const auto *AT = cast<AttributedType>(Val&: Orig);
3694 return getAttributedType(attrKind: AT->getAttrKind(),
3695 modifiedType: adjustType(Orig: AT->getModifiedType(), Adjust),
3696 equivalentType: adjustType(Orig: AT->getEquivalentType(), Adjust),
3697 attr: AT->getAttr());
3698 }
3699
3700 case Type::BTFTagAttributed: {
3701 const auto *BTFT = dyn_cast<BTFTagAttributedType>(Val&: Orig);
3702 return getBTFTagAttributedType(BTFAttr: BTFT->getAttr(),
3703 Wrapped: adjustType(Orig: BTFT->getWrappedType(), Adjust));
3704 }
3705
3706 case Type::OverflowBehavior: {
3707 const auto *OB = dyn_cast<OverflowBehaviorType>(Val&: Orig);
3708 return getOverflowBehaviorType(Kind: OB->getBehaviorKind(),
3709 Wrapped: adjustType(Orig: OB->getUnderlyingType(), Adjust));
3710 }
3711
3712 case Type::Paren:
3713 return getParenType(
3714 NamedType: adjustType(Orig: cast<ParenType>(Val&: Orig)->getInnerType(), Adjust));
3715
3716 case Type::Adjusted: {
3717 const auto *AT = cast<AdjustedType>(Val&: Orig);
3718 return getAdjustedType(Orig: AT->getOriginalType(),
3719 New: adjustType(Orig: AT->getAdjustedType(), Adjust));
3720 }
3721
3722 case Type::MacroQualified: {
3723 const auto *MQT = cast<MacroQualifiedType>(Val&: Orig);
3724 return getMacroQualifiedType(UnderlyingTy: adjustType(Orig: MQT->getUnderlyingType(), Adjust),
3725 MacroII: MQT->getMacroIdentifier());
3726 }
3727
3728 default:
3729 return Adjust(Orig);
3730 }
3731}
3732
3733const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T,
3734 FunctionType::ExtInfo Info) {
3735 if (T->getExtInfo() == Info)
3736 return T;
3737
3738 QualType Result;
3739 if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(Val: T)) {
3740 Result = getFunctionNoProtoType(ResultTy: FNPT->getReturnType(), Info);
3741 } else {
3742 const auto *FPT = cast<FunctionProtoType>(Val: T);
3743 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
3744 EPI.ExtInfo = Info;
3745 Result = getFunctionType(ResultTy: FPT->getReturnType(), Args: FPT->getParamTypes(), EPI);
3746 }
3747
3748 return cast<FunctionType>(Val: Result.getTypePtr());
3749}
3750
3751QualType ASTContext::adjustFunctionResultType(QualType FunctionType,
3752 QualType ResultType) {
3753 return adjustType(Orig: FunctionType, Adjust: [&](QualType Orig) {
3754 if (const auto *FNPT = Orig->getAs<FunctionNoProtoType>())
3755 return getFunctionNoProtoType(ResultTy: ResultType, Info: FNPT->getExtInfo());
3756
3757 const auto *FPT = Orig->castAs<FunctionProtoType>();
3758 return getFunctionType(ResultTy: ResultType, Args: FPT->getParamTypes(),
3759 EPI: FPT->getExtProtoInfo());
3760 });
3761}
3762
3763void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD,
3764 QualType ResultType) {
3765 FD = FD->getMostRecentDecl();
3766 while (true) {
3767 FD->setType(adjustFunctionResultType(FunctionType: FD->getType(), ResultType));
3768 if (FunctionDecl *Next = FD->getPreviousDecl())
3769 FD = Next;
3770 else
3771 break;
3772 }
3773 if (ASTMutationListener *L = getASTMutationListener())
3774 L->DeducedReturnType(FD, ReturnType: ResultType);
3775}
3776
3777/// Get a function type and produce the equivalent function type with the
3778/// specified exception specification. Type sugar that can be present on a
3779/// declaration of a function with an exception specification is permitted
3780/// and preserved. Other type sugar (for instance, typedefs) is not.
3781QualType ASTContext::getFunctionTypeWithExceptionSpec(
3782 QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const {
3783 return adjustType(Orig, Adjust: [&](QualType Ty) {
3784 const auto *Proto = Ty->castAs<FunctionProtoType>();
3785 return getFunctionType(ResultTy: Proto->getReturnType(), Args: Proto->getParamTypes(),
3786 EPI: Proto->getExtProtoInfo().withExceptionSpec(ESI));
3787 });
3788}
3789
3790bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T,
3791 QualType U) const {
3792 return hasSameType(T1: T, T2: U) ||
3793 (getLangOpts().CPlusPlus17 &&
3794 hasSameType(T1: getFunctionTypeWithExceptionSpec(Orig: T, ESI: EST_None),
3795 T2: getFunctionTypeWithExceptionSpec(Orig: U, ESI: EST_None)));
3796}
3797
3798QualType ASTContext::getFunctionTypeWithoutPtrSizes(QualType T) {
3799 if (const auto *Proto = T->getAs<FunctionProtoType>()) {
3800 QualType RetTy = removePtrSizeAddrSpace(T: Proto->getReturnType());
3801 SmallVector<QualType, 16> Args(Proto->param_types().size());
3802 for (unsigned i = 0, n = Args.size(); i != n; ++i)
3803 Args[i] = removePtrSizeAddrSpace(T: Proto->param_types()[i]);
3804 return getFunctionType(ResultTy: RetTy, Args, EPI: Proto->getExtProtoInfo());
3805 }
3806
3807 if (const FunctionNoProtoType *Proto = T->getAs<FunctionNoProtoType>()) {
3808 QualType RetTy = removePtrSizeAddrSpace(T: Proto->getReturnType());
3809 return getFunctionNoProtoType(ResultTy: RetTy, Info: Proto->getExtInfo());
3810 }
3811
3812 return T;
3813}
3814
3815bool ASTContext::hasSameFunctionTypeIgnoringPtrSizes(QualType T, QualType U) {
3816 return hasSameType(T1: T, T2: U) ||
3817 hasSameType(T1: getFunctionTypeWithoutPtrSizes(T),
3818 T2: getFunctionTypeWithoutPtrSizes(T: U));
3819}
3820
3821QualType ASTContext::getFunctionTypeWithoutParamABIs(QualType T) const {
3822 if (const auto *Proto = T->getAs<FunctionProtoType>()) {
3823 FunctionProtoType::ExtProtoInfo EPI = Proto->getExtProtoInfo();
3824 EPI.ExtParameterInfos = nullptr;
3825 return getFunctionType(ResultTy: Proto->getReturnType(), Args: Proto->param_types(), EPI);
3826 }
3827 return T;
3828}
3829
3830bool ASTContext::hasSameFunctionTypeIgnoringParamABI(QualType T,
3831 QualType U) const {
3832 return hasSameType(T1: T, T2: U) || hasSameType(T1: getFunctionTypeWithoutParamABIs(T),
3833 T2: getFunctionTypeWithoutParamABIs(T: U));
3834}
3835
/// Rewrite \p FD's type so that it carries the exception specification
/// \p ESI. When \p AsWritten is set, the declaration's TypeSourceInfo is
/// patched to match as well.
void ASTContext::adjustExceptionSpec(
    FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI,
    bool AsWritten) {
  // Update the type.
  QualType Updated =
      getFunctionTypeWithExceptionSpec(Orig: FD->getType(), ESI);
  FD->setType(Updated);

  if (!AsWritten)
    return;

  // Update the type in the type source information too.
  if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) {
    // If the type and the type-as-written differ, we may need to update
    // the type-as-written too.
    if (TSInfo->getType() != FD->getType())
      Updated = getFunctionTypeWithExceptionSpec(Orig: TSInfo->getType(), ESI);

    // FIXME: When we get proper type location information for exceptions,
    // we'll also have to rebuild the TypeSourceInfo. For now, we just patch
    // up the TypeSourceInfo;
    assert(TypeLoc::getFullDataSizeForType(Updated) ==
           TypeLoc::getFullDataSizeForType(TSInfo->getType()) &&
           "TypeLoc size mismatch from updating exception specification");
    TSInfo->overrideType(T: Updated);
  }
}
3863
3864/// getComplexType - Return the uniqued reference to the type for a complex
3865/// number with the specified element type.
3866QualType ASTContext::getComplexType(QualType T) const {
3867 // Unique pointers, to guarantee there is only one pointer of a particular
3868 // structure.
3869 llvm::FoldingSetNodeID ID;
3870 ComplexType::Profile(ID, Element: T);
3871
3872 void *InsertPos = nullptr;
3873 if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos))
3874 return QualType(CT, 0);
3875
3876 // If the pointee type isn't canonical, this won't be a canonical type either,
3877 // so fill in the canonical type field.
3878 QualType Canonical;
3879 if (!T.isCanonical()) {
3880 Canonical = getComplexType(T: getCanonicalType(T));
3881
3882 // Get the new insert position for the node we care about.
3883 ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
3884 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3885 }
3886 auto *New = new (*this, alignof(ComplexType)) ComplexType(T, Canonical);
3887 Types.push_back(Elt: New);
3888 ComplexTypes.InsertNode(N: New, InsertPos);
3889 return QualType(New, 0);
3890}
3891
3892/// getPointerType - Return the uniqued reference to the type for a pointer to
3893/// the specified type.
3894QualType ASTContext::getPointerType(QualType T) const {
3895 // Unique pointers, to guarantee there is only one pointer of a particular
3896 // structure.
3897 llvm::FoldingSetNodeID ID;
3898 PointerType::Profile(ID, Pointee: T);
3899
3900 void *InsertPos = nullptr;
3901 if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos))
3902 return QualType(PT, 0);
3903
3904 // If the pointee type isn't canonical, this won't be a canonical type either,
3905 // so fill in the canonical type field.
3906 QualType Canonical;
3907 if (!T.isCanonical()) {
3908 Canonical = getPointerType(T: getCanonicalType(T));
3909
3910 // Get the new insert position for the node we care about.
3911 PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
3912 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3913 }
3914 auto *New = new (*this, alignof(PointerType)) PointerType(T, Canonical);
3915 Types.push_back(Elt: New);
3916 PointerTypes.InsertNode(N: New, InsertPos);
3917 return QualType(New, 0);
3918}
3919
3920QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const {
3921 llvm::FoldingSetNodeID ID;
3922 AdjustedType::Profile(ID, Orig, New);
3923 void *InsertPos = nullptr;
3924 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
3925 if (AT)
3926 return QualType(AT, 0);
3927
3928 QualType Canonical = getCanonicalType(T: New);
3929
3930 // Get the new insert position for the node we care about.
3931 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
3932 assert(!AT && "Shouldn't be in the map!");
3933
3934 AT = new (*this, alignof(AdjustedType))
3935 AdjustedType(Type::Adjusted, Orig, New, Canonical);
3936 Types.push_back(Elt: AT);
3937 AdjustedTypes.InsertNode(N: AT, InsertPos);
3938 return QualType(AT, 0);
3939}
3940
3941QualType ASTContext::getDecayedType(QualType Orig, QualType Decayed) const {
3942 llvm::FoldingSetNodeID ID;
3943 AdjustedType::Profile(ID, Orig, New: Decayed);
3944 void *InsertPos = nullptr;
3945 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
3946 if (AT)
3947 return QualType(AT, 0);
3948
3949 QualType Canonical = getCanonicalType(T: Decayed);
3950
3951 // Get the new insert position for the node we care about.
3952 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
3953 assert(!AT && "Shouldn't be in the map!");
3954
3955 AT = new (*this, alignof(DecayedType)) DecayedType(Orig, Decayed, Canonical);
3956 Types.push_back(Elt: AT);
3957 AdjustedTypes.InsertNode(N: AT, InsertPos);
3958 return QualType(AT, 0);
3959}
3960
3961QualType ASTContext::getDecayedType(QualType T) const {
3962 assert((T->isArrayType() || T->isFunctionType()) && "T does not decay");
3963
3964 QualType Decayed;
3965
3966 // C99 6.7.5.3p7:
3967 // A declaration of a parameter as "array of type" shall be
3968 // adjusted to "qualified pointer to type", where the type
3969 // qualifiers (if any) are those specified within the [ and ] of
3970 // the array type derivation.
3971 if (T->isArrayType())
3972 Decayed = getArrayDecayedType(T);
3973
3974 // C99 6.7.5.3p8:
3975 // A declaration of a parameter as "function returning type"
3976 // shall be adjusted to "pointer to function returning type", as
3977 // in 6.3.2.1.
3978 if (T->isFunctionType())
3979 Decayed = getPointerType(T);
3980
3981 return getDecayedType(Orig: T, Decayed);
3982}
3983
/// Return the uniqued ArrayParameterType corresponding to the constant
/// array type \p Ty. Returns \p Ty unchanged if it is already an
/// ArrayParameterType.
QualType ASTContext::getArrayParameterType(QualType Ty) const {
  if (Ty->isArrayParameterType())
    return Ty;
  assert(Ty->isConstantArrayType() && "Ty must be an array type.");
  // Profile against the fully-desugared array so equivalent sugared
  // spellings unique to the same node.
  QualType DTy = Ty.getDesugaredType(Context: *this);
  const auto *ATy = cast<ConstantArrayType>(Val&: DTy);
  llvm::FoldingSetNodeID ID;
  ATy->Profile(ID, Ctx: *this, ET: ATy->getElementType(), ArraySize: ATy->getZExtSize(),
               SizeExpr: ATy->getSizeExpr(), SizeMod: ATy->getSizeModifier(),
               TypeQuals: ATy->getIndexTypeQualifiers().getAsOpaqueValue());
  void *InsertPos = nullptr;
  ArrayParameterType *AT =
      ArrayParameterTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (AT)
    return QualType(AT, 0);

  // A non-canonical array makes this node non-canonical too, so build the
  // canonical node first.
  QualType Canonical;
  if (!DTy.isCanonical()) {
    Canonical = getArrayParameterType(Ty: getCanonicalType(T: Ty));

    // Get the new insert position for the node we care about (the recursive
    // call may have invalidated the old one).
    AT = ArrayParameterTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!AT && "Shouldn't be in the map!");
  }

  AT = new (*this, alignof(ArrayParameterType))
      ArrayParameterType(ATy, Canonical);
  Types.push_back(Elt: AT);
  ArrayParameterTypes.InsertNode(N: AT, InsertPos);
  return QualType(AT, 0);
}
4015
/// getBlockPointerType - Return the uniqued reference to the type for
/// a pointer to the specified block.
QualType ASTContext::getBlockPointerType(QualType T) const {
  assert(T->isFunctionType() && "block of function types only");
  // Unique pointers, to guarantee there is only one block of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  BlockPointerType::Profile(ID, Pointee: T);

  void *InsertPos = nullptr;
  if (BlockPointerType *PT =
        BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the block pointee type isn't canonical, this won't be a canonical
  // type either so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getBlockPointerType(T: getCanonicalType(T));

    // Get the new insert position for the node we care about (the recursive
    // call above may have invalidated the old one).
    BlockPointerType *NewIP =
        BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New =
      new (*this, alignof(BlockPointerType)) BlockPointerType(T, Canonical);
  Types.push_back(Elt: New);
  BlockPointerTypes.InsertNode(N: New, InsertPos);
  return QualType(New, 0);
}
4047
/// getLValueReferenceType - Return the uniqued reference to the type for an
/// lvalue reference to the specified type. \p SpelledAsLValue is false when
/// the reference arises from reference collapsing rather than being written
/// with '&'.
QualType
ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
  assert((!T->isPlaceholderType() ||
          T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
         "Unresolved placeholder type");

  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ReferenceType::Profile(ID, Referencee: T, SpelledAsLValue);

  void *InsertPos = nullptr;
  if (LValueReferenceType *RT =
        LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(RT, 0);

  const auto *InnerRef = T->getAs<ReferenceType>();

  // If the referencee type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field. When T is itself a
  // reference, the canonical form collapses to a reference to the inner
  // pointee (reference collapsing).
  QualType Canonical;
  if (!SpelledAsLValue || InnerRef || !T.isCanonical()) {
    QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
    Canonical = getLValueReferenceType(T: getCanonicalType(T: PointeeType));

    // Get the new insert position for the node we care about (the recursive
    // call above may have invalidated the old one).
    LValueReferenceType *NewIP =
        LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = new (*this, alignof(LValueReferenceType))
      LValueReferenceType(T, Canonical, SpelledAsLValue);
  Types.push_back(Elt: New);
  LValueReferenceTypes.InsertNode(N: New, InsertPos);

  return QualType(New, 0);
}
4088
/// getRValueReferenceType - Return the uniqued reference to the type for an
/// rvalue reference to the specified type.
QualType ASTContext::getRValueReferenceType(QualType T) const {
  assert((!T->isPlaceholderType() ||
          T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
         "Unresolved placeholder type");

  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ReferenceType::Profile(ID, Referencee: T, SpelledAsLValue: false);

  void *InsertPos = nullptr;
  if (RValueReferenceType *RT =
        RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(RT, 0);

  const auto *InnerRef = T->getAs<ReferenceType>();

  // If the referencee type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field. When T is itself a
  // reference, the canonical form collapses to a reference to the inner
  // pointee (reference collapsing).
  QualType Canonical;
  if (InnerRef || !T.isCanonical()) {
    QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
    Canonical = getRValueReferenceType(T: getCanonicalType(T: PointeeType));

    // Get the new insert position for the node we care about (the recursive
    // call above may have invalidated the old one).
    RValueReferenceType *NewIP =
        RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = new (*this, alignof(RValueReferenceType))
      RValueReferenceType(T, Canonical);
  Types.push_back(Elt: New);
  RValueReferenceTypes.InsertNode(N: New, InsertPos);
  return QualType(New, 0);
}
4127
/// Return the uniqued member pointer type for pointee \p T qualified by
/// \p Qualifier in class \p Cls. At least one of \p Qualifier and \p Cls
/// must be provided; the missing one is derived from the other.
QualType ASTContext::getMemberPointerType(QualType T,
                                          NestedNameSpecifier Qualifier,
                                          const CXXRecordDecl *Cls) const {
  if (!Qualifier) {
    assert(Cls && "At least one of Qualifier or Cls must be provided");
    // Derive the qualifier from the class's canonical tag type.
    Qualifier = NestedNameSpecifier(getCanonicalTagType(TD: Cls).getTypePtr());
  } else if (!Cls) {
    // Derive the class from the qualifier.
    Cls = Qualifier.getAsRecordDecl();
  }
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  MemberPointerType::Profile(ID, Pointee: T, Qualifier, Cls);

  void *InsertPos = nullptr;
  if (MemberPointerType *PT =
        MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // The canonical qualifier is the class's canonical tag type when the
  // class is known; otherwise the canonical form of the given qualifier.
  NestedNameSpecifier CanonicalQualifier = [&] {
    if (!Cls)
      return Qualifier.getCanonical();
    NestedNameSpecifier R(getCanonicalTagType(TD: Cls).getTypePtr());
    assert(R.isCanonical());
    return R;
  }();
  // If the pointee or class type isn't canonical, this won't be a canonical
  // type either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical() || Qualifier != CanonicalQualifier) {
    Canonical =
        getMemberPointerType(T: getCanonicalType(T), Qualifier: CanonicalQualifier, Cls);
    assert(!cast<MemberPointerType>(Canonical)->isSugared());
    // Get the new insert position for the node we care about (the recursive
    // call above may have invalidated the old one).
    [[maybe_unused]] MemberPointerType *NewIP =
        MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!");
  }
  auto *New = new (*this, alignof(MemberPointerType))
      MemberPointerType(T, Qualifier, Canonical);
  Types.push_back(Elt: New);
  MemberPointerTypes.InsertNode(N: New, InsertPos);
  return QualType(New, 0);
}
4172
/// getConstantArrayType - Return the unique reference to the type for an
/// array of the specified element type.
///
/// \param EltTy element type of the array.
/// \param ArySizeIn element count; widened/truncated to the target's maximum
///        pointer width before uniquing so equal sizes compare equal.
/// \param SizeExpr the size expression as written; retained on the type only
///        when it is instantiation-dependent.
/// \param ASM array size modifier (e.g. 'static'/'*' in C99 parameters).
/// \param IndexTypeQuals qualifiers written inside the array brackets.
QualType ASTContext::getConstantArrayType(QualType EltTy,
                                          const llvm::APInt &ArySizeIn,
                                          const Expr *SizeExpr,
                                          ArraySizeModifier ASM,
                                          unsigned IndexTypeQuals) const {
  assert((EltTy->isDependentType() ||
          EltTy->isIncompleteType() || EltTy->isConstantSizeType()) &&
         "Constant array of VLAs is illegal!");

  // We only need the size as part of the type if it's instantiation-dependent.
  if (SizeExpr && !SizeExpr->isInstantiationDependent())
    SizeExpr = nullptr;

  // Convert the array size into a canonical width matching the pointer size for
  // the target.
  llvm::APInt ArySize(ArySizeIn);
  ArySize = ArySize.zextOrTrunc(width: Target->getMaxPointerWidth());

  llvm::FoldingSetNodeID ID;
  ConstantArrayType::Profile(ID, Ctx: *this, ET: EltTy, ArraySize: ArySize.getZExtValue(), SizeExpr,
                             SizeMod: ASM, TypeQuals: IndexTypeQuals);

  void *InsertPos = nullptr;
  if (ConstantArrayType *ATP =
      ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(ATP, 0);

  // If the element type isn't canonical or has qualifiers, or the array bound
  // is instantiation-dependent, this won't be a canonical type either, so fill
  // in the canonical type field.
  QualType Canon;
  // FIXME: Check below should look for qualifiers behind sugar.
  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers() || SizeExpr) {
    SplitQualType canonSplit = getCanonicalType(T: EltTy).split();
    Canon = getConstantArrayType(EltTy: QualType(canonSplit.Ty, 0), ArySizeIn: ArySize, SizeExpr: nullptr,
                                 ASM, IndexTypeQuals);
    // Element-type qualifiers migrate onto the array type itself.
    Canon = getQualifiedType(T: Canon, Qs: canonSplit.Quals);

    // Get the new insert position for the node we care about: the recursive
    // call above may have invalidated the previous InsertPos.
    ConstantArrayType *NewIP =
        ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = ConstantArrayType::Create(Ctx: *this, ET: EltTy, Can: Canon, Sz: ArySize, SzExpr: SizeExpr,
                                        SzMod: ASM, Qual: IndexTypeQuals);
  ConstantArrayTypes.InsertNode(N: New, InsertPos);
  Types.push_back(Elt: New);
  return QualType(New, 0);
}
4225
/// getVariableArrayDecayedType - Turns the given type, which may be
/// variably-modified, into the corresponding type with all the known
/// sizes replaced with [*].
///
/// Recurses through the structure of the type (pointers, references,
/// atomics, arrays), rebuilding each layer with decayed components, and
/// re-applies the original top-level qualifiers at the end.
QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
  // Vastly most common case.
  if (!type->isVariablyModifiedType()) return type;

  QualType result;

  // Desugar so the switch below only has to handle canonical type classes;
  // top-level qualifiers are saved in `split` and re-applied on return.
  SplitQualType split = type.getSplitDesugaredType();
  const Type *ty = split.Ty;
  switch (ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("didn't desugar past all non-canonical types?");

  // These types should never be variably-modified.
  case Type::Builtin:
  case Type::Complex:
  case Type::Vector:
  case Type::DependentVector:
  case Type::ExtVector:
  case Type::DependentSizedExtVector:
  case Type::ConstantMatrix:
  case Type::DependentSizedMatrix:
  case Type::DependentAddressSpace:
  case Type::ObjCObject:
  case Type::ObjCInterface:
  case Type::ObjCObjectPointer:
  case Type::Record:
  case Type::Enum:
  case Type::UnresolvedUsing:
  case Type::TypeOfExpr:
  case Type::TypeOf:
  case Type::Decltype:
  case Type::UnaryTransform:
  case Type::DependentName:
  case Type::InjectedClassName:
  case Type::TemplateSpecialization:
  case Type::TemplateTypeParm:
  case Type::SubstTemplateTypeParmPack:
  case Type::SubstBuiltinTemplatePack:
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
  case Type::PackExpansion:
  case Type::PackIndexing:
  case Type::BitInt:
  case Type::DependentBitInt:
  case Type::ArrayParameter:
  case Type::HLSLAttributedResource:
  case Type::HLSLInlineSpirv:
  case Type::OverflowBehavior:
    llvm_unreachable("type should never be variably-modified");

  // These types can be variably-modified but should never need to
  // further decay.
  case Type::FunctionNoProto:
  case Type::FunctionProto:
  case Type::BlockPointer:
  case Type::MemberPointer:
  case Type::Pipe:
    return type;

  // These types can be variably-modified. All these modifications
  // preserve structure except as noted by comments.
  // TODO: if we ever care about optimizing VLAs, there are no-op
  // optimizations available here.
  case Type::Pointer:
    result = getPointerType(T: getVariableArrayDecayedType(
        type: cast<PointerType>(Val: ty)->getPointeeType()));
    break;

  case Type::LValueReference: {
    const auto *lv = cast<LValueReferenceType>(Val: ty);
    result = getLValueReferenceType(
        T: getVariableArrayDecayedType(type: lv->getPointeeType()),
        SpelledAsLValue: lv->isSpelledAsLValue());
    break;
  }

  case Type::RValueReference: {
    const auto *lv = cast<RValueReferenceType>(Val: ty);
    result = getRValueReferenceType(
        T: getVariableArrayDecayedType(type: lv->getPointeeType()));
    break;
  }

  case Type::Atomic: {
    const auto *at = cast<AtomicType>(Val: ty);
    result = getAtomicType(T: getVariableArrayDecayedType(type: at->getValueType()));
    break;
  }

  case Type::ConstantArray: {
    const auto *cat = cast<ConstantArrayType>(Val: ty);
    // Constant bound is kept; only the element type may need decaying.
    result = getConstantArrayType(
        EltTy: getVariableArrayDecayedType(type: cat->getElementType()),
        ArySizeIn: cat->getSize(),
        SizeExpr: cat->getSizeExpr(),
        ASM: cat->getSizeModifier(),
        IndexTypeQuals: cat->getIndexTypeCVRQualifiers());
    break;
  }

  case Type::DependentSizedArray: {
    const auto *dat = cast<DependentSizedArrayType>(Val: ty);
    result = getDependentSizedArrayType(
        EltTy: getVariableArrayDecayedType(type: dat->getElementType()), NumElts: dat->getSizeExpr(),
        ASM: dat->getSizeModifier(), IndexTypeQuals: dat->getIndexTypeCVRQualifiers());
    break;
  }

  // Turn incomplete types into [*] types.
  case Type::IncompleteArray: {
    const auto *iat = cast<IncompleteArrayType>(Val: ty);
    result =
        getVariableArrayType(EltTy: getVariableArrayDecayedType(type: iat->getElementType()),
                             /*size*/ NumElts: nullptr, ASM: ArraySizeModifier::Normal,
                             IndexTypeQuals: iat->getIndexTypeCVRQualifiers());
    break;
  }

  // Turn VLA types into [*] types.
  case Type::VariableArray: {
    const auto *vat = cast<VariableArrayType>(Val: ty);
    result =
        getVariableArrayType(EltTy: getVariableArrayDecayedType(type: vat->getElementType()),
                             /*size*/ NumElts: nullptr, ASM: ArraySizeModifier::Star,
                             IndexTypeQuals: vat->getIndexTypeCVRQualifiers());
    break;
  }
  }

  // Apply the top-level qualifiers from the original.
  return getQualifiedType(T: result, Qs: split.Quals);
}
4364
/// getVariableArrayType - Returns a non-unique reference to the type for a
/// variable array of the specified element type.
///
/// Unlike most type factories here, VLA types are NOT uniqued: the size
/// expression cannot be uniqued, so every call creates a fresh node.
QualType ASTContext::getVariableArrayType(QualType EltTy, Expr *NumElts,
                                          ArraySizeModifier ASM,
                                          unsigned IndexTypeQuals) const {
  // Since we don't unique expressions, it isn't possible to unique VLA's
  // that have an expression provided for their size.
  QualType Canon;

  // Be sure to pull qualifiers off the element type.
  // FIXME: Check below should look for qualifiers behind sugar.
  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
    SplitQualType canonSplit = getCanonicalType(T: EltTy).split();
    Canon = getVariableArrayType(EltTy: QualType(canonSplit.Ty, 0), NumElts, ASM,
                                 IndexTypeQuals);
    // Element-type qualifiers migrate onto the array type itself.
    Canon = getQualifiedType(T: Canon, Qs: canonSplit.Quals);
  }

  auto *New = new (*this, alignof(VariableArrayType))
      VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals);

  // Tracked separately (in addition to Types) since VLAs are not in a
  // folding set.
  VariableArrayTypes.push_back(x: New);
  Types.push_back(Elt: New);
  return QualType(New, 0);
}
4390
/// getDependentSizedArrayType - Returns a non-unique reference to
/// the type for a dependently-sized array of the specified element
/// type.
///
/// Three outcomes are possible:
///  - no size expression: the node itself is uniqued and returned directly;
///  - the requested type is already canonical: the (possibly new) canonical
///    node is returned;
///  - otherwise a fresh sugared node pointing at the canonical node is built.
QualType
ASTContext::getDependentSizedArrayType(QualType elementType, Expr *numElements,
                                       ArraySizeModifier ASM,
                                       unsigned elementTypeQuals) const {
  assert((!numElements || numElements->isTypeDependent() ||
          numElements->isValueDependent()) &&
         "Size must be type- or value-dependent!");

  SplitQualType canonElementType = getCanonicalType(T: elementType).split();

  void *insertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  // With no size expression, profile on the written element type so the
  // sugared spelling itself is what gets uniqued.
  DependentSizedArrayType::Profile(
      ID, Context: *this, ET: numElements ? QualType(canonElementType.Ty, 0) : elementType,
      SizeMod: ASM, TypeQuals: elementTypeQuals, E: numElements);

  // Look for an existing type with these properties.
  DependentSizedArrayType *canonTy =
      DependentSizedArrayTypes.FindNodeOrInsertPos(ID, InsertPos&: insertPos);

  // Dependently-sized array types that do not have a specified number
  // of elements will have their sizes deduced from a dependent
  // initializer.
  if (!numElements) {
    if (canonTy)
      return QualType(canonTy, 0);

    auto *newType = new (*this, alignof(DependentSizedArrayType))
        DependentSizedArrayType(elementType, QualType(), numElements, ASM,
                                elementTypeQuals);
    DependentSizedArrayTypes.InsertNode(N: newType, InsertPos: insertPos);
    Types.push_back(Elt: newType);
    return QualType(newType, 0);
  }

  // If we don't have one, build one.
  if (!canonTy) {
    canonTy = new (*this, alignof(DependentSizedArrayType))
        DependentSizedArrayType(QualType(canonElementType.Ty, 0), QualType(),
                                numElements, ASM, elementTypeQuals);
    DependentSizedArrayTypes.InsertNode(N: canonTy, InsertPos: insertPos);
    Types.push_back(Elt: canonTy);
  }

  // Apply qualifiers from the element type to the array.
  QualType canon = getQualifiedType(T: QualType(canonTy,0),
                                    Qs: canonElementType.Quals);

  // If we didn't need extra canonicalization for the element type or the size
  // expression, then just use that as our result.
  if (QualType(canonElementType.Ty, 0) == elementType &&
      canonTy->getSizeExpr() == numElements)
    return canon;

  // Otherwise, we need to build a type which follows the spelling
  // of the element type.
  auto *sugaredType = new (*this, alignof(DependentSizedArrayType))
      DependentSizedArrayType(elementType, canon, numElements, ASM,
                              elementTypeQuals);
  Types.push_back(Elt: sugaredType);
  return QualType(sugaredType, 0);
}
4456
/// getIncompleteArrayType - Return the unique reference to the type for an
/// incomplete array (e.g. 'int[]') of the specified element type.
QualType ASTContext::getIncompleteArrayType(QualType elementType,
                                            ArraySizeModifier ASM,
                                            unsigned elementTypeQuals) const {
  llvm::FoldingSetNodeID ID;
  IncompleteArrayType::Profile(ID, ET: elementType, SizeMod: ASM, TypeQuals: elementTypeQuals);

  void *insertPos = nullptr;
  if (IncompleteArrayType *iat =
      IncompleteArrayTypes.FindNodeOrInsertPos(ID, InsertPos&: insertPos))
    return QualType(iat, 0);

  // If the element type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.  We also have to pull
  // qualifiers off the element type.
  QualType canon;

  // FIXME: Check below should look for qualifiers behind sugar.
  if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
    SplitQualType canonSplit = getCanonicalType(T: elementType).split();
    canon = getIncompleteArrayType(elementType: QualType(canonSplit.Ty, 0),
                                   ASM, elementTypeQuals);
    // Element-type qualifiers migrate onto the array type itself.
    canon = getQualifiedType(T: canon, Qs: canonSplit.Quals);

    // Get the new insert position for the node we care about: the recursive
    // call above may have invalidated the previous insertPos.
    IncompleteArrayType *existing =
        IncompleteArrayTypes.FindNodeOrInsertPos(ID, InsertPos&: insertPos);
    assert(!existing && "Shouldn't be in the map!"); (void) existing;
  }

  auto *newType = new (*this, alignof(IncompleteArrayType))
      IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);

  IncompleteArrayTypes.InsertNode(N: newType, InsertPos: insertPos);
  Types.push_back(Elt: newType);
  return QualType(newType, 0);
}
4493
/// getBuiltinVectorTypeInfo - Decompose a builtin scalable vector type (SVE
/// or RVV) into its element type, scalable element count, and the number of
/// vectors in the tuple (NF). The case bodies are generated from the
/// AArch64ACLETypes.def and RISCVVTypes.def tables; any other builtin kind
/// is a caller error.
ASTContext::BuiltinVectorTypeInfo
ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const {
// NOTE: these two helper macros appear unused below but are kept for the
// .def expansions that reference them.
#define SVE_INT_ELTTY(BITS, ELTS, SIGNED, NUMVECTORS)                          \
  {getIntTypeForBitwidth(BITS, SIGNED), llvm::ElementCount::getScalable(ELTS), \
   NUMVECTORS};

#define SVE_ELTTY(ELTTY, ELTS, NUMVECTORS)                                     \
  {ELTTY, llvm::ElementCount::getScalable(ELTS), NUMVECTORS};

  switch (Ty->getKind()) {
  default:
    llvm_unreachable("Unsupported builtin vector type");

#define SVE_VECTOR_TYPE_INT(Name, MangledName, Id, SingletonId, NumEls,        \
                            ElBits, NF, IsSigned)                              \
  case BuiltinType::Id:                                                        \
    return {getIntTypeForBitwidth(ElBits, IsSigned),                           \
            llvm::ElementCount::getScalable(NumEls), NF};
#define SVE_VECTOR_TYPE_FLOAT(Name, MangledName, Id, SingletonId, NumEls,      \
                              ElBits, NF)                                      \
  case BuiltinType::Id:                                                        \
    return {ElBits == 16 ? HalfTy : (ElBits == 32 ? FloatTy : DoubleTy),       \
            llvm::ElementCount::getScalable(NumEls), NF};
#define SVE_VECTOR_TYPE_BFLOAT(Name, MangledName, Id, SingletonId, NumEls,     \
                               ElBits, NF)                                     \
  case BuiltinType::Id:                                                        \
    return {BFloat16Ty, llvm::ElementCount::getScalable(NumEls), NF};
#define SVE_VECTOR_TYPE_MFLOAT(Name, MangledName, Id, SingletonId, NumEls,     \
                               ElBits, NF)                                     \
  case BuiltinType::Id:                                                        \
    return {MFloat8Ty, llvm::ElementCount::getScalable(NumEls), NF};
#define SVE_PREDICATE_TYPE_ALL(Name, MangledName, Id, SingletonId, NumEls, NF) \
  case BuiltinType::Id:                                                        \
    return {BoolTy, llvm::ElementCount::getScalable(NumEls), NF};
#include "clang/Basic/AArch64ACLETypes.def"

#define RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, ElBits, NF,         \
                            IsSigned)                                          \
  case BuiltinType::Id:                                                        \
    return {getIntTypeForBitwidth(ElBits, IsSigned),                           \
            llvm::ElementCount::getScalable(NumEls), NF};
#define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF)       \
  case BuiltinType::Id:                                                        \
    return {ElBits == 16 ? Float16Ty : (ElBits == 32 ? FloatTy : DoubleTy),    \
            llvm::ElementCount::getScalable(NumEls), NF};
#define RVV_VECTOR_TYPE_BFLOAT(Name, Id, SingletonId, NumEls, ElBits, NF)      \
  case BuiltinType::Id:                                                        \
    return {BFloat16Ty, llvm::ElementCount::getScalable(NumEls), NF};
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls)                      \
  case BuiltinType::Id:                                                        \
    return {BoolTy, llvm::ElementCount::getScalable(NumEls), 1};
#include "clang/Basic/RISCVVTypes.def"
  }
}
4548
/// getExternrefType - Return a WebAssembly externref type, which represents an
/// opaque reference to a host value.
///
/// Only valid when targeting WebAssembly with the "reference-types" feature;
/// any other configuration is a caller error.
QualType ASTContext::getWebAssemblyExternrefType() const {
  if (Target->getTriple().isWasm() && Target->hasFeature(Feature: "reference-types")) {
// Expand the singleton lookup from the .def table; only the WasmExternRef
// entry matches the compile-time guard below.
#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS)                  \
  if (BuiltinType::Id == BuiltinType::WasmExternRef)                           \
    return SingletonId;
#include "clang/Basic/WebAssemblyReferenceTypes.def"
  }
  llvm_unreachable(
      "shouldn't try to generate type externref outside WebAssembly target");
}
4561
/// getScalableVectorType - Return the unique reference to a scalable vector
/// type of the specified element type and size. VectorType must be a built-in
/// type.
///
/// Matches (EltTy, NumElts, NumFields) against the target's SVE or RVV
/// builtin singleton types; results are memoized in ScalableVecTyMap.
/// Returns a null QualType when no builtin matches.
QualType ASTContext::getScalableVectorType(QualType EltTy, unsigned NumElts,
                                           unsigned NumFields) const {
  // Fast path: answer previously computed for this key.
  auto K = llvm::ScalableVecTyKey{.EltTy: EltTy, .NumElts: NumElts, .NumFields: NumFields};
  if (auto It = ScalableVecTyMap.find(Val: K); It != ScalableVecTyMap.end())
    return It->second;

  if (Target->hasAArch64ACLETypes()) {
    uint64_t EltTySize = getTypeSize(T: EltTy);

// Each macro tests element kind, signedness, bit width, and lane/field
// counts against one .def table entry, caching and returning its singleton
// on a match.
#define SVE_VECTOR_TYPE_INT(Name, MangledName, Id, SingletonId, NumEls,        \
                            ElBits, NF, IsSigned)                              \
  if (EltTy->hasIntegerRepresentation() && !EltTy->isBooleanType() &&          \
      EltTy->hasSignedIntegerRepresentation() == IsSigned &&                   \
      EltTySize == ElBits && NumElts == (NumEls * NF) && NumFields == 1) {     \
    return ScalableVecTyMap[K] = SingletonId;                                  \
  }
#define SVE_VECTOR_TYPE_FLOAT(Name, MangledName, Id, SingletonId, NumEls,      \
                              ElBits, NF)                                      \
  if (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() &&        \
      EltTySize == ElBits && NumElts == (NumEls * NF) && NumFields == 1) {     \
    return ScalableVecTyMap[K] = SingletonId;                                  \
  }
#define SVE_VECTOR_TYPE_BFLOAT(Name, MangledName, Id, SingletonId, NumEls,     \
                               ElBits, NF)                                     \
  if (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() &&         \
      EltTySize == ElBits && NumElts == (NumEls * NF) && NumFields == 1) {     \
    return ScalableVecTyMap[K] = SingletonId;                                  \
  }
#define SVE_VECTOR_TYPE_MFLOAT(Name, MangledName, Id, SingletonId, NumEls,     \
                               ElBits, NF)                                     \
  if (EltTy->isMFloat8Type() && EltTySize == ElBits &&                         \
      NumElts == (NumEls * NF) && NumFields == 1) {                            \
    return ScalableVecTyMap[K] = SingletonId;                                  \
  }
#define SVE_PREDICATE_TYPE_ALL(Name, MangledName, Id, SingletonId, NumEls, NF) \
  if (EltTy->isBooleanType() && NumElts == (NumEls * NF) && NumFields == 1)    \
    return ScalableVecTyMap[K] = SingletonId;
#include "clang/Basic/AArch64ACLETypes.def"
  } else if (Target->hasRISCVVTypes()) {
    uint64_t EltTySize = getTypeSize(T: EltTy);
#define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned,   \
                        IsFP, IsBF)                                            \
  if (!EltTy->isBooleanType() &&                                               \
      ((EltTy->hasIntegerRepresentation() &&                                   \
        EltTy->hasSignedIntegerRepresentation() == IsSigned) ||                \
       (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() &&      \
        IsFP && !IsBF) ||                                                      \
       (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() &&       \
        IsBF && !IsFP)) &&                                                     \
      EltTySize == ElBits && NumElts == NumEls && NumFields == NF)             \
    return ScalableVecTyMap[K] = SingletonId;
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls)                      \
  if (EltTy->isBooleanType() && NumElts == NumEls)                             \
    return ScalableVecTyMap[K] = SingletonId;
#include "clang/Basic/RISCVVTypes.def"
  }
  // No builtin scalable vector matches the request.
  return QualType();
}
4623
/// getVectorType - Return the unique reference to a vector type of
/// the specified element type and size. VectorType must be a built-in type.
QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
                                   VectorKind VecKind) const {
  assert(vecType->isBuiltinType() ||
         (vecType->isBitIntType() &&
          // Only support _BitInt elements with byte-sized power of 2 NumBits.
          llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits())));

  // Check if we've already instantiated a vector of this type.
  llvm::FoldingSetNodeID ID;
  VectorType::Profile(ID, ElementType: vecType, NumElements: NumElts, TypeClass: Type::Vector, VecKind);

  void *InsertPos = nullptr;
  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(VTP, 0);

  // If the element type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!vecType.isCanonical()) {
    Canonical = getVectorType(vecType: getCanonicalType(T: vecType), NumElts, VecKind);

    // Get the new insert position for the node we care about: the recursive
    // call above may have invalidated the previous InsertPos.
    VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, alignof(VectorType))
      VectorType(vecType, NumElts, Canonical, VecKind);
  VectorTypes.InsertNode(N: New, InsertPos);
  Types.push_back(Elt: New);
  return QualType(New, 0);
}
4657
/// getDependentVectorType - Return a vector type whose element count is the
/// dependent expression \p SizeExpr (GNU vector_size with a dependent size).
/// The canonical node (built over the canonical element type, with no
/// source location) is uniqued; sugared nodes pointing at it are not.
QualType ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr,
                                            SourceLocation AttrLoc,
                                            VectorKind VecKind) const {
  llvm::FoldingSetNodeID ID;
  DependentVectorType::Profile(ID, Context: *this, ElementType: getCanonicalType(T: VecType), SizeExpr,
                               VecKind);
  void *InsertPos = nullptr;
  DependentVectorType *Canon =
      DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
  DependentVectorType *New;

  if (Canon) {
    // A canonical node already exists: build a sugared node pointing at it.
    New = new (*this, alignof(DependentVectorType)) DependentVectorType(
        VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind);
  } else {
    QualType CanonVecTy = getCanonicalType(T: VecType);
    if (CanonVecTy == VecType) {
      // The requested type is itself canonical; insert it as the canonical
      // node (empty canonical field marks it as such).
      New = new (*this, alignof(DependentVectorType))
          DependentVectorType(VecType, QualType(), SizeExpr, AttrLoc, VecKind);

      DependentVectorType *CanonCheck =
          DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!CanonCheck &&
             "Dependent-sized vector_size canonical type broken");
      (void)CanonCheck;
      DependentVectorTypes.InsertNode(N: New, InsertPos);
    } else {
      // Build (and insert) the canonical node first, then the sugared one.
      QualType CanonTy = getDependentVectorType(VecType: CanonVecTy, SizeExpr,
                                                AttrLoc: SourceLocation(), VecKind);
      New = new (*this, alignof(DependentVectorType))
          DependentVectorType(VecType, CanonTy, SizeExpr, AttrLoc, VecKind);
    }
  }

  Types.push_back(Elt: New);
  return QualType(New, 0);
}
4695
/// getExtVectorType - Return the unique reference to an extended vector type of
/// the specified element type and size. VectorType must be a built-in type.
QualType ASTContext::getExtVectorType(QualType vecType,
                                      unsigned NumElts) const {
  assert(vecType->isBuiltinType() || vecType->isDependentType() ||
         (vecType->isBitIntType() &&
          // Only support _BitInt elements with byte-sized power of 2 NumBits.
          llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits())));

  // Check if we've already instantiated a vector of this type.
  // ExtVector shares the VectorTypes folding set; the Type::ExtVector tag in
  // the profile keeps it distinct from plain vectors.
  llvm::FoldingSetNodeID ID;
  VectorType::Profile(ID, ElementType: vecType, NumElements: NumElts, TypeClass: Type::ExtVector,
                      VecKind: VectorKind::Generic);
  void *InsertPos = nullptr;
  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(VTP, 0);

  // If the element type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!vecType.isCanonical()) {
    Canonical = getExtVectorType(vecType: getCanonicalType(T: vecType), NumElts);

    // Get the new insert position for the node we care about: the recursive
    // call above may have invalidated the previous InsertPos.
    VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, alignof(ExtVectorType))
      ExtVectorType(vecType, NumElts, Canonical);
  VectorTypes.InsertNode(N: New, InsertPos);
  Types.push_back(Elt: New);
  return QualType(New, 0);
}
4729
/// getDependentSizedExtVectorType - Return an ext_vector_type whose element
/// count is the dependent expression \p SizeExpr. The canonical node (built
/// over the canonical element type, with no source location) is uniqued;
/// sugared nodes pointing at it are not.
QualType
ASTContext::getDependentSizedExtVectorType(QualType vecType,
                                           Expr *SizeExpr,
                                           SourceLocation AttrLoc) const {
  llvm::FoldingSetNodeID ID;
  DependentSizedExtVectorType::Profile(ID, Context: *this, ElementType: getCanonicalType(T: vecType),
                                       SizeExpr);

  void *InsertPos = nullptr;
  DependentSizedExtVectorType *Canon
    = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
  DependentSizedExtVectorType *New;
  if (Canon) {
    // We already have a canonical version of this array type; use it as
    // the canonical type for a newly-built type.
    New = new (*this, alignof(DependentSizedExtVectorType))
        DependentSizedExtVectorType(vecType, QualType(Canon, 0), SizeExpr,
                                    AttrLoc);
  } else {
    QualType CanonVecTy = getCanonicalType(T: vecType);
    if (CanonVecTy == vecType) {
      // The requested type is itself canonical; insert it as the canonical
      // node (empty canonical field marks it as such).
      New = new (*this, alignof(DependentSizedExtVectorType))
          DependentSizedExtVectorType(vecType, QualType(), SizeExpr, AttrLoc);

      DependentSizedExtVectorType *CanonCheck
        = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken");
      (void)CanonCheck;
      DependentSizedExtVectorTypes.InsertNode(N: New, InsertPos);
    } else {
      // Build (and insert) the canonical node first, then the sugared one.
      QualType CanonExtTy = getDependentSizedExtVectorType(vecType: CanonVecTy, SizeExpr,
                                                           AttrLoc: SourceLocation());
      New = new (*this, alignof(DependentSizedExtVectorType))
          DependentSizedExtVectorType(vecType, CanonExtTy, SizeExpr, AttrLoc);
    }
  }

  Types.push_back(Elt: New);
  return QualType(New, 0);
}
4770
/// getConstantMatrixType - Return the unique reference to the matrix type of
/// the specified element type and constant dimensions.
QualType ASTContext::getConstantMatrixType(QualType ElementTy, unsigned NumRows,
                                           unsigned NumColumns) const {
  llvm::FoldingSetNodeID ID;
  ConstantMatrixType::Profile(ID, ElementType: ElementTy, NumRows, NumColumns,
                              TypeClass: Type::ConstantMatrix);

  assert(MatrixType::isValidElementType(ElementTy, getLangOpts()) &&
         "need a valid element type");
  assert(NumRows > 0 && NumRows <= LangOpts.MaxMatrixDimension &&
         NumColumns > 0 && NumColumns <= LangOpts.MaxMatrixDimension &&
         "need valid matrix dimensions");
  void *InsertPos = nullptr;
  if (ConstantMatrixType *MTP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(MTP, 0);

  // If the element type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!ElementTy.isCanonical()) {
    Canonical =
        getConstantMatrixType(ElementTy: getCanonicalType(T: ElementTy), NumRows, NumColumns);

    // Re-query the insert position: the recursive call above may have
    // invalidated the previous InsertPos.
    ConstantMatrixType *NewIP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Matrix type shouldn't already exist in the map");
    (void)NewIP;
  }

  auto *New = new (*this, alignof(ConstantMatrixType))
      ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical);
  MatrixTypes.InsertNode(N: New, InsertPos);
  Types.push_back(Elt: New);
  return QualType(New, 0);
}
4802
4803QualType ASTContext::getDependentSizedMatrixType(QualType ElementTy,
4804 Expr *RowExpr,
4805 Expr *ColumnExpr,
4806 SourceLocation AttrLoc) const {
4807 QualType CanonElementTy = getCanonicalType(T: ElementTy);
4808 llvm::FoldingSetNodeID ID;
4809 DependentSizedMatrixType::Profile(ID, Context: *this, ElementType: CanonElementTy, RowExpr,
4810 ColumnExpr);
4811
4812 void *InsertPos = nullptr;
4813 DependentSizedMatrixType *Canon =
4814 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4815
4816 if (!Canon) {
4817 Canon = new (*this, alignof(DependentSizedMatrixType))
4818 DependentSizedMatrixType(CanonElementTy, QualType(), RowExpr,
4819 ColumnExpr, AttrLoc);
4820#ifndef NDEBUG
4821 DependentSizedMatrixType *CanonCheck =
4822 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4823 assert(!CanonCheck && "Dependent-sized matrix canonical type broken");
4824#endif
4825 DependentSizedMatrixTypes.InsertNode(N: Canon, InsertPos);
4826 Types.push_back(Elt: Canon);
4827 }
4828
4829 // Already have a canonical version of the matrix type
4830 //
4831 // If it exactly matches the requested type, use it directly.
4832 if (Canon->getElementType() == ElementTy && Canon->getRowExpr() == RowExpr &&
4833 Canon->getRowExpr() == ColumnExpr)
4834 return QualType(Canon, 0);
4835
4836 // Use Canon as the canonical type for newly-built type.
4837 DependentSizedMatrixType *New = new (*this, alignof(DependentSizedMatrixType))
4838 DependentSizedMatrixType(ElementTy, QualType(Canon, 0), RowExpr,
4839 ColumnExpr, AttrLoc);
4840 Types.push_back(Elt: New);
4841 return QualType(New, 0);
4842}
4843
/// getDependentAddressSpaceType - Return a type whose address space is the
/// dependent expression \p AddrSpaceExpr applied to \p PointeeType. The
/// canonical node (over the canonical pointee) is uniqued; a sugared node
/// pointing at it is built when the request differs from it.
QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType,
                                                  Expr *AddrSpaceExpr,
                                                  SourceLocation AttrLoc) const {
  assert(AddrSpaceExpr->isInstantiationDependent());

  QualType canonPointeeType = getCanonicalType(T: PointeeType);

  void *insertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  DependentAddressSpaceType::Profile(ID, Context: *this, PointeeType: canonPointeeType,
                                     AddrSpaceExpr);

  DependentAddressSpaceType *canonTy =
      DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, InsertPos&: insertPos);

  if (!canonTy) {
    // Build and insert the canonical node (empty canonical field marks it
    // as canonical).
    canonTy = new (*this, alignof(DependentAddressSpaceType))
        DependentAddressSpaceType(canonPointeeType, QualType(), AddrSpaceExpr,
                                  AttrLoc);
    DependentAddressSpaceTypes.InsertNode(N: canonTy, InsertPos: insertPos);
    Types.push_back(Elt: canonTy);
  }

  // If the request matches the canonical node exactly, return it directly.
  if (canonPointeeType == PointeeType &&
      canonTy->getAddrSpaceExpr() == AddrSpaceExpr)
    return QualType(canonTy, 0);

  // Otherwise build a sugared node that follows the written spelling.
  auto *sugaredType = new (*this, alignof(DependentAddressSpaceType))
      DependentAddressSpaceType(PointeeType, QualType(canonTy, 0),
                                AddrSpaceExpr, AttrLoc);
  Types.push_back(Elt: sugaredType);
  return QualType(sugaredType, 0);
}
4877
4878/// Determine whether \p T is canonical as the result type of a function.
4879static bool isCanonicalResultType(QualType T) {
4880 return T.isCanonical() &&
4881 (T.getObjCLifetime() == Qualifiers::OCL_None ||
4882 T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone);
4883}
4884
/// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
QualType
ASTContext::getFunctionNoProtoType(QualType ResultTy,
                                   const FunctionType::ExtInfo &Info) const {
  // FIXME: This assertion cannot be enabled (yet) because the ObjC rewriter
  // functionality creates a function without a prototype regardless of
  // language mode (so it makes them even in C++). Once the rewriter has been
  // fixed, this assertion can be enabled again.
  //assert(!LangOpts.requiresStrictPrototypes() &&
  //       "strict prototypes are disabled");

  // Unique functions, to guarantee there is only one function of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  FunctionNoProtoType::Profile(ID, ResultType: ResultTy, Info);

  void *InsertPos = nullptr;
  if (FunctionNoProtoType *FT =
      FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(FT, 0);

  // If the result type isn't canonical for a function result (e.g. it has
  // ARC lifetime qualifiers), fill in the canonical type field.
  QualType Canonical;
  if (!isCanonicalResultType(T: ResultTy)) {
    Canonical =
        getFunctionNoProtoType(ResultTy: getCanonicalFunctionResultType(ResultType: ResultTy), Info);

    // Get the new insert position for the node we care about: the recursive
    // call above may have invalidated the previous InsertPos.
    FunctionNoProtoType *NewIP =
        FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = new (*this, alignof(FunctionNoProtoType))
      FunctionNoProtoType(ResultTy, Canonical, Info);
  Types.push_back(Elt: New);
  FunctionNoProtoTypes.InsertNode(N: New, InsertPos);
  return QualType(New, 0);
}
4923
4924CanQualType
4925ASTContext::getCanonicalFunctionResultType(QualType ResultType) const {
4926 CanQualType CanResultType = getCanonicalType(T: ResultType);
4927
4928 // Canonical result types do not have ARC lifetime qualifiers.
4929 if (CanResultType.getQualifiers().hasObjCLifetime()) {
4930 Qualifiers Qs = CanResultType.getQualifiers();
4931 Qs.removeObjCLifetime();
4932 return CanQualType::CreateUnsafe(
4933 Other: getQualifiedType(T: CanResultType.getUnqualifiedType(), Qs));
4934 }
4935
4936 return CanResultType;
4937}
4938
4939static bool isCanonicalExceptionSpecification(
4940 const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) {
4941 if (ESI.Type == EST_None)
4942 return true;
4943 if (!NoexceptInType)
4944 return false;
4945
4946 // C++17 onwards: exception specification is part of the type, as a simple
4947 // boolean "can this function type throw".
4948 if (ESI.Type == EST_BasicNoexcept)
4949 return true;
4950
4951 // A noexcept(expr) specification is (possibly) canonical if expr is
4952 // value-dependent.
4953 if (ESI.Type == EST_DependentNoexcept)
4954 return true;
4955
4956 // A dynamic exception specification is canonical if it only contains pack
4957 // expansions (so we can't tell whether it's non-throwing) and all its
4958 // contained types are canonical.
4959 if (ESI.Type == EST_Dynamic) {
4960 bool AnyPackExpansions = false;
4961 for (QualType ET : ESI.Exceptions) {
4962 if (!ET.isCanonical())
4963 return false;
4964 if (ET->getAs<PackExpansionType>())
4965 AnyPackExpansions = true;
4966 }
4967 return AnyPackExpansions;
4968 }
4969
4970 return false;
4971}
4972
/// Build or reuse the unique FunctionProtoType for the given result type,
/// parameter list, and extended prototype info. When \p OnlyWantCanonical is
/// set, the caller guarantees all inputs are already canonical and the
/// resulting node is the canonical function type itself.
QualType ASTContext::getFunctionTypeInternal(
    QualType ResultTy, ArrayRef<QualType> ArgArray,
    const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const {
  size_t NumArgs = ArgArray.size();

  // Unique functions, to guarantee there is only one function of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  FunctionProtoType::Profile(ID, Result: ResultTy, ArgTys: ArgArray.begin(), NumArgs, EPI,
                             Context: *this, Canonical: true);

  QualType Canonical;
  // Unique = true means: do NOT insert the new node into the folding set; it
  // is a sugar node distinguished only by its concrete noexcept expression.
  bool Unique = false;

  void *InsertPos = nullptr;
  if (FunctionProtoType *FPT =
        FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) {
    QualType Existing = QualType(FPT, 0);

    // If we find a pre-existing equivalent FunctionProtoType, we can just reuse
    // it so long as our exception specification doesn't contain a dependent
    // noexcept expression, or we're just looking for a canonical type.
    // Otherwise, we're going to need to create a type
    // sugar node to hold the concrete expression.
    if (OnlyWantCanonical || !isComputedNoexcept(ESpecType: EPI.ExceptionSpec.Type) ||
        EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr())
      return Existing;

    // We need a new type sugar node for this one, to hold the new noexcept
    // expression. We do no canonicalization here, but that's OK since we don't
    // expect to see the same noexcept expression much more than once.
    Canonical = getCanonicalType(T: Existing);
    Unique = true;
  }

  bool NoexceptInType = getLangOpts().CPlusPlus17;
  bool IsCanonicalExceptionSpec =
      isCanonicalExceptionSpecification(ESI: EPI.ExceptionSpec, NoexceptInType);

  // Determine whether the type being created is already canonical or not.
  bool isCanonical = !Unique && IsCanonicalExceptionSpec &&
                     isCanonicalResultType(T: ResultTy) && !EPI.HasTrailingReturn;
  for (unsigned i = 0; i != NumArgs && isCanonical; ++i)
    if (!ArgArray[i].isCanonicalAsParam())
      isCanonical = false;

  if (OnlyWantCanonical)
    assert(isCanonical &&
           "given non-canonical parameters constructing canonical type");

  // If this type isn't canonical, get the canonical version of it if we don't
  // already have it. The exception spec is only partially part of the
  // canonical type, and only in C++17 onwards.
  if (!isCanonical && Canonical.isNull()) {
    SmallVector<QualType, 16> CanonicalArgs;
    CanonicalArgs.reserve(N: NumArgs);
    for (unsigned i = 0; i != NumArgs; ++i)
      CanonicalArgs.push_back(Elt: getCanonicalParamType(T: ArgArray[i]));

    llvm::SmallVector<QualType, 8> ExceptionTypeStorage;
    FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI;
    CanonicalEPI.HasTrailingReturn = false;

    if (IsCanonicalExceptionSpec) {
      // Exception spec is already OK.
    } else if (NoexceptInType) {
      // C++17: reduce the exception specification to its canonical boolean
      // ("can throw" -> EST_None, "cannot throw" -> EST_BasicNoexcept).
      switch (EPI.ExceptionSpec.Type) {
      case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated:
        // We don't know yet. It shouldn't matter what we pick here; no-one
        // should ever look at this.
        [[fallthrough]];
      case EST_None: case EST_MSAny: case EST_NoexceptFalse:
        CanonicalEPI.ExceptionSpec.Type = EST_None;
        break;

        // A dynamic exception specification is almost always "not noexcept",
        // with the exception that a pack expansion might expand to no types.
      case EST_Dynamic: {
        bool AnyPacks = false;
        for (QualType ET : EPI.ExceptionSpec.Exceptions) {
          if (ET->getAs<PackExpansionType>())
            AnyPacks = true;
          ExceptionTypeStorage.push_back(Elt: getCanonicalType(T: ET));
        }
        if (!AnyPacks)
          CanonicalEPI.ExceptionSpec.Type = EST_None;
        else {
          CanonicalEPI.ExceptionSpec.Type = EST_Dynamic;
          CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage;
        }
        break;
      }

      case EST_DynamicNone:
      case EST_BasicNoexcept:
      case EST_NoexceptTrue:
      case EST_NoThrow:
        CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept;
        break;

      case EST_DependentNoexcept:
        llvm_unreachable("dependent noexcept is already canonical");
      }
    } else {
      // Pre-C++17: the exception specification does not participate in the
      // canonical type at all.
      CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo();
    }

    // Adjust the canonical function result type.
    CanQualType CanResultTy = getCanonicalFunctionResultType(ResultType: ResultTy);
    Canonical =
        getFunctionTypeInternal(ResultTy: CanResultTy, ArgArray: CanonicalArgs, EPI: CanonicalEPI, OnlyWantCanonical: true);

    // Get the new insert position for the node we care about; the recursive
    // call above may have invalidated InsertPos.
    FunctionProtoType *NewIP =
        FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  // Compute the needed size to hold this FunctionProtoType and the
  // various trailing objects.
  auto ESH = FunctionProtoType::getExceptionSpecSize(
      EST: EPI.ExceptionSpec.Type, NumExceptions: EPI.ExceptionSpec.Exceptions.size());
  size_t Size = FunctionProtoType::totalSizeToAlloc<
      QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields,
      FunctionType::FunctionTypeExtraAttributeInfo,
      FunctionType::FunctionTypeArmAttributes, FunctionType::ExceptionType,
      Expr *, FunctionDecl *, FunctionProtoType::ExtParameterInfo, Qualifiers,
      FunctionEffect, EffectConditionExpr>(
      Counts: NumArgs, Counts: EPI.Variadic, Counts: EPI.requiresFunctionProtoTypeExtraBitfields(),
      Counts: EPI.requiresFunctionProtoTypeExtraAttributeInfo(),
      Counts: EPI.requiresFunctionProtoTypeArmAttributes(), Counts: ESH.NumExceptionType,
      Counts: ESH.NumExprPtr, Counts: ESH.NumFunctionDeclPtr,
      Counts: EPI.ExtParameterInfos ? NumArgs : 0,
      Counts: EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0, Counts: EPI.FunctionEffects.size(),
      Counts: EPI.FunctionEffects.conditions().size());

  auto *FTP = (FunctionProtoType *)Allocate(Size, Align: alignof(FunctionProtoType));
  FunctionProtoType::ExtProtoInfo newEPI = EPI;
  new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI);
  Types.push_back(Elt: FTP);
  // Sugar nodes holding a distinct noexcept expression (Unique) stay out of
  // the folding set so the profiled node remains the one found by lookups.
  if (!Unique)
    FunctionProtoTypes.InsertNode(N: FTP, InsertPos);
  if (!EPI.FunctionEffects.empty())
    AnyFunctionEffects = true;
  return QualType(FTP, 0);
}
5119
/// Return the unique OpenCL pipe type with element type \p T; \p ReadOnly
/// selects between read-only and write-only pipes.
QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const {
  llvm::FoldingSetNodeID ID;
  PipeType::Profile(ID, T, isRead: ReadOnly);

  void *InsertPos = nullptr;
  if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the pipe element type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getPipeType(T: getCanonicalType(T), ReadOnly);

    // Get the new insert position for the node we care about; the recursive
    // call may have invalidated InsertPos.
    PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!");
    (void)NewIP;
  }
  auto *New = new (*this, alignof(PipeType)) PipeType(T, Canonical, ReadOnly);
  Types.push_back(Elt: New);
  PipeTypes.InsertNode(N: New, InsertPos);
  return QualType(New, 0);
}
5144
5145QualType ASTContext::adjustStringLiteralBaseType(QualType Ty) const {
5146 // OpenCL v1.1 s6.5.3: a string literal is in the constant address space.
5147 return LangOpts.OpenCL ? getAddrSpaceQualType(T: Ty, AddressSpace: LangAS::opencl_constant)
5148 : Ty;
5149}
5150
5151QualType ASTContext::getReadPipeType(QualType T) const {
5152 return getPipeType(T, ReadOnly: true);
5153}
5154
5155QualType ASTContext::getWritePipeType(QualType T) const {
5156 return getPipeType(T, ReadOnly: false);
5157}
5158
/// Return the unique _BitInt type with the given signedness and bit width.
QualType ASTContext::getBitIntType(bool IsUnsigned, unsigned NumBits) const {
  llvm::FoldingSetNodeID ID;
  BitIntType::Profile(ID, IsUnsigned, NumBits);

  void *InsertPos = nullptr;
  if (BitIntType *EIT = BitIntTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(EIT, 0);

  // A _BitInt type with concrete width is always canonical, so there is no
  // canonical-type link to fill in.
  auto *New = new (*this, alignof(BitIntType)) BitIntType(IsUnsigned, NumBits);
  BitIntTypes.InsertNode(N: New, InsertPos);
  Types.push_back(Elt: New);
  return QualType(New, 0);
}
5172
/// Return the unique _BitInt type whose bit width is the (dependent)
/// expression \p NumBitsExpr.
QualType ASTContext::getDependentBitIntType(bool IsUnsigned,
                                            Expr *NumBitsExpr) const {
  assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent");
  llvm::FoldingSetNodeID ID;
  DependentBitIntType::Profile(ID, Context: *this, IsUnsigned, NumBitsExpr);

  void *InsertPos = nullptr;
  if (DependentBitIntType *Existing =
        DependentBitIntTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(Existing, 0);

  auto *New = new (*this, alignof(DependentBitIntType))
      DependentBitIntType(IsUnsigned, NumBitsExpr);
  DependentBitIntTypes.InsertNode(N: New, InsertPos);

  Types.push_back(Elt: New);
  return QualType(New, 0);
}
5191
/// Return the unique PredefinedSugarType node of kind \p KD (size_t,
/// signed size_t, ptrdiff_t). Nodes are cached per kind in the
/// PredefinedSugarTypes array.
QualType
ASTContext::getPredefinedSugarType(PredefinedSugarType::Kind KD) const {
  using Kind = PredefinedSugarType::Kind;

  // Fast path: one node per kind, cached on first use.
  if (auto *Target = PredefinedSugarTypes[llvm::to_underlying(E: KD)];
      Target != nullptr)
    return QualType(Target, 0);

  auto getCanonicalType = [](const ASTContext &Ctx, Kind KDI) -> QualType {
    switch (KDI) {
      // size_t (C99TC3 6.5.3.4), signed size_t (C++23 5.13.2) and
      // ptrdiff_t (C99TC3 6.5.6) Although these types are not built-in, they
      // are part of the core language and are widely used. Using
      // PredefinedSugarType makes these types as named sugar types rather than
      // standard integer types, enabling better hints and diagnostics.
    case Kind::SizeT:
      return Ctx.getFromTargetType(Type: Ctx.Target->getSizeType());
    case Kind::SignedSizeT:
      return Ctx.getFromTargetType(Type: Ctx.Target->getSignedSizeType());
    case Kind::PtrdiffT:
      return Ctx.getFromTargetType(Type: Ctx.Target->getPtrDiffType(AddrSpace: LangAS::Default));
    }
    llvm_unreachable("unexpected kind");
  };
  auto *New = new (*this, alignof(PredefinedSugarType))
      PredefinedSugarType(KD, &Idents.get(Name: PredefinedSugarType::getName(KD)),
                          getCanonicalType(*this, static_cast<Kind>(KD)));
  Types.push_back(Elt: New);
  PredefinedSugarTypes[llvm::to_underlying(E: KD)] = New;
  return QualType(New, 0);
}
5223
5224QualType ASTContext::getTypeDeclType(ElaboratedTypeKeyword Keyword,
5225 NestedNameSpecifier Qualifier,
5226 const TypeDecl *Decl) const {
5227 if (auto *Tag = dyn_cast<TagDecl>(Val: Decl))
5228 return getTagType(Keyword, Qualifier, TD: Tag,
5229 /*OwnsTag=*/false);
5230 if (auto *Typedef = dyn_cast<TypedefNameDecl>(Val: Decl))
5231 return getTypedefType(Keyword, Qualifier, Decl: Typedef);
5232 if (auto *UD = dyn_cast<UnresolvedUsingTypenameDecl>(Val: Decl))
5233 return getUnresolvedUsingType(Keyword, Qualifier, D: UD);
5234
5235 assert(Keyword == ElaboratedTypeKeyword::None);
5236 assert(!Qualifier);
5237 return QualType(Decl->TypeForDecl, 0);
5238}
5239
5240CanQualType ASTContext::getCanonicalTypeDeclType(const TypeDecl *TD) const {
5241 if (auto *Tag = dyn_cast<TagDecl>(Val: TD))
5242 return getCanonicalTagType(TD: Tag);
5243 if (auto *TN = dyn_cast<TypedefNameDecl>(Val: TD))
5244 return getCanonicalType(T: TN->getUnderlyingType());
5245 if (const auto *UD = dyn_cast<UnresolvedUsingTypenameDecl>(Val: TD))
5246 return getCanonicalUnresolvedUsingType(D: UD);
5247 assert(TD->TypeForDecl);
5248 return TD->TypeForDecl->getCanonicalTypeUnqualified();
5249}
5250
5251QualType ASTContext::getTypeDeclType(const TypeDecl *Decl) const {
5252 if (const auto *TD = dyn_cast<TagDecl>(Val: Decl))
5253 return getCanonicalTagType(TD);
5254 if (const auto *TD = dyn_cast<TypedefNameDecl>(Val: Decl);
5255 isa_and_nonnull<TypedefDecl, TypeAliasDecl>(Val: TD))
5256 return getTypedefType(Keyword: ElaboratedTypeKeyword::None,
5257 /*Qualifier=*/std::nullopt, Decl: TD);
5258 if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Val: Decl))
5259 return getCanonicalUnresolvedUsingType(D: Using);
5260
5261 assert(Decl->TypeForDecl);
5262 return QualType(Decl->TypeForDecl, 0);
5263}
5264
/// getTypedefType - Return the unique reference to the type for the
/// specified typedef name decl.
///
/// \param UnderlyingType if non-null, the underlying type to record on the
///        node; it is asserted to be the same type as the declaration's
///        (though it may be differently sugared). When null it is taken from
///        the declaration.
/// \param TypeMatchesDeclOrNone if provided, states whether UnderlyingType
///        matches the declaration's own underlying type; used to break a
///        serialization cycle where the decl's type cannot be read yet.
QualType
ASTContext::getTypedefType(ElaboratedTypeKeyword Keyword,
                           NestedNameSpecifier Qualifier,
                           const TypedefNameDecl *Decl, QualType UnderlyingType,
                           std::optional<bool> TypeMatchesDeclOrNone) const {
  if (!TypeMatchesDeclOrNone) {
    QualType DeclUnderlyingType = Decl->getUnderlyingType();
    assert(!DeclUnderlyingType.isNull());
    if (UnderlyingType.isNull())
      UnderlyingType = DeclUnderlyingType;
    else
      assert(hasSameType(UnderlyingType, DeclUnderlyingType));
    TypeMatchesDeclOrNone = UnderlyingType == DeclUnderlyingType;
  } else {
    // FIXME: This is a workaround for a serialization cycle: assume the decl
    // underlying type is not available; don't touch it.
    assert(!UnderlyingType.isNull());
  }

  // Fast path: an unelaborated, unqualified reference whose type matches the
  // declaration is cached directly on the declaration (no folding set).
  if (Keyword == ElaboratedTypeKeyword::None && !Qualifier &&
      *TypeMatchesDeclOrNone) {
    if (Decl->TypeForDecl)
      return QualType(Decl->TypeForDecl, 0);

    auto *NewType = new (*this, alignof(TypedefType))
        TypedefType(Type::Typedef, Keyword, Qualifier, Decl, UnderlyingType,
                    !*TypeMatchesDeclOrNone);

    Types.push_back(Elt: NewType);
    Decl->TypeForDecl = NewType;
    return QualType(NewType, 0);
  }

  // General path: unique the node in the TypedefTypes folding set. The
  // underlying type participates in the profile only when it differs from
  // the declaration's own type.
  llvm::FoldingSetNodeID ID;
  TypedefType::Profile(ID, Keyword, Qualifier, Decl,
                       Underlying: *TypeMatchesDeclOrNone ? QualType() : UnderlyingType);

  void *InsertPos = nullptr;
  if (FoldingSetPlaceholder<TypedefType> *Placeholder =
        TypedefTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(Placeholder->getType(), 0);

  // Tail-allocate the folding-set placeholder, the optional qualifier, and
  // the optional divergent underlying type.
  void *Mem =
      Allocate(Size: TypedefType::totalSizeToAlloc<FoldingSetPlaceholder<TypedefType>,
                                               NestedNameSpecifier, QualType>(
                   Counts: 1, Counts: !!Qualifier, Counts: !*TypeMatchesDeclOrNone),
               Align: alignof(TypedefType));
  auto *NewType =
      new (Mem) TypedefType(Type::Typedef, Keyword, Qualifier, Decl,
                            UnderlyingType, !*TypeMatchesDeclOrNone);
  auto *Placeholder = new (NewType->getFoldingSetPlaceholder())
      FoldingSetPlaceholder<TypedefType>();
  TypedefTypes.InsertNode(N: Placeholder, InsertPos);
  Types.push_back(Elt: NewType);
  return QualType(NewType, 0);
}
5323
/// Return the unique UsingType node for a type introduced via a using-shadow
/// declaration \p D. \p UnderlyingType, when null, is recomputed from the
/// declaration the shadow refers to.
QualType ASTContext::getUsingType(ElaboratedTypeKeyword Keyword,
                                  NestedNameSpecifier Qualifier,
                                  const UsingShadowDecl *D,
                                  QualType UnderlyingType) const {
  // FIXME: This is expensive to compute every time!
  if (UnderlyingType.isNull()) {
    const auto *UD = cast<UsingDecl>(Val: D->getIntroducer());
    UnderlyingType =
        getTypeDeclType(Keyword: UD->hasTypename() ? ElaboratedTypeKeyword::Typename
                                              : ElaboratedTypeKeyword::None,
                        Qualifier: UD->getQualifier(), Decl: cast<TypeDecl>(Val: D->getTargetDecl()));
  }

  llvm::FoldingSetNodeID ID;
  UsingType::Profile(ID, Keyword, Qualifier, D, UnderlyingType);

  void *InsertPos = nullptr;
  if (const UsingType *T = UsingTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(T, 0);

  assert(!UnderlyingType.hasLocalQualifiers());

  // The recorded underlying type must agree with what the target declaration
  // actually names.
  assert(
      hasSameType(getCanonicalTypeDeclType(cast<TypeDecl>(D->getTargetDecl())),
                  UnderlyingType));

  // Tail-allocate space for the qualifier only when one is present.
  void *Mem =
      Allocate(Size: UsingType::totalSizeToAlloc<NestedNameSpecifier>(Counts: !!Qualifier),
               Align: alignof(UsingType));
  UsingType *T = new (Mem) UsingType(Keyword, Qualifier, D, UnderlyingType);
  Types.push_back(Elt: T);
  UsingTypes.InsertNode(N: T, InsertPos);
  return QualType(T, 0);
}
5358
/// Allocate and construct the concrete TagType subclass (EnumType,
/// RecordType, or InjectedClassNameType) for \p TD, optionally preceded in
/// memory by a folding-set placeholder and followed by the qualifier.
TagType *ASTContext::getTagTypeInternal(ElaboratedTypeKeyword Keyword,
                                        NestedNameSpecifier Qualifier,
                                        const TagDecl *TD, bool OwnsTag,
                                        bool IsInjected,
                                        const Type *CanonicalType,
                                        bool WithFoldingSetNode) const {
  // Pick the dynamic type class and its object size from the decl kind.
  auto [TC, Size] = [&] {
    switch (TD->getDeclKind()) {
    case Decl::Enum:
      static_assert(alignof(EnumType) == alignof(TagType));
      return std::make_tuple(args: Type::Enum, args: sizeof(EnumType));
    case Decl::ClassTemplatePartialSpecialization:
    case Decl::ClassTemplateSpecialization:
    case Decl::CXXRecord:
      static_assert(alignof(RecordType) == alignof(TagType));
      static_assert(alignof(InjectedClassNameType) == alignof(TagType));
      if (cast<CXXRecordDecl>(Val: TD)->hasInjectedClassType())
        return std::make_tuple(args: Type::InjectedClassName,
                               args: sizeof(InjectedClassNameType));
      [[fallthrough]];
    case Decl::Record:
      return std::make_tuple(args: Type::Record, args: sizeof(RecordType));
    default:
      llvm_unreachable("unexpected decl kind");
    }
  }();

  // A qualifier, when present, is stored immediately after the type object.
  if (Qualifier) {
    static_assert(alignof(NestedNameSpecifier) <= alignof(TagType));
    Size = llvm::alignTo(Value: Size, Align: alignof(NestedNameSpecifier)) +
           sizeof(NestedNameSpecifier);
  }
  void *Mem;
  if (WithFoldingSetNode) {
    // FIXME: It would be more profitable to tail allocate the folding set node
    // from the type, instead of the other way around, due to the greater
    // alignment requirements of the type. But this makes it harder to deal with
    // the different type node sizes. This would require either uniquing from
    // different folding sets, or having the folding set accept a
    // contextual parameter which is not fixed at construction.
    Mem = Allocate(
        Size: sizeof(TagTypeFoldingSetPlaceholder) +
            TagTypeFoldingSetPlaceholder::getOffset() + Size,
        Align: std::max(a: alignof(TagTypeFoldingSetPlaceholder), b: alignof(TagType)));
    auto *T = new (Mem) TagTypeFoldingSetPlaceholder();
    Mem = T->getTagType();
  } else {
    Mem = Allocate(Size, Align: alignof(TagType));
  }

  // Placement-construct the concrete subclass; each branch asserts the
  // TagType subobject is at offset zero, which fromTagType() relies on.
  auto *T = [&, TC = TC]() -> TagType * {
    switch (TC) {
    case Type::Enum: {
      assert(isa<EnumDecl>(TD));
      auto *T = new (Mem) EnumType(TC, Keyword, Qualifier, TD, OwnsTag,
                                   IsInjected, CanonicalType);
      assert(reinterpret_cast<void *>(T) ==
                 reinterpret_cast<void *>(static_cast<TagType *>(T)) &&
             "TagType must be the first base of EnumType");
      return T;
    }
    case Type::Record: {
      assert(isa<RecordDecl>(TD));
      auto *T = new (Mem) RecordType(TC, Keyword, Qualifier, TD, OwnsTag,
                                     IsInjected, CanonicalType);
      assert(reinterpret_cast<void *>(T) ==
                 reinterpret_cast<void *>(static_cast<TagType *>(T)) &&
             "TagType must be the first base of RecordType");
      return T;
    }
    case Type::InjectedClassName: {
      auto *T = new (Mem) InjectedClassNameType(Keyword, Qualifier, TD,
                                                IsInjected, CanonicalType);
      assert(reinterpret_cast<void *>(T) ==
                 reinterpret_cast<void *>(static_cast<TagType *>(T)) &&
             "TagType must be the first base of InjectedClassNameType");
      return T;
    }
    default:
      llvm_unreachable("unexpected type class");
    }
  }();
  // Sanity-check that the constructed node faithfully records its inputs.
  assert(T->getKeyword() == Keyword);
  assert(T->getQualifier() == Qualifier);
  assert(T->getDecl() == TD);
  assert(T->isInjected() == IsInjected);
  assert(T->isTagOwned() == OwnsTag);
  assert((T->isCanonicalUnqualified()
              ? QualType()
              : T->getCanonicalTypeInternal()) == QualType(CanonicalType, 0));
  Types.push_back(Elt: T);
  return T;
}
5452
5453static const TagDecl *getNonInjectedClassName(const TagDecl *TD) {
5454 if (const auto *RD = dyn_cast<CXXRecordDecl>(Val: TD);
5455 RD && RD->isInjectedClassName())
5456 return cast<TagDecl>(Val: RD->getDeclContext());
5457 return TD;
5458}
5459
/// Return the canonical tag type for \p TD. The node is built once per
/// canonical declaration and cached on it.
CanQualType ASTContext::getCanonicalTagType(const TagDecl *TD) const {
  // Strip an injected-class-name and canonicalize the declaration first.
  TD = ::getNonInjectedClassName(TD)->getCanonicalDecl();
  if (TD->TypeForDecl)
    return TD->TypeForDecl->getCanonicalTypeUnqualified();

  // CanonicalType == nullptr makes the new node its own canonical type.
  const Type *CanonicalType = getTagTypeInternal(
      Keyword: ElaboratedTypeKeyword::None,
      /*Qualifier=*/std::nullopt, TD,
      /*OwnsTag=*/false, /*IsInjected=*/false, /*CanonicalType=*/nullptr,
      /*WithFoldingSetNode=*/false);
  TD->TypeForDecl = CanonicalType;
  return CanQualType::CreateUnsafe(Other: QualType(CanonicalType, 0));
}
5473
/// Return the tag type for \p TD with the given elaborated keyword and
/// qualifier. A reference using the "preferred" spelling is cached on the
/// declaration; all other forms are uniqued in the TagTypes folding set.
QualType ASTContext::getTagType(ElaboratedTypeKeyword Keyword,
                                NestedNameSpecifier Qualifier,
                                const TagDecl *TD, bool OwnsTag) const {

  const TagDecl *NonInjectedTD = ::getNonInjectedClassName(TD);
  bool IsInjected = TD != NonInjectedTD;

  // In C++ the natural spelling has no keyword; in C the tag keyword
  // (struct/union/enum) is part of the natural spelling.
  ElaboratedTypeKeyword PreferredKeyword =
      getLangOpts().CPlusPlus ? ElaboratedTypeKeyword::None
                              : KeywordHelpers::getKeywordForTagTypeKind(
                                    Tag: NonInjectedTD->getTagKind());

  // Fast path: the preferred, unqualified, non-owning form is cached on the
  // declaration (alongside the canonical node, which it must not shadow).
  if (Keyword == PreferredKeyword && !Qualifier && !OwnsTag) {
    if (const Type *T = TD->TypeForDecl; T && !T->isCanonicalUnqualified())
      return QualType(T, 0);

    const Type *CanonicalType = getCanonicalTagType(TD: NonInjectedTD).getTypePtr();
    const Type *T =
        getTagTypeInternal(Keyword,
                           /*Qualifier=*/std::nullopt, TD: NonInjectedTD,
                           /*OwnsTag=*/false, IsInjected, CanonicalType,
                           /*WithFoldingSetNode=*/false);
    TD->TypeForDecl = T;
    return QualType(T, 0);
  }

  llvm::FoldingSetNodeID ID;
  TagTypeFoldingSetPlaceholder::Profile(ID, Keyword, Qualifier, Tag: NonInjectedTD,
                                        OwnsTag, IsInjected);

  void *InsertPos = nullptr;
  if (TagTypeFoldingSetPlaceholder *T =
        TagTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(T->getTagType(), 0);

  const Type *CanonicalType = getCanonicalTagType(TD: NonInjectedTD).getTypePtr();
  TagType *T =
      getTagTypeInternal(Keyword, Qualifier, TD: NonInjectedTD, OwnsTag, IsInjected,
                         CanonicalType, /*WithFoldingSetNode=*/true);
  TagTypes.InsertNode(N: TagTypeFoldingSetPlaceholder::fromTagType(T), InsertPos);
  return QualType(T, 0);
}
5516
/// Choose the smallest integer type able to hold all enumerator values
/// described by the bit counts, plus the type the enumeration promotes to.
/// \p NumNegativeBits / \p NumPositiveBits are the bits needed to represent
/// the most negative / most positive enumerator. Returns true when even
/// 'long long' cannot represent every value (enum too large).
bool ASTContext::computeBestEnumTypes(bool IsPacked, unsigned NumNegativeBits,
                                      unsigned NumPositiveBits,
                                      QualType &BestType,
                                      QualType &BestPromotionType) {
  unsigned IntWidth = Target->getIntWidth();
  unsigned CharWidth = Target->getCharWidth();
  unsigned ShortWidth = Target->getShortWidth();
  bool EnumTooLarge = false;
  unsigned BestWidth;
  if (NumNegativeBits) {
    // If there is a negative value, figure out the smallest integer type (of
    // int/long/longlong) that fits.
    // If it's packed, check also if it fits a char or a short.
    // Note: positive values need a strict inequality (<) to leave room for
    // the sign bit; negative values use <=.
    if (IsPacked && NumNegativeBits <= CharWidth &&
        NumPositiveBits < CharWidth) {
      BestType = SignedCharTy;
      BestWidth = CharWidth;
    } else if (IsPacked && NumNegativeBits <= ShortWidth &&
               NumPositiveBits < ShortWidth) {
      BestType = ShortTy;
      BestWidth = ShortWidth;
    } else if (NumNegativeBits <= IntWidth && NumPositiveBits < IntWidth) {
      BestType = IntTy;
      BestWidth = IntWidth;
    } else {
      BestWidth = Target->getLongWidth();

      if (NumNegativeBits <= BestWidth && NumPositiveBits < BestWidth) {
        BestType = LongTy;
      } else {
        BestWidth = Target->getLongLongWidth();

        if (NumNegativeBits > BestWidth || NumPositiveBits >= BestWidth)
          EnumTooLarge = true;
        BestType = LongLongTy;
      }
    }
    // Signed enums promote to int when they fit, otherwise to themselves.
    BestPromotionType = (BestWidth <= IntWidth ? IntTy : BestType);
  } else {
    // If there is no negative value, figure out the smallest type that fits
    // all of the enumerator values.
    // If it's packed, check also if it fits a char or a short.
    if (IsPacked && NumPositiveBits <= CharWidth) {
      BestType = UnsignedCharTy;
      BestPromotionType = IntTy;
      BestWidth = CharWidth;
    } else if (IsPacked && NumPositiveBits <= ShortWidth) {
      BestType = UnsignedShortTy;
      BestPromotionType = IntTy;
      BestWidth = ShortWidth;
    } else if (NumPositiveBits <= IntWidth) {
      BestType = UnsignedIntTy;
      BestWidth = IntWidth;
      // In C++ an unsigned enum that fits in int promotes to int; in C (or
      // when the full width is used) it promotes to the unsigned type.
      BestPromotionType = (NumPositiveBits == BestWidth || !LangOpts.CPlusPlus)
                              ? UnsignedIntTy
                              : IntTy;
    } else if (NumPositiveBits <= (BestWidth = Target->getLongWidth())) {
      BestType = UnsignedLongTy;
      BestPromotionType = (NumPositiveBits == BestWidth || !LangOpts.CPlusPlus)
                              ? UnsignedLongTy
                              : LongTy;
    } else {
      BestWidth = Target->getLongLongWidth();
      if (NumPositiveBits > BestWidth) {
        // This can happen with bit-precise integer types, but those are not
        // allowed as the type for an enumerator per C23 6.7.2.2p4 and p12.
        // FIXME: GCC uses __int128_t and __uint128_t for cases that fit within
        // a 128-bit integer, we should consider doing the same.
        EnumTooLarge = true;
      }
      BestType = UnsignedLongLongTy;
      BestPromotionType = (NumPositiveBits == BestWidth || !LangOpts.CPlusPlus)
                              ? UnsignedLongLongTy
                              : LongLongTy;
    }
  }
  return EnumTooLarge;
}
5595
5596bool ASTContext::isRepresentableIntegerValue(llvm::APSInt &Value, QualType T) {
5597 assert((T->isIntegralType(*this) || T->isEnumeralType()) &&
5598 "Integral type required!");
5599 unsigned BitWidth = getIntWidth(T);
5600
5601 if (Value.isUnsigned() || Value.isNonNegative()) {
5602 if (T->isSignedIntegerOrEnumerationType())
5603 --BitWidth;
5604 return Value.getActiveBits() <= BitWidth;
5605 }
5606 return Value.getSignificantBits() <= BitWidth;
5607}
5608
5609UnresolvedUsingType *ASTContext::getUnresolvedUsingTypeInternal(
5610 ElaboratedTypeKeyword Keyword, NestedNameSpecifier Qualifier,
5611 const UnresolvedUsingTypenameDecl *D, void *InsertPos,
5612 const Type *CanonicalType) const {
5613 void *Mem = Allocate(
5614 Size: UnresolvedUsingType::totalSizeToAlloc<
5615 FoldingSetPlaceholder<UnresolvedUsingType>, NestedNameSpecifier>(
5616 Counts: !!InsertPos, Counts: !!Qualifier),
5617 Align: alignof(UnresolvedUsingType));
5618 auto *T = new (Mem) UnresolvedUsingType(Keyword, Qualifier, D, CanonicalType);
5619 if (InsertPos) {
5620 auto *Placeholder = new (T->getFoldingSetPlaceholder())
5621 FoldingSetPlaceholder<TypedefType>();
5622 TypedefTypes.InsertNode(N: Placeholder, InsertPos);
5623 }
5624 Types.push_back(Elt: T);
5625 return T;
5626}
5627
/// Return the canonical UnresolvedUsingType for \p D; built once per
/// canonical declaration and cached on it.
CanQualType ASTContext::getCanonicalUnresolvedUsingType(
    const UnresolvedUsingTypenameDecl *D) const {
  D = D->getCanonicalDecl();
  if (D->TypeForDecl)
    return D->TypeForDecl->getCanonicalTypeUnqualified();

  // CanonicalType == nullptr makes the new node its own canonical type; no
  // folding-set registration is needed for the canonical node.
  const Type *CanonicalType = getUnresolvedUsingTypeInternal(
      Keyword: ElaboratedTypeKeyword::None,
      /*Qualifier=*/std::nullopt, D,
      /*InsertPos=*/nullptr, /*CanonicalType=*/nullptr);
  D->TypeForDecl = CanonicalType;
  return CanQualType::CreateUnsafe(Other: QualType(CanonicalType, 0));
}
5641
/// Return the UnresolvedUsingType for \p D with the given elaborated keyword
/// and qualifier. The plain (keyword-less, unqualified) form is cached on
/// the declaration; other forms are uniqued in a folding set.
QualType
ASTContext::getUnresolvedUsingType(ElaboratedTypeKeyword Keyword,
                                   NestedNameSpecifier Qualifier,
                                   const UnresolvedUsingTypenameDecl *D) const {
  if (Keyword == ElaboratedTypeKeyword::None && !Qualifier) {
    // Fast path: cached on the decl (alongside, but distinct from, the
    // canonical node).
    if (const Type *T = D->TypeForDecl; T && !T->isCanonicalUnqualified())
      return QualType(T, 0);

    const Type *CanonicalType = getCanonicalUnresolvedUsingType(D).getTypePtr();
    const Type *T =
        getUnresolvedUsingTypeInternal(Keyword: ElaboratedTypeKeyword::None,
                                       /*Qualifier=*/std::nullopt, D,
                                       /*InsertPos=*/nullptr, CanonicalType);
    D->TypeForDecl = T;
    return QualType(T, 0);
  }

  llvm::FoldingSetNodeID ID;
  UnresolvedUsingType::Profile(ID, Keyword, Qualifier, D);

  void *InsertPos = nullptr;
  if (FoldingSetPlaceholder<UnresolvedUsingType> *Placeholder =
        UnresolvedUsingTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(Placeholder->getType(), 0);
  assert(InsertPos);

  const Type *CanonicalType = getCanonicalUnresolvedUsingType(D).getTypePtr();
  const Type *T = getUnresolvedUsingTypeInternal(Keyword, Qualifier, D,
                                                 InsertPos, CanonicalType);
  return QualType(T, 0);
}
5673
/// Return the unique AttributedType wrapping \p modifiedType with the given
/// attribute kind; \p equivalentType is the type the attributed type behaves
/// as (and determines the canonical type). \p attr, if provided, must match
/// \p attrKind.
QualType ASTContext::getAttributedType(attr::Kind attrKind,
                                       QualType modifiedType,
                                       QualType equivalentType,
                                       const Attr *attr) const {
  llvm::FoldingSetNodeID id;
  AttributedType::Profile(ID&: id, attrKind, modified: modifiedType, equivalent: equivalentType, attr);

  void *insertPos = nullptr;
  AttributedType *type = AttributedTypes.FindNodeOrInsertPos(ID: id, InsertPos&: insertPos);
  if (type) return QualType(type, 0);

  assert(!attr || attr->getKind() == attrKind);

  // Attributed types canonicalize to the canonical equivalent type.
  QualType canon = getCanonicalType(T: equivalentType);
  type = new (*this, alignof(AttributedType))
      AttributedType(canon, attrKind, attr, modifiedType, equivalentType);

  Types.push_back(Elt: type);
  AttributedTypes.InsertNode(N: type, InsertPos: insertPos);

  return QualType(type, 0);
}
5696
5697QualType ASTContext::getAttributedType(const Attr *attr, QualType modifiedType,
5698 QualType equivalentType) const {
5699 return getAttributedType(attrKind: attr->getKind(), modifiedType, equivalentType, attr);
5700}
5701
5702QualType ASTContext::getAttributedType(NullabilityKind nullability,
5703 QualType modifiedType,
5704 QualType equivalentType) {
5705 switch (nullability) {
5706 case NullabilityKind::NonNull:
5707 return getAttributedType(attrKind: attr::TypeNonNull, modifiedType, equivalentType);
5708
5709 case NullabilityKind::Nullable:
5710 return getAttributedType(attrKind: attr::TypeNullable, modifiedType, equivalentType);
5711
5712 case NullabilityKind::NullableResult:
5713 return getAttributedType(attrKind: attr::TypeNullableResult, modifiedType,
5714 equivalentType);
5715
5716 case NullabilityKind::Unspecified:
5717 return getAttributedType(attrKind: attr::TypeNullUnspecified, modifiedType,
5718 equivalentType);
5719 }
5720
5721 llvm_unreachable("Unknown nullability kind");
5722}
5723
/// Return the unique BTFTagAttributedType wrapping \p Wrapped with the BPF
/// btf_type_tag attribute \p BTFAttr.
QualType ASTContext::getBTFTagAttributedType(const BTFTypeTagAttr *BTFAttr,
                                             QualType Wrapped) const {
  llvm::FoldingSetNodeID ID;
  BTFTagAttributedType::Profile(ID, Wrapped, BTFAttr);

  void *InsertPos = nullptr;
  BTFTagAttributedType *Ty =
      BTFTagAttributedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (Ty)
    return QualType(Ty, 0);

  // The tag is pure sugar: canonicalize straight through to the wrapped type.
  QualType Canon = getCanonicalType(T: Wrapped);
  Ty = new (*this, alignof(BTFTagAttributedType))
      BTFTagAttributedType(Canon, Wrapped, BTFAttr);

  Types.push_back(Elt: Ty);
  BTFTagAttributedTypes.InsertNode(N: Ty, InsertPos);

  return QualType(Ty, 0);
}
5744
5745QualType ASTContext::getOverflowBehaviorType(const OverflowBehaviorAttr *Attr,
5746 QualType Underlying) const {
5747 const IdentifierInfo *II = Attr->getBehaviorKind();
5748 StringRef IdentName = II->getName();
5749 OverflowBehaviorType::OverflowBehaviorKind Kind;
5750 if (IdentName == "wrap") {
5751 Kind = OverflowBehaviorType::OverflowBehaviorKind::Wrap;
5752 } else if (IdentName == "trap") {
5753 Kind = OverflowBehaviorType::OverflowBehaviorKind::Trap;
5754 } else {
5755 return Underlying;
5756 }
5757
5758 return getOverflowBehaviorType(Kind, Wrapped: Underlying);
5759}
5760
5761QualType ASTContext::getOverflowBehaviorType(
5762 OverflowBehaviorType::OverflowBehaviorKind Kind,
5763 QualType Underlying) const {
5764 assert(!Underlying->isOverflowBehaviorType() &&
5765 "Cannot have underlying types that are themselves OBTs");
5766 llvm::FoldingSetNodeID ID;
5767 OverflowBehaviorType::Profile(ID, Underlying, Kind);
5768 void *InsertPos = nullptr;
5769
5770 if (OverflowBehaviorType *OBT =
5771 OverflowBehaviorTypes.FindNodeOrInsertPos(ID, InsertPos)) {
5772 return QualType(OBT, 0);
5773 }
5774
5775 QualType Canonical;
5776 if (!Underlying.isCanonical() || Underlying.hasLocalQualifiers()) {
5777 SplitQualType canonSplit = getCanonicalType(T: Underlying).split();
5778 Canonical = getOverflowBehaviorType(Kind, Underlying: QualType(canonSplit.Ty, 0));
5779 Canonical = getQualifiedType(T: Canonical, Qs: canonSplit.Quals);
5780 assert(!OverflowBehaviorTypes.FindNodeOrInsertPos(ID, InsertPos) &&
5781 "Shouldn't be in the map");
5782 }
5783
5784 OverflowBehaviorType *Ty = new (*this, alignof(OverflowBehaviorType))
5785 OverflowBehaviorType(Canonical, Underlying, Kind);
5786
5787 Types.push_back(Elt: Ty);
5788 OverflowBehaviorTypes.InsertNode(N: Ty, InsertPos);
5789 return QualType(Ty, 0);
5790}
5791
/// Get the uniqued HLSLAttributedResourceType for the given wrapped and
/// contained types plus resource attributes.
QualType ASTContext::getHLSLAttributedResourceType(
    QualType Wrapped, QualType Contained,
    const HLSLAttributedResourceType::Attributes &Attrs) {

  llvm::FoldingSetNodeID ID;
  HLSLAttributedResourceType::Profile(ID, Wrapped, Contained, Attrs);

  // Reuse an existing node if one was already built.
  void *InsertPos = nullptr;
  HLSLAttributedResourceType *Ty =
      HLSLAttributedResourceTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (Ty)
    return QualType(Ty, 0);

  Ty = new (*this, alignof(HLSLAttributedResourceType))
      HLSLAttributedResourceType(Wrapped, Contained, Attrs);

  Types.push_back(Elt: Ty);
  HLSLAttributedResourceTypes.InsertNode(N: Ty, InsertPos);

  return QualType(Ty, 0);
}
5813
/// Get the uniqued HLSLInlineSpirvType for the given SPIR-V opcode, size,
/// alignment and operand list. The operands are tail-allocated after the
/// node itself.
QualType ASTContext::getHLSLInlineSpirvType(uint32_t Opcode, uint32_t Size,
                                            uint32_t Alignment,
                                            ArrayRef<SpirvOperand> Operands) {
  llvm::FoldingSetNodeID ID;
  HLSLInlineSpirvType::Profile(ID, Opcode, Size, Alignment, Operands);

  // Reuse an existing node if one was already built.
  void *InsertPos = nullptr;
  HLSLInlineSpirvType *Ty =
      HLSLInlineSpirvTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (Ty)
    return QualType(Ty, 0);

  // Allocate room for the node plus its trailing operand storage.
  void *Mem = Allocate(
      Size: HLSLInlineSpirvType::totalSizeToAlloc<SpirvOperand>(Counts: Operands.size()),
      Align: alignof(HLSLInlineSpirvType));

  Ty = new (Mem) HLSLInlineSpirvType(Opcode, Size, Alignment, Operands);

  Types.push_back(Elt: Ty);
  HLSLInlineSpirvTypes.InsertNode(N: Ty, InsertPos);

  return QualType(Ty, 0);
}
5837
/// Retrieve a substitution-result type: the type produced by substituting
/// \p Replacement for a template type parameter of \p AssociatedDecl at the
/// given index (and pack index, for pack substitutions).
QualType ASTContext::getSubstTemplateTypeParmType(QualType Replacement,
                                                  Decl *AssociatedDecl,
                                                  unsigned Index,
                                                  UnsignedOrNone PackIndex,
                                                  bool Final) const {
  llvm::FoldingSetNodeID ID;
  SubstTemplateTypeParmType::Profile(ID, Replacement, AssociatedDecl, Index,
                                     PackIndex, Final);
  void *InsertPos = nullptr;
  SubstTemplateTypeParmType *SubstParm =
      SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (!SubstParm) {
    // Trailing storage holds the replacement type only when it is
    // non-canonical (a canonical replacement is recoverable from the
    // canonical type itself).
    void *Mem = Allocate(Size: SubstTemplateTypeParmType::totalSizeToAlloc<QualType>(
                             Counts: !Replacement.isCanonical()),
                         Align: alignof(SubstTemplateTypeParmType));
    SubstParm = new (Mem) SubstTemplateTypeParmType(Replacement, AssociatedDecl,
                                                    Index, PackIndex, Final);
    Types.push_back(Elt: SubstParm);
    SubstTemplateTypeParmTypes.InsertNode(N: SubstParm, InsertPos);
  }

  return QualType(SubstParm, 0);
}
5863
/// Retrieve the type produced by substituting an argument pack for a
/// template type parameter pack of \p AssociatedDecl at the given index.
QualType
ASTContext::getSubstTemplateTypeParmPackType(Decl *AssociatedDecl,
                                             unsigned Index, bool Final,
                                             const TemplateArgument &ArgPack) {
#ifndef NDEBUG
  // A type parameter pack may only be substituted with type arguments.
  for (const auto &P : ArgPack.pack_elements())
    assert(P.getKind() == TemplateArgument::Type && "Pack contains a non-type");
#endif

  llvm::FoldingSetNodeID ID;
  SubstTemplateTypeParmPackType::Profile(ID, AssociatedDecl, Index, Final,
                                         ArgPack);
  void *InsertPos = nullptr;
  if (SubstTemplateTypeParmPackType *SubstParm =
          SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(SubstParm, 0);

  // If the inputs are not already canonical, build the canonical node first
  // from the canonical decl and canonical argument pack.
  QualType Canon;
  {
    TemplateArgument CanonArgPack = getCanonicalTemplateArgument(Arg: ArgPack);
    if (!AssociatedDecl->isCanonicalDecl() ||
        !CanonArgPack.structurallyEquals(Other: ArgPack)) {
      Canon = getSubstTemplateTypeParmPackType(
          AssociatedDecl: AssociatedDecl->getCanonicalDecl(), Index, Final, ArgPack: CanonArgPack);
      // Refresh InsertPos: the recursive call may have rehashed the set.
      [[maybe_unused]] const auto *Nothing =
          SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!Nothing);
    }
  }

  auto *SubstParm = new (*this, alignof(SubstTemplateTypeParmPackType))
      SubstTemplateTypeParmPackType(Canon, AssociatedDecl, Index, Final,
                                    ArgPack);
  Types.push_back(Elt: SubstParm);
  SubstTemplateTypeParmPackTypes.InsertNode(N: SubstParm, InsertPos);
  return QualType(SubstParm, 0);
}
5901
/// Retrieve the type representing the substitution of an argument pack
/// produced by a builtin template. The pack must contain only type
/// arguments.
QualType
ASTContext::getSubstBuiltinTemplatePack(const TemplateArgument &ArgPack) {
  assert(llvm::all_of(ArgPack.pack_elements(),
                      [](const auto &P) {
                        return P.getKind() == TemplateArgument::Type;
                      }) &&
         "Pack contains a non-type");

  llvm::FoldingSetNodeID ID;
  SubstBuiltinTemplatePackType::Profile(ID, ArgPack);

  // Reuse an existing node if one was already built.
  void *InsertPos = nullptr;
  if (auto *T =
          SubstBuiltinTemplatePackTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(T, 0);

  // If the pack is non-canonical, build the canonical node first.
  QualType Canon;
  TemplateArgument CanonArgPack = getCanonicalTemplateArgument(Arg: ArgPack);
  if (!CanonArgPack.structurallyEquals(Other: ArgPack)) {
    Canon = getSubstBuiltinTemplatePack(ArgPack: CanonArgPack);
    // Refresh InsertPos, in case the recursive call above caused rehashing,
    // which would invalidate the bucket pointer.
    [[maybe_unused]] const auto *Nothing =
        SubstBuiltinTemplatePackTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!Nothing);
  }

  auto *PackType = new (*this, alignof(SubstBuiltinTemplatePackType))
      SubstBuiltinTemplatePackType(Canon, ArgPack);
  Types.push_back(Elt: PackType);
  SubstBuiltinTemplatePackTypes.InsertNode(N: PackType, InsertPos);
  return QualType(PackType, 0);
}
5935
/// Retrieve the template type parameter type for a template
/// parameter or parameter pack with the given depth, index, and (optionally)
/// name. A node with a declaration canonicalizes to the decl-less node with
/// the same depth/index/pack flag.
QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
                                             bool ParameterPack,
                                             TemplateTypeParmDecl *TTPDecl) const {
  llvm::FoldingSetNodeID ID;
  TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
  void *InsertPos = nullptr;
  TemplateTypeParmType *TypeParm
    = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (TypeParm)
    return QualType(TypeParm, 0);

  if (TTPDecl) {
    // Canonical form drops the declaration; build (or fetch) it first.
    QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
    TypeParm = new (*this, alignof(TemplateTypeParmType))
        TemplateTypeParmType(Depth, Index, ParameterPack, TTPDecl, Canon);

    // The recursive call may have rehashed; re-derive InsertPos and verify
    // the node still isn't present.
    TemplateTypeParmType *TypeCheck
      = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!TypeCheck && "Template type parameter canonical type broken");
    (void)TypeCheck;
  } else
    // No declaration: this node is its own canonical type.
    TypeParm = new (*this, alignof(TemplateTypeParmType)) TemplateTypeParmType(
        Depth, Index, ParameterPack, /*TTPDecl=*/nullptr, /*Canon=*/QualType());

  Types.push_back(Elt: TypeParm);
  TemplateTypeParmTypes.InsertNode(N: TypeParm, InsertPos);

  return QualType(TypeParm, 0);
}
5969
5970static ElaboratedTypeKeyword
5971getCanonicalElaboratedTypeKeyword(ElaboratedTypeKeyword Keyword) {
5972 switch (Keyword) {
5973 // These are just themselves.
5974 case ElaboratedTypeKeyword::None:
5975 case ElaboratedTypeKeyword::Struct:
5976 case ElaboratedTypeKeyword::Union:
5977 case ElaboratedTypeKeyword::Enum:
5978 case ElaboratedTypeKeyword::Interface:
5979 return Keyword;
5980
5981 // These are equivalent.
5982 case ElaboratedTypeKeyword::Typename:
5983 return ElaboratedTypeKeyword::None;
5984
5985 // These are functionally equivalent, so relying on their equivalence is
5986 // IFNDR. By making them equivalent, we disallow overloading, which at least
5987 // can produce a diagnostic.
5988 case ElaboratedTypeKeyword::Class:
5989 return ElaboratedTypeKeyword::Struct;
5990 }
5991 llvm_unreachable("unexpected keyword kind");
5992}
5993
/// Build a TypeSourceInfo for a template specialization type, filling in the
/// source locations for the elaborated keyword, qualifier, 'template'
/// keyword, name, and the specified argument list.
TypeSourceInfo *ASTContext::getTemplateSpecializationTypeInfo(
    ElaboratedTypeKeyword Keyword, SourceLocation ElaboratedKeywordLoc,
    NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKeywordLoc,
    TemplateName Name, SourceLocation NameLoc,
    const TemplateArgumentListInfo &SpecifiedArgs,
    ArrayRef<TemplateArgument> CanonicalArgs, QualType Underlying) const {
  QualType TST = getTemplateSpecializationType(
      Keyword, T: Name, SpecifiedArgs: SpecifiedArgs.arguments(), CanonicalArgs, Canon: Underlying);

  // Attach the location information onto the freshly built type.
  TypeSourceInfo *TSI = CreateTypeSourceInfo(T: TST);
  TSI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>().set(
      ElaboratedKeywordLoc, QualifierLoc, TemplateKeywordLoc, NameLoc,
      TAL: SpecifiedArgs);
  return TSI;
}
6009
6010QualType ASTContext::getTemplateSpecializationType(
6011 ElaboratedTypeKeyword Keyword, TemplateName Template,
6012 ArrayRef<TemplateArgumentLoc> SpecifiedArgs,
6013 ArrayRef<TemplateArgument> CanonicalArgs, QualType Underlying) const {
6014 SmallVector<TemplateArgument, 4> SpecifiedArgVec;
6015 SpecifiedArgVec.reserve(N: SpecifiedArgs.size());
6016 for (const TemplateArgumentLoc &Arg : SpecifiedArgs)
6017 SpecifiedArgVec.push_back(Elt: Arg.getArgument());
6018
6019 return getTemplateSpecializationType(Keyword, T: Template, SpecifiedArgs: SpecifiedArgVec,
6020 CanonicalArgs, Underlying);
6021}
6022
6023[[maybe_unused]] static bool
6024hasAnyPackExpansions(ArrayRef<TemplateArgument> Args) {
6025 for (const TemplateArgument &Arg : Args)
6026 if (Arg.isPackExpansion())
6027 return true;
6028 return false;
6029}
6030
/// Build the canonical TemplateSpecializationType for an already-canonical
/// template name, keyword and argument list. Callers must canonicalize the
/// inputs first (enforced by the asserts below).
QualType ASTContext::getCanonicalTemplateSpecializationType(
    ElaboratedTypeKeyword Keyword, TemplateName Template,
    ArrayRef<TemplateArgument> Args) const {
  assert(Template ==
         getCanonicalTemplateName(Template, /*IgnoreDeduced=*/true));
  // Only dependent template names may carry an elaborated keyword here.
  assert((Keyword == ElaboratedTypeKeyword::None ||
          Template.getAsDependentTemplateName()));
#ifndef NDEBUG
  for (const auto &Arg : Args)
    assert(Arg.structurallyEquals(getCanonicalTemplateArgument(Arg)));
#endif

  llvm::FoldingSetNodeID ID;
  TemplateSpecializationType::Profile(ID, Keyword, T: Template, Args, Underlying: QualType(),
                                      Context: *this);
  void *InsertPos = nullptr;
  if (auto *T = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(T, 0);

  // Allocate the node with its arguments stored inline after it.
  void *Mem = Allocate(Size: sizeof(TemplateSpecializationType) +
                           sizeof(TemplateArgument) * Args.size(),
                       Align: alignof(TemplateSpecializationType));
  auto *Spec =
      new (Mem) TemplateSpecializationType(Keyword, Template,
                                           /*IsAlias=*/false, Args, QualType());
  assert(Spec->isDependentType() &&
         "canonical template specialization must be dependent");
  Types.push_back(Elt: Spec);
  TemplateSpecializationTypes.InsertNode(N: Spec, InsertPos);
  return QualType(Spec, 0);
}
6062
/// Build a (possibly sugared) TemplateSpecializationType. If \p Underlying
/// is null, it is computed here as the canonical specialization type; when
/// the inputs turn out to be fully canonical already, that canonical type is
/// returned directly instead of allocating a sugar node.
QualType ASTContext::getTemplateSpecializationType(
    ElaboratedTypeKeyword Keyword, TemplateName Template,
    ArrayRef<TemplateArgument> SpecifiedArgs,
    ArrayRef<TemplateArgument> CanonicalArgs, QualType Underlying) const {
  const auto *TD = Template.getAsTemplateDecl(/*IgnoreDeduced=*/true);
  bool IsTypeAlias = TD && TD->isTypeAlias();
  if (Underlying.isNull()) {
    TemplateName CanonTemplate =
        getCanonicalTemplateName(Name: Template, /*IgnoreDeduced=*/true);
    // Only dependent template names keep an elaborated keyword in the
    // canonical form.
    ElaboratedTypeKeyword CanonKeyword =
        CanonTemplate.getAsDependentTemplateName()
            ? getCanonicalElaboratedTypeKeyword(Keyword)
            : ElaboratedTypeKeyword::None;
    // Track whether any input differs from its canonical form; if nothing
    // does, we can return the canonical type itself.
    bool NonCanonical = Template != CanonTemplate || Keyword != CanonKeyword;
    SmallVector<TemplateArgument, 4> CanonArgsVec;
    if (CanonicalArgs.empty()) {
      // Canonicalize the specified arguments ourselves.
      CanonArgsVec = SmallVector<TemplateArgument, 4>(SpecifiedArgs);
      NonCanonical |= canonicalizeTemplateArguments(Args: CanonArgsVec);
      CanonicalArgs = CanonArgsVec;
    } else {
      // Caller supplied canonical arguments; compare element-wise.
      NonCanonical |= !llvm::equal(
          LRange&: SpecifiedArgs, RRange&: CanonicalArgs,
          P: [](const TemplateArgument &A, const TemplateArgument &B) {
            return A.structurallyEquals(Other: B);
          });
    }

    // We can get here with an alias template when the specialization
    // contains a pack expansion that does not match up with a parameter
    // pack, or a builtin template which cannot be resolved due to dependency.
    assert((!isa_and_nonnull<TypeAliasTemplateDecl>(TD) ||
            hasAnyPackExpansions(CanonicalArgs)) &&
           "Caller must compute aliased type");
    IsTypeAlias = false;

    Underlying = getCanonicalTemplateSpecializationType(
        Keyword: CanonKeyword, Template: CanonTemplate, Args: CanonicalArgs);
    if (!NonCanonical)
      return Underlying;
  }
  // Allocate the sugar node; type aliases store the aliased type in one
  // extra trailing QualType slot.
  void *Mem = Allocate(Size: sizeof(TemplateSpecializationType) +
                           sizeof(TemplateArgument) * SpecifiedArgs.size() +
                           (IsTypeAlias ? sizeof(QualType) : 0),
                       Align: alignof(TemplateSpecializationType));
  auto *Spec = new (Mem) TemplateSpecializationType(
      Keyword, Template, IsTypeAlias, SpecifiedArgs, Underlying);
  // Note: sugared specializations are not uniqued in a folding set.
  Types.push_back(Elt: Spec);
  return QualType(Spec, 0);
}
6112
/// Get the uniqued ParenType sugaring \p InnerType. Canonicalizes to the
/// canonical inner type (parentheses are pure sugar).
QualType
ASTContext::getParenType(QualType InnerType) const {
  llvm::FoldingSetNodeID ID;
  ParenType::Profile(ID, Inner: InnerType);

  void *InsertPos = nullptr;
  ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  QualType Canon = InnerType;
  if (!Canon.isCanonical()) {
    Canon = getCanonicalType(T: InnerType);
    // Re-derive InsertPos; canonicalization may have touched the set.
    ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!CheckT && "Paren canonical type broken");
    (void)CheckT;
  }

  T = new (*this, alignof(ParenType)) ParenType(InnerType, Canon);
  Types.push_back(Elt: T);
  ParenTypes.InsertNode(N: T, InsertPos);
  return QualType(T, 0);
}
6136
6137QualType
6138ASTContext::getMacroQualifiedType(QualType UnderlyingTy,
6139 const IdentifierInfo *MacroII) const {
6140 QualType Canon = UnderlyingTy;
6141 if (!Canon.isCanonical())
6142 Canon = getCanonicalType(T: UnderlyingTy);
6143
6144 auto *newType = new (*this, alignof(MacroQualifiedType))
6145 MacroQualifiedType(UnderlyingTy, Canon, MacroII);
6146 Types.push_back(Elt: newType);
6147 return QualType(newType, 0);
6148}
6149
/// Get the uniqued DependentNameType for (keyword, nested-name-specifier,
/// identifier). Canonicalizes the keyword and specifier if needed.
QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
                                          NestedNameSpecifier NNS,
                                          const IdentifierInfo *Name) const {
  llvm::FoldingSetNodeID ID;
  DependentNameType::Profile(ID, Keyword, NNS, Name);

  void *InsertPos = nullptr;
  if (DependentNameType *T =
          DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(T, 0);

  ElaboratedTypeKeyword CanonKeyword =
      getCanonicalElaboratedTypeKeyword(Keyword);
  NestedNameSpecifier CanonNNS = NNS.getCanonical();

  // If anything differs from its canonical form, build the canonical node
  // first and point this sugar node at it.
  QualType Canon;
  if (CanonKeyword != Keyword || CanonNNS != NNS) {
    Canon = getDependentNameType(Keyword: CanonKeyword, NNS: CanonNNS, Name);
    // Refresh InsertPos: the recursive call may have rehashed the set.
    [[maybe_unused]] DependentNameType *T =
        DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!T && "broken canonicalization");
    assert(Canon.isCanonical());
  }

  DependentNameType *T = new (*this, alignof(DependentNameType))
      DependentNameType(Keyword, NNS, Name, Canon);
  Types.push_back(Elt: T);
  DependentNameTypes.InsertNode(N: T, InsertPos);
  return QualType(T, 0);
}
6180
/// Build the template argument that a template parameter "injects" when it
/// names itself: a type argument for type parameters, an expression for
/// non-type parameters, and a template name for template template
/// parameters. Packs are wrapped in the corresponding expansion form.
TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) const {
  TemplateArgument Arg;
  if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Val: Param)) {
    // Type parameter: the argument is the parameter's own type.
    QualType ArgType = getTypeDeclType(Decl: TTP);
    if (TTP->isParameterPack())
      ArgType = getPackExpansionType(Pattern: ArgType, NumExpansions: std::nullopt);

    Arg = TemplateArgument(ArgType);
  } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Val: Param)) {
    // Non-type parameter: the argument is a DeclRefExpr naming the
    // parameter itself.
    QualType T =
        NTTP->getType().getNonPackExpansionType().getNonLValueExprType(Context: *this);
    // For class NTTPs, ensure we include the 'const' so the type matches that
    // of a real template argument.
    // FIXME: It would be more faithful to model this as something like an
    // lvalue-to-rvalue conversion applied to a const-qualified lvalue.
    ExprValueKind VK;
    if (T->isRecordType()) {
      // C++ [temp.param]p8: An id-expression naming a non-type
      // template-parameter of class type T denotes a static storage duration
      // object of type const T.
      T.addConst();
      VK = VK_LValue;
    } else {
      VK = Expr::getValueKindForType(T: NTTP->getType());
    }
    Expr *E = new (*this)
        DeclRefExpr(*this, NTTP, /*RefersToEnclosingVariableOrCapture=*/false,
                    T, VK, NTTP->getLocation());

    if (NTTP->isParameterPack())
      E = new (*this) PackExpansionExpr(E, NTTP->getLocation(), std::nullopt);
    Arg = TemplateArgument(E, /*IsCanonical=*/false);
  } else {
    // Template template parameter: the argument is the template name.
    auto *TTP = cast<TemplateTemplateParmDecl>(Val: Param);
    TemplateName Name = getQualifiedTemplateName(
        /*Qualifier=*/std::nullopt, /*TemplateKeyword=*/false,
        Template: TemplateName(TTP));
    if (TTP->isParameterPack())
      Arg = TemplateArgument(Name, /*NumExpansions=*/std::nullopt);
    else
      Arg = TemplateArgument(Name);
  }

  // A pack parameter injects a one-element pack containing its expansion.
  if (Param->isTemplateParameterPack())
    Arg =
        TemplateArgument::CreatePackCopy(Context&: const_cast<ASTContext &>(*this), Args: Arg);

  return Arg;
}
6230
/// Get the uniqued PackExpansionType for \p Pattern (i.e. `Pattern...`).
/// When \p ExpectPackInType is set, the pattern must actually contain an
/// unexpanded parameter pack.
QualType ASTContext::getPackExpansionType(QualType Pattern,
                                          UnsignedOrNone NumExpansions,
                                          bool ExpectPackInType) const {
  assert((!ExpectPackInType || Pattern->containsUnexpandedParameterPack()) &&
         "Pack expansions must expand one or more parameter packs");

  llvm::FoldingSetNodeID ID;
  PackExpansionType::Profile(ID, Pattern, NumExpansions);

  void *InsertPos = nullptr;
  PackExpansionType *T = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  QualType Canon;
  if (!Pattern.isCanonical()) {
    // The canonical pattern may no longer contain a pack (e.g. after
    // substitution), so don't enforce the pack check on the recursive call.
    Canon = getPackExpansionType(Pattern: getCanonicalType(T: Pattern), NumExpansions,
                                 /*ExpectPackInType=*/false);

    // Find the insert position again, in case we inserted an element into
    // PackExpansionTypes and invalidated our insert position.
    PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  T = new (*this, alignof(PackExpansionType))
      PackExpansionType(Pattern, Canon, NumExpansions);
  Types.push_back(Elt: T);
  PackExpansionTypes.InsertNode(N: T, InsertPos);
  return QualType(T, 0);
}
6261
6262/// CmpProtocolNames - Comparison predicate for sorting protocols
6263/// alphabetically.
6264static int CmpProtocolNames(ObjCProtocolDecl *const *LHS,
6265 ObjCProtocolDecl *const *RHS) {
6266 return DeclarationName::compare(LHS: (*LHS)->getDeclName(), RHS: (*RHS)->getDeclName());
6267}
6268
6269static bool areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) {
6270 if (Protocols.empty()) return true;
6271
6272 if (Protocols[0]->getCanonicalDecl() != Protocols[0])
6273 return false;
6274
6275 for (unsigned i = 1; i != Protocols.size(); ++i)
6276 if (CmpProtocolNames(LHS: &Protocols[i - 1], RHS: &Protocols[i]) >= 0 ||
6277 Protocols[i]->getCanonicalDecl() != Protocols[i])
6278 return false;
6279 return true;
6280}
6281
6282static void
6283SortAndUniqueProtocols(SmallVectorImpl<ObjCProtocolDecl *> &Protocols) {
6284 // Sort protocols, keyed by name.
6285 llvm::array_pod_sort(Start: Protocols.begin(), End: Protocols.end(), Compare: CmpProtocolNames);
6286
6287 // Canonicalize.
6288 for (ObjCProtocolDecl *&P : Protocols)
6289 P = P->getCanonicalDecl();
6290
6291 // Remove duplicates.
6292 auto ProtocolsEnd = llvm::unique(R&: Protocols);
6293 Protocols.erase(CS: ProtocolsEnd, CE: Protocols.end());
6294}
6295
6296QualType ASTContext::getObjCObjectType(QualType BaseType,
6297 ObjCProtocolDecl * const *Protocols,
6298 unsigned NumProtocols) const {
6299 return getObjCObjectType(Base: BaseType, typeArgs: {}, protocols: ArrayRef(Protocols, NumProtocols),
6300 /*isKindOf=*/false);
6301}
6302
/// Get the uniqued ObjCObjectType for a base type plus optional type
/// arguments, protocol qualifiers and __kindof. Canonicalizes to the node
/// with canonical base, canonical type arguments, and a sorted-and-uniqued
/// protocol list.
QualType ASTContext::getObjCObjectType(
           QualType baseType,
           ArrayRef<QualType> typeArgs,
           ArrayRef<ObjCProtocolDecl *> protocols,
           bool isKindOf) const {
  // If the base type is an interface and there aren't any protocols or
  // type arguments to add, then the interface type will do just fine.
  if (typeArgs.empty() && protocols.empty() && !isKindOf &&
      isa<ObjCInterfaceType>(Val: baseType))
    return baseType;

  // Look in the folding set for an existing type.
  llvm::FoldingSetNodeID ID;
  ObjCObjectTypeImpl::Profile(ID, Base: baseType, typeArgs, protocols, isKindOf);
  void *InsertPos = nullptr;
  if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(QT, 0);

  // Determine the type arguments to be used for canonicalization,
  // which may be explicitly specified here or written on the base
  // type.
  ArrayRef<QualType> effectiveTypeArgs = typeArgs;
  if (effectiveTypeArgs.empty()) {
    if (const auto *baseObject = baseType->getAs<ObjCObjectType>())
      effectiveTypeArgs = baseObject->getTypeArgs();
  }

  // Build the canonical type, which has the canonical base type and a
  // sorted-and-uniqued list of protocols and the type arguments
  // canonicalized.
  QualType canonical;
  bool typeArgsAreCanonical = llvm::all_of(
      Range&: effectiveTypeArgs, P: [&](QualType type) { return type.isCanonical(); });
  bool protocolsSorted = areSortedAndUniqued(Protocols: protocols);
  if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) {
    // Determine the canonical type arguments.
    ArrayRef<QualType> canonTypeArgs;
    SmallVector<QualType, 4> canonTypeArgsVec;
    if (!typeArgsAreCanonical) {
      canonTypeArgsVec.reserve(N: effectiveTypeArgs.size());
      for (auto typeArg : effectiveTypeArgs)
        canonTypeArgsVec.push_back(Elt: getCanonicalType(T: typeArg));
      canonTypeArgs = canonTypeArgsVec;
    } else {
      canonTypeArgs = effectiveTypeArgs;
    }

    // Determine the canonical (sorted, uniqued, canonical-decl) protocols.
    ArrayRef<ObjCProtocolDecl *> canonProtocols;
    SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec;
    if (!protocolsSorted) {
      canonProtocolsVec.append(in_start: protocols.begin(), in_end: protocols.end());
      SortAndUniqueProtocols(Protocols&: canonProtocolsVec);
      canonProtocols = canonProtocolsVec;
    } else {
      canonProtocols = protocols;
    }

    canonical = getObjCObjectType(baseType: getCanonicalType(T: baseType), typeArgs: canonTypeArgs,
                                  protocols: canonProtocols, isKindOf);

    // Regenerate InsertPos (the recursive call may have rehashed the set).
    ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  // Allocate the node with type arguments and protocols stored inline.
  unsigned size = sizeof(ObjCObjectTypeImpl);
  size += typeArgs.size() * sizeof(QualType);
  size += protocols.size() * sizeof(ObjCProtocolDecl *);
  void *mem = Allocate(Size: size, Align: alignof(ObjCObjectTypeImpl));
  auto *T =
    new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols,
                                 isKindOf);

  Types.push_back(Elt: T);
  ObjCObjectTypes.InsertNode(N: T, InsertPos);
  return QualType(T, 0);
}
6379
/// Apply Objective-C protocol qualifiers to the given type.
/// If this is for the canonical type of a type parameter, we can apply
/// protocol qualifiers on the ObjCObjectPointerType.
/// On failure, sets \p hasError and returns \p type unchanged.
QualType
ASTContext::applyObjCProtocolQualifiers(QualType type,
                  ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError,
                  bool allowOnPointerType) const {
  hasError = false;

  // Type parameters take the qualifiers directly.
  if (const auto *objT = dyn_cast<ObjCTypeParamType>(Val: type.getTypePtr())) {
    return getObjCTypeParamType(Decl: objT->getDecl(), protocols);
  }

  // Apply protocol qualifiers to ObjCObjectPointerType.
  if (allowOnPointerType) {
    if (const auto *objPtr =
            dyn_cast<ObjCObjectPointerType>(Val: type.getTypePtr())) {
      const ObjCObjectType *objT = objPtr->getObjectType();
      // Merge protocol lists and construct ObjCObjectType.
      // Note: the local 'protocols' below deliberately shadows the parameter
      // with the merged (existing + new) list.
      SmallVector<ObjCProtocolDecl*, 8> protocolsVec;
      protocolsVec.append(in_start: objT->qual_begin(),
                          in_end: objT->qual_end());
      protocolsVec.append(in_start: protocols.begin(), in_end: protocols.end());
      ArrayRef<ObjCProtocolDecl *> protocols = protocolsVec;
      type = getObjCObjectType(
             baseType: objT->getBaseType(),
             typeArgs: objT->getTypeArgsAsWritten(),
             protocols,
             isKindOf: objT->isKindOfTypeAsWritten());
      return getObjCObjectPointerType(OIT: type);
    }
  }

  // Apply protocol qualifiers to ObjCObjectType.
  if (const auto *objT = dyn_cast<ObjCObjectType>(Val: type.getTypePtr())){
    // FIXME: Check for protocols to which the class type is already
    // known to conform.

    return getObjCObjectType(baseType: objT->getBaseType(),
                             typeArgs: objT->getTypeArgsAsWritten(),
                             protocols,
                             isKindOf: objT->isKindOfTypeAsWritten());
  }

  // If the canonical type is ObjCObjectType, ...
  if (type->isObjCObjectType()) {
    // Silently overwrite any existing protocol qualifiers.
    // TODO: determine whether that's the right thing to do.

    // FIXME: Check for protocols to which the class type is already
    // known to conform.
    return getObjCObjectType(baseType: type, typeArgs: {}, protocols, isKindOf: false);
  }

  // id<protocol-list>
  if (type->isObjCIdType()) {
    const auto *objPtr = type->castAs<ObjCObjectPointerType>();
    type = getObjCObjectType(baseType: ObjCBuiltinIdTy, typeArgs: {}, protocols,
                             isKindOf: objPtr->isKindOfType());
    return getObjCObjectPointerType(OIT: type);
  }

  // Class<protocol-list>
  if (type->isObjCClassType()) {
    const auto *objPtr = type->castAs<ObjCObjectPointerType>();
    type = getObjCObjectType(baseType: ObjCBuiltinClassTy, typeArgs: {}, protocols,
                             isKindOf: objPtr->isKindOfType());
    return getObjCObjectPointerType(OIT: type);
  }

  // No form we know how to qualify: report the error to the caller.
  hasError = true;
  return type;
}
6453
/// Get the uniqued ObjCTypeParamType for a type parameter declaration with
/// an optional list of protocol qualifiers. Canonicalizes to the (protocol-
/// qualified) canonical underlying (bound) type.
QualType
ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl,
                                 ArrayRef<ObjCProtocolDecl *> protocols) const {
  // Look in the folding set for an existing type.
  llvm::FoldingSetNodeID ID;
  ObjCTypeParamType::Profile(ID, OTPDecl: Decl, CanonicalType: Decl->getUnderlyingType(), protocols);
  void *InsertPos = nullptr;
  if (ObjCTypeParamType *TypeParam =
          ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(TypeParam, 0);

  // We canonicalize to the underlying type.
  QualType Canonical = getCanonicalType(T: Decl->getUnderlyingType());
  if (!protocols.empty()) {
    // Apply the protocol qualifiers.
    bool hasError;
    Canonical = getCanonicalType(T: applyObjCProtocolQualifiers(
        type: Canonical, protocols, hasError, allowOnPointerType: true /*allowOnPointerType*/));
    assert(!hasError && "Error when apply protocol qualifier to bound type");
  }

  // Allocate the node with the protocol list stored inline.
  unsigned size = sizeof(ObjCTypeParamType);
  size += protocols.size() * sizeof(ObjCProtocolDecl *);
  void *mem = Allocate(Size: size, Align: alignof(ObjCTypeParamType));
  auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols);

  Types.push_back(Elt: newType);
  ObjCTypeParamTypes.InsertNode(N: newType, InsertPos);
  return QualType(newType, 0);
}
6484
/// Re-derive \p New's bound type information from \p Orig's underlying type,
/// preserving the protocol qualifiers already attached to \p New.
void ASTContext::adjustObjCTypeParamBoundType(const ObjCTypeParamDecl *Orig,
                                              ObjCTypeParamDecl *New) const {
  New->setTypeSourceInfo(getTrivialTypeSourceInfo(T: Orig->getUnderlyingType()));
  // Update TypeForDecl after updating TypeSourceInfo.
  auto *NewTypeParamTy = cast<ObjCTypeParamType>(Val: New->TypeForDecl);
  // Carry over the protocol qualifiers from the existing type.
  SmallVector<ObjCProtocolDecl *, 8> protocols;
  protocols.append(in_start: NewTypeParamTy->qual_begin(), in_end: NewTypeParamTy->qual_end());
  QualType UpdatedTy = getObjCTypeParamType(Decl: New, protocols);
  New->TypeForDecl = UpdatedTy.getTypePtr();
}
6495
6496/// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's
6497/// protocol list adopt all protocols in QT's qualified-id protocol
6498/// list.
6499bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT,
6500 ObjCInterfaceDecl *IC) {
6501 if (!QT->isObjCQualifiedIdType())
6502 return false;
6503
6504 if (const auto *OPT = QT->getAs<ObjCObjectPointerType>()) {
6505 // If both the right and left sides have qualifiers.
6506 for (auto *Proto : OPT->quals()) {
6507 if (!IC->ClassImplementsProtocol(lProto: Proto, lookupCategory: false))
6508 return false;
6509 }
6510 return true;
6511 }
6512 return false;
6513}
6514
/// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in
/// QT's qualified-id protocol list adopt all protocols in IDecl's list
/// of protocols.
bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT,
                                                ObjCInterfaceDecl *IDecl) {
  // Only meaningful for qualified-id types (id<P1, ...>).
  if (!QT->isObjCQualifiedIdType())
    return false;
  const auto *OPT = QT->getAs<ObjCObjectPointerType>();
  if (!OPT)
    return false;
  if (!IDecl->hasDefinition())
    return false;
  // Gather every protocol IDecl conforms to, directly or transitively.
  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols;
  CollectInheritedProtocols(CDecl: IDecl, Protocols&: InheritedProtocols);
  if (InheritedProtocols.empty())
    return false;
  // Check that if every protocol in list of id<plist> conforms to a protocol
  // of IDecl's, then bridge casting is ok.
  bool Conforms = false;
  for (auto *Proto : OPT->quals()) {
    Conforms = false;
    for (auto *PI : InheritedProtocols) {
      if (ProtocolCompatibleWithProtocol(lProto: Proto, rProto: PI)) {
        Conforms = true;
        break;
      }
    }
    // One protocol on the qualified id with no compatible counterpart
    // disqualifies this first direction of the check.
    if (!Conforms)
      break;
  }
  if (Conforms)
    return true;

  // Second direction: every protocol IDecl inherits must be adopted by
  // (compatible with) some protocol listed on the qualified id.
  for (auto *PI : InheritedProtocols) {
    // If both the right and left sides have qualifiers.
    bool Adopts = false;
    for (auto *Proto : OPT->quals()) {
      // return 'true' if 'PI' is in the inheritance hierarchy of Proto
      if ((Adopts = ProtocolCompatibleWithProtocol(lProto: PI, rProto: Proto)))
        break;
    }
    if (!Adopts)
      return false;
  }
  return true;
}
6561
/// getObjCObjectPointerType - Return a ObjCObjectPointerType type for
/// the given object type.
QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const {
  // These nodes are uniqued: profile the object type and look for an
  // existing node before allocating a new one.
  llvm::FoldingSetNodeID ID;
  ObjCObjectPointerType::Profile(ID, T: ObjectT);

  void *InsertPos = nullptr;
  if (ObjCObjectPointerType *QT =
          ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(QT, 0);

  // Find the canonical object type.
  QualType Canonical;
  if (!ObjectT.isCanonical()) {
    Canonical = getObjCObjectPointerType(ObjectT: getCanonicalType(T: ObjectT));

    // Regenerate InsertPos.
    // (The recursive call above may have inserted nodes, invalidating the
    // insertion position computed by the earlier lookup.)
    ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  // No match.
  void *Mem =
      Allocate(Size: sizeof(ObjCObjectPointerType), Align: alignof(ObjCObjectPointerType));
  auto *QType =
      new (Mem) ObjCObjectPointerType(Canonical, ObjectT);

  Types.push_back(Elt: QType);
  ObjCObjectPointerTypes.InsertNode(N: QType, InsertPos);
  return QualType(QType, 0);
}
6592
/// getObjCInterfaceType - Return the unique reference to the type for the
/// specified ObjC interface decl. The list of protocols is optional.
QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
                                          ObjCInterfaceDecl *PrevDecl) const {
  // The type node is cached directly on the declaration.
  if (Decl->TypeForDecl)
    return QualType(Decl->TypeForDecl, 0);

  if (PrevDecl) {
    // All redeclarations of an interface share a single type node.
    assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl");
    Decl->TypeForDecl = PrevDecl->TypeForDecl;
    return QualType(PrevDecl->TypeForDecl, 0);
  }

  // Prefer the definition, if there is one.
  if (const ObjCInterfaceDecl *Def = Decl->getDefinition())
    Decl = Def;

  void *Mem = Allocate(Size: sizeof(ObjCInterfaceType), Align: alignof(ObjCInterfaceType));
  auto *T = new (Mem) ObjCInterfaceType(Decl);
  Decl->TypeForDecl = T;
  Types.push_back(Elt: T);
  return QualType(T, 0);
}
6616
/// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique
/// TypeOfExprType AST's (since expression's are never shared). For example,
/// multiple declarations that refer to "typeof(x)" all contain different
/// DeclRefExpr's. This doesn't effect the type checker, since it operates
/// on canonical type's (which are always unique).
QualType ASTContext::getTypeOfExprType(Expr *tofExpr, TypeOfKind Kind) const {
  TypeOfExprType *toe;
  if (tofExpr->isTypeDependent()) {
    // Dependent typeof(expr) types *are* uniqued (on the expression's
    // profile) so that equivalent dependent specifiers name one canonical
    // type.
    llvm::FoldingSetNodeID ID;
    DependentTypeOfExprType::Profile(ID, Context: *this, E: tofExpr,
                                     IsUnqual: Kind == TypeOfKind::Unqualified);

    void *InsertPos = nullptr;
    DependentTypeOfExprType *Canon =
        DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos);
    if (Canon) {
      // We already have a "canonical" version of an identical, dependent
      // typeof(expr) type. Use that as our canonical type.
      toe = new (*this, alignof(TypeOfExprType)) TypeOfExprType(
          *this, tofExpr, Kind, QualType((TypeOfExprType *)Canon, 0));
    } else {
      // Build a new, canonical typeof(expr) type.
      Canon = new (*this, alignof(DependentTypeOfExprType))
          DependentTypeOfExprType(*this, tofExpr, Kind);
      DependentTypeOfExprTypes.InsertNode(N: Canon, InsertPos);
      toe = Canon;
    }
  } else {
    // Non-dependent: the canonical type is just the canonical form of the
    // expression's type.
    QualType Canonical = getCanonicalType(T: tofExpr->getType());
    toe = new (*this, alignof(TypeOfExprType))
        TypeOfExprType(*this, tofExpr, Kind, Canonical);
  }
  Types.push_back(Elt: toe);
  return QualType(toe, 0);
}
6652
6653/// getTypeOfType - Unlike many "get<Type>" functions, we don't unique
6654/// TypeOfType nodes. The only motivation to unique these nodes would be
6655/// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be
6656/// an issue. This doesn't affect the type checker, since it operates
6657/// on canonical types (which are always unique).
6658QualType ASTContext::getTypeOfType(QualType tofType, TypeOfKind Kind) const {
6659 QualType Canonical = getCanonicalType(T: tofType);
6660 auto *tot = new (*this, alignof(TypeOfType))
6661 TypeOfType(*this, tofType, Canonical, Kind);
6662 Types.push_back(Elt: tot);
6663 return QualType(tot, 0);
6664}
6665
6666/// getReferenceQualifiedType - Given an expr, will return the type for
6667/// that expression, as in [dcl.type.simple]p4 but without taking id-expressions
6668/// and class member access into account.
6669QualType ASTContext::getReferenceQualifiedType(const Expr *E) const {
6670 // C++11 [dcl.type.simple]p4:
6671 // [...]
6672 QualType T = E->getType();
6673 switch (E->getValueKind()) {
6674 // - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the
6675 // type of e;
6676 case VK_XValue:
6677 return getRValueReferenceType(T);
6678 // - otherwise, if e is an lvalue, decltype(e) is T&, where T is the
6679 // type of e;
6680 case VK_LValue:
6681 return getLValueReferenceType(T);
6682 // - otherwise, decltype(e) is the type of e.
6683 case VK_PRValue:
6684 return T;
6685 }
6686 llvm_unreachable("Unknown value kind");
6687}
6688
/// Unlike many "get<Type>" functions, we don't unique DecltypeType
/// nodes. This would never be helpful, since each such type has its own
/// expression, and would not give a significant memory saving, since there
/// is an Expr tree under each such type.
QualType ASTContext::getDecltypeType(Expr *E, QualType UnderlyingType) const {
  // C++11 [temp.type]p2:
  //   If an expression e involves a template parameter, decltype(e) denotes a
  //   unique dependent type. Two such decltype-specifiers refer to the same
  //   type only if their expressions are equivalent (14.5.6.1).
  QualType CanonType;
  if (!E->isInstantiationDependent()) {
    // Non-dependent: canonical type is the canonical underlying type.
    CanonType = getCanonicalType(T: UnderlyingType);
  } else if (!UnderlyingType.isNull()) {
    // Dependent but with a computed underlying type: canonicalize through
    // the underlying-type-free (fully dependent) form below.
    CanonType = getDecltypeType(E, UnderlyingType: QualType());
  } else {
    // Fully dependent: these nodes ARE uniqued, keyed on the expression, so
    // that equivalent decltype-specifiers denote the same type.
    llvm::FoldingSetNodeID ID;
    DependentDecltypeType::Profile(ID, Context: *this, E);

    void *InsertPos = nullptr;
    if (DependentDecltypeType *Canon =
            DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos))
      return QualType(Canon, 0);

    // Build a new, canonical decltype(expr) type.
    auto *DT =
        new (*this, alignof(DependentDecltypeType)) DependentDecltypeType(E);
    DependentDecltypeTypes.InsertNode(N: DT, InsertPos);
    Types.push_back(Elt: DT);
    return QualType(DT, 0);
  }
  auto *DT = new (*this, alignof(DecltypeType))
      DecltypeType(E, UnderlyingType, CanonType);
  Types.push_back(Elt: DT);
  return QualType(DT, 0);
}
6724
/// Return a pack-indexing type, e.g. 'Pattern...[IndexExpr]'.
/// \param FullySubstituted whether pack substitution has completed, so that
///        \p Expansions holds the concrete expanded types.
/// \param Index the selected index within \p Expansions, if known.
QualType ASTContext::getPackIndexingType(QualType Pattern, Expr *IndexExpr,
                                         bool FullySubstituted,
                                         ArrayRef<QualType> Expansions,
                                         UnsignedOrNone Index) const {
  QualType Canonical;
  if (FullySubstituted && Index) {
    // The selection is fully resolved; canonically this is just the selected
    // expansion's canonical type.
    Canonical = getCanonicalType(T: Expansions[*Index]);
  } else {
    // Still dependent: unique a canonical dependent node keyed on the
    // canonical pattern, the index expression, and the expansions.
    llvm::FoldingSetNodeID ID;
    PackIndexingType::Profile(ID, Context: *this, Pattern: Pattern.getCanonicalType(), E: IndexExpr,
                              FullySubstituted, Expansions);
    void *InsertPos = nullptr;
    PackIndexingType *Canon =
        DependentPackIndexingTypes.FindNodeOrInsertPos(ID, InsertPos);
    if (!Canon) {
      // Expansions live in trailing storage, hence totalSizeToAlloc.
      void *Mem = Allocate(
          Size: PackIndexingType::totalSizeToAlloc<QualType>(Counts: Expansions.size()),
          Align: TypeAlignment);
      Canon =
          new (Mem) PackIndexingType(QualType(), Pattern.getCanonicalType(),
                                     IndexExpr, FullySubstituted, Expansions);
      DependentPackIndexingTypes.InsertNode(N: Canon, InsertPos);
    }
    Canonical = QualType(Canon, 0);
  }

  // Build the (possibly sugared) node pointing at its canonical type.
  void *Mem =
      Allocate(Size: PackIndexingType::totalSizeToAlloc<QualType>(Counts: Expansions.size()),
               Align: TypeAlignment);
  auto *T = new (Mem) PackIndexingType(Canonical, Pattern, IndexExpr,
                                       FullySubstituted, Expansions);
  Types.push_back(Elt: T);
  return QualType(T, 0);
}
6759
/// getUnaryTransformType - Return the uniqued reference to a unary type
/// transformation (e.g. __underlying_type) of \p BaseType. Despite what an
/// older comment here claimed, these nodes ARE uniqued, via the
/// UnaryTransformTypes folding set below.
QualType
ASTContext::getUnaryTransformType(QualType BaseType, QualType UnderlyingType,
                                  UnaryTransformType::UTTKind Kind) const {

  llvm::FoldingSetNodeID ID;
  UnaryTransformType::Profile(ID, BaseType, UnderlyingType, UKind: Kind);

  void *InsertPos = nullptr;
  if (UnaryTransformType *UT =
          UnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(UT, 0);

  QualType CanonType;
  if (!BaseType->isDependentType()) {
    // Non-dependent: the transform has been computed, so the canonical type
    // is the canonical form of the result.
    CanonType = UnderlyingType.getCanonicalType();
  } else {
    // Dependent: the result is not yet known; canonicalize on the base type.
    assert(UnderlyingType.isNull() || BaseType == UnderlyingType);
    UnderlyingType = QualType();
    if (QualType CanonBase = BaseType.getCanonicalType();
        BaseType != CanonBase) {
      CanonType = getUnaryTransformType(BaseType: CanonBase, UnderlyingType: QualType(), Kind);
      assert(CanonType.isCanonical());

      // Find the insertion position again.
      // (The recursive call may have inserted nodes into the folding set.)
      [[maybe_unused]] UnaryTransformType *UT =
          UnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!UT && "broken canonicalization");
    }
  }

  auto *UT = new (*this, alignof(UnaryTransformType))
      UnaryTransformType(BaseType, UnderlyingType, Kind, CanonType);
  UnaryTransformTypes.InsertNode(N: UT, InsertPos);
  Types.push_back(Elt: UT);
  return QualType(UT, 0);
}
6798
/// getAutoType - Return the uniqued reference to the 'auto' type which has been
/// deduced to the given type, or to the canonical undeduced 'auto' type, or the
/// canonical deduced-but-dependent 'auto' type.
QualType
ASTContext::getAutoType(DeducedKind DK, QualType DeducedAsType,
                        AutoTypeKeyword Keyword,
                        TemplateDecl *TypeConstraintConcept,
                        ArrayRef<TemplateArgument> TypeConstraintArgs) const {
  // The plain undeduced, unconstrained 'auto' has a dedicated singleton node.
  if (DK == DeducedKind::Undeduced && Keyword == AutoTypeKeyword::Auto &&
      !TypeConstraintConcept) {
    assert(DeducedAsType.isNull() && "");
    assert(TypeConstraintArgs.empty() && "");
    return getAutoDeductType();
  }

  // Look in the folding set for an existing type.
  llvm::FoldingSetNodeID ID;
  AutoType::Profile(ID, Context: *this, DK, Deduced: DeducedAsType, Keyword,
                    CD: TypeConstraintConcept, Arguments: TypeConstraintArgs);
  if (auto const AT_iter = AutoTypes.find(Val: ID); AT_iter != AutoTypes.end())
    return QualType(AT_iter->getSecond(), 0);

  if (DK == DeducedKind::Deduced) {
    assert(!DeducedAsType.isNull() && "deduced type must be provided");
  } else {
    assert(DeducedAsType.isNull() && "deduced type must not be provided");
    if (TypeConstraintConcept) {
      // If the constraint's concept or any of its arguments is non-canonical,
      // first build the fully-canonical constrained 'auto' and stash it in
      // the DeducedAsType slot for the node constructed below.
      bool AnyNonCanonArgs = false;
      auto *CanonicalConcept =
          cast<TemplateDecl>(Val: TypeConstraintConcept->getCanonicalDecl());
      auto CanonicalConceptArgs = ::getCanonicalTemplateArguments(
          C: *this, Args: TypeConstraintArgs, AnyNonCanonArgs);
      if (TypeConstraintConcept != CanonicalConcept || AnyNonCanonArgs)
        DeducedAsType = getAutoType(DK, DeducedAsType: QualType(), Keyword, TypeConstraintConcept: CanonicalConcept,
                                    TypeConstraintArgs: CanonicalConceptArgs);
    }
  }

  // Constraint arguments are stored in trailing storage after the AutoType.
  void *Mem = Allocate(Size: sizeof(AutoType) +
                           sizeof(TemplateArgument) * TypeConstraintArgs.size(),
                       Align: alignof(AutoType));
  auto *AT = new (Mem) AutoType(DK, DeducedAsType, Keyword,
                                TypeConstraintConcept, TypeConstraintArgs);
#ifndef NDEBUG
  // Sanity check: the constructed node must profile identically to the key
  // we looked up with, or the cache would be corrupted.
  llvm::FoldingSetNodeID InsertedID;
  AT->Profile(InsertedID, *this);
  assert(InsertedID == ID && "ID does not match");
#endif
  Types.push_back(Elt: AT);
  AutoTypes.try_emplace(Key: ID, Args&: AT);
  return QualType(AT, 0);
}
6851
6852QualType ASTContext::getUnconstrainedType(QualType T) const {
6853 QualType CanonT = T.getNonPackExpansionType().getCanonicalType();
6854
6855 // Remove a type-constraint from a top-level auto or decltype(auto).
6856 if (auto *AT = CanonT->getAs<AutoType>()) {
6857 if (!AT->isConstrained())
6858 return T;
6859 return getQualifiedType(
6860 T: getAutoType(DK: AT->getDeducedKind(), DeducedAsType: QualType(), Keyword: AT->getKeyword()),
6861 Qs: T.getQualifiers());
6862 }
6863
6864 // FIXME: We only support constrained auto at the top level in the type of a
6865 // non-type template parameter at the moment. Once we lift that restriction,
6866 // we'll need to recursively build types containing auto here.
6867 assert(!CanonT->getContainedAutoType() ||
6868 !CanonT->getContainedAutoType()->isConstrained());
6869 return T;
6870}
6871
/// Return the uniqued reference to the deduced template specialization type
/// which has been deduced to the given type, or to the canonical undeduced
/// such type, or the canonical deduced-but-dependent such type.
QualType ASTContext::getDeducedTemplateSpecializationType(
    DeducedKind DK, QualType DeducedAsType, ElaboratedTypeKeyword Keyword,
    TemplateName Template) const {
  // DeducedAsPack only ever occurs for lambda init-capture pack, which always
  // use AutoType.
  assert(DK != DeducedKind::DeducedAsPack &&
         "unexpected DeducedAsPack for DeducedTemplateSpecializationType");

  // Look in the folding set for an existing type.
  void *InsertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  DeducedTemplateSpecializationType::Profile(ID, DK, Deduced: DeducedAsType, Keyword,
                                             Template);
  if (DeducedTemplateSpecializationType *DTST =
          DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(DTST, 0);

  if (DK == DeducedKind::Deduced) {
    assert(!DeducedAsType.isNull() && "deduced type must be provided");
  } else {
    assert(DeducedAsType.isNull() && "deduced type must not be provided");
    // An undeduced node is canonical only with no elaboration keyword and a
    // canonical template name; otherwise build the canonical node first and
    // reference it through the DeducedAsType slot.
    TemplateName CanonTemplateName = getCanonicalTemplateName(Name: Template);
    // FIXME: Can this be formed from a DependentTemplateName, such that the
    // keyword should be part of the canonical type?
    if (Keyword != ElaboratedTypeKeyword::None ||
        Template != CanonTemplateName) {
      DeducedAsType = getDeducedTemplateSpecializationType(
          DK, DeducedAsType: QualType(), Keyword: ElaboratedTypeKeyword::None, Template: CanonTemplateName);
      // Find the insertion position again.
      // (The recursive call may have inserted nodes into the folding set.)
      [[maybe_unused]] DeducedTemplateSpecializationType *DTST =
          DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!DTST && "broken canonicalization");
    }
  }

  auto *DTST = new (*this, alignof(DeducedTemplateSpecializationType))
      DeducedTemplateSpecializationType(DK, DeducedAsType, Keyword, Template);

#ifndef NDEBUG
  // Sanity check: the new node must profile identically to the lookup key.
  llvm::FoldingSetNodeID TempID;
  DTST->Profile(TempID);
  assert(ID == TempID && "ID does not match");
#endif
  Types.push_back(Elt: DTST);
  DeducedTemplateSpecializationTypes.InsertNode(N: DTST, InsertPos);
  return QualType(DTST, 0);
}
6922
6923/// getAtomicType - Return the uniqued reference to the atomic type for
6924/// the given value type.
6925QualType ASTContext::getAtomicType(QualType T) const {
6926 // Unique pointers, to guarantee there is only one pointer of a particular
6927 // structure.
6928 llvm::FoldingSetNodeID ID;
6929 AtomicType::Profile(ID, T);
6930
6931 void *InsertPos = nullptr;
6932 if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos))
6933 return QualType(AT, 0);
6934
6935 // If the atomic value type isn't canonical, this won't be a canonical type
6936 // either, so fill in the canonical type field.
6937 QualType Canonical;
6938 if (!T.isCanonical()) {
6939 Canonical = getAtomicType(T: getCanonicalType(T));
6940
6941 // Get the new insert position for the node we care about.
6942 AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos);
6943 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
6944 }
6945 auto *New = new (*this, alignof(AtomicType)) AtomicType(T, Canonical);
6946 Types.push_back(Elt: New);
6947 AtomicTypes.InsertNode(N: New, InsertPos);
6948 return QualType(New, 0);
6949}
6950
6951/// getAutoDeductType - Get type pattern for deducing against 'auto'.
6952QualType ASTContext::getAutoDeductType() const {
6953 if (AutoDeductTy.isNull())
6954 AutoDeductTy = QualType(new (*this, alignof(AutoType))
6955 AutoType(DeducedKind::Undeduced, QualType(),
6956 AutoTypeKeyword::Auto,
6957 /*TypeConstraintConcept=*/nullptr,
6958 /*TypeConstraintArgs=*/{}),
6959 0);
6960 return AutoDeductTy;
6961}
6962
6963/// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'.
6964QualType ASTContext::getAutoRRefDeductType() const {
6965 if (AutoRRefDeductTy.isNull())
6966 AutoRRefDeductTy = getRValueReferenceType(T: getAutoDeductType());
6967 assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern");
6968 return AutoRRefDeductTy;
6969}
6970
6971/// getSizeType - Return the unique type for "size_t" (C99 7.17), the result
6972/// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and
6973/// needs to agree with the definition in <stddef.h>.
6974QualType ASTContext::getSizeType() const {
6975 return getPredefinedSugarType(KD: PredefinedSugarType::Kind::SizeT);
6976}
6977
6978CanQualType ASTContext::getCanonicalSizeType() const {
6979 return getFromTargetType(Type: Target->getSizeType());
6980}
6981
6982/// Return the unique signed counterpart of the integer type
6983/// corresponding to size_t.
6984QualType ASTContext::getSignedSizeType() const {
6985 return getPredefinedSugarType(KD: PredefinedSugarType::Kind::SignedSizeT);
6986}
6987
6988/// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17)
6989/// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9).
6990QualType ASTContext::getPointerDiffType() const {
6991 return getPredefinedSugarType(KD: PredefinedSugarType::Kind::PtrdiffT);
6992}
6993
6994/// Return the unique unsigned counterpart of "ptrdiff_t"
6995/// integer type. The standard (C11 7.21.6.1p7) refers to this type
6996/// in the definition of %tu format specifier.
6997QualType ASTContext::getUnsignedPointerDiffType() const {
6998 return getFromTargetType(Type: Target->getUnsignedPtrDiffType(AddrSpace: LangAS::Default));
6999}
7000
7001/// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5).
7002CanQualType ASTContext::getIntMaxType() const {
7003 return getFromTargetType(Type: Target->getIntMaxType());
7004}
7005
7006/// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5).
7007CanQualType ASTContext::getUIntMaxType() const {
7008 return getFromTargetType(Type: Target->getUIntMaxType());
7009}
7010
/// getSignedWCharType - Return the type of "signed wchar_t".
/// Used when in C++, as a GCC extension.
QualType ASTContext::getSignedWCharType() const {
  // Currently just returns wchar_t itself rather than a distinct signed
  // variant derived from the target.
  // FIXME: derive from "Target" ?
  return WCharTy;
}
7017
/// getUnsignedWCharType - Return the type of "unsigned wchar_t".
/// Used when in C++, as a GCC extension.
QualType ASTContext::getUnsignedWCharType() const {
  // Note: returns 'unsigned int' rather than an unsigned counterpart of the
  // target's actual wchar_t type.
  // FIXME: derive from "Target" ?
  return UnsignedIntTy;
}
7024
7025QualType ASTContext::getIntPtrType() const {
7026 return getFromTargetType(Type: Target->getIntPtrType());
7027}
7028
7029QualType ASTContext::getUIntPtrType() const {
7030 return getCorrespondingUnsignedType(T: getIntPtrType());
7031}
7032
7033/// Return the unique type for "pid_t" defined in
7034/// <sys/types.h>. We need this to compute the correct type for vfork().
7035QualType ASTContext::getProcessIDType() const {
7036 return getFromTargetType(Type: Target->getProcessIDType());
7037}
7038
7039//===----------------------------------------------------------------------===//
7040// Type Operators
7041//===----------------------------------------------------------------------===//
7042
7043CanQualType ASTContext::getCanonicalParamType(QualType T) const {
7044 // Push qualifiers into arrays, and then discard any remaining
7045 // qualifiers.
7046 T = getCanonicalType(T);
7047 T = getVariableArrayDecayedType(type: T);
7048 const Type *Ty = T.getTypePtr();
7049 QualType Result;
7050 if (getLangOpts().HLSL && isa<ConstantArrayType>(Val: Ty)) {
7051 Result = getArrayParameterType(Ty: QualType(Ty, 0));
7052 } else if (isa<ArrayType>(Val: Ty)) {
7053 Result = getArrayDecayedType(T: QualType(Ty,0));
7054 } else if (isa<FunctionType>(Val: Ty)) {
7055 Result = getPointerType(T: QualType(Ty, 0));
7056 } else {
7057 Result = QualType(Ty, 0);
7058 }
7059
7060 return CanQualType::CreateUnsafe(Other: Result);
7061}
7062
/// Return \p type with its qualifiers stripped, including qualifiers that
/// live on the element types of (possibly nested) arrays. All stripped
/// qualifiers are accumulated into \p quals.
QualType ASTContext::getUnqualifiedArrayType(QualType type,
                                             Qualifiers &quals) const {
  SplitQualType splitType = type.getSplitUnqualifiedType();

  // FIXME: getSplitUnqualifiedType() actually walks all the way to
  // the unqualified desugared type and then drops it on the floor.
  // We then have to strip that sugar back off with
  // getUnqualifiedDesugaredType(), which is silly.
  const auto *AT =
      dyn_cast<ArrayType>(Val: splitType.Ty->getUnqualifiedDesugaredType());

  // If we don't have an array, just use the results in splitType.
  if (!AT) {
    quals = splitType.Quals;
    return QualType(splitType.Ty, 0);
  }

  // Otherwise, recurse on the array's element type.
  QualType elementType = AT->getElementType();
  QualType unqualElementType = getUnqualifiedArrayType(type: elementType, quals);

  // If that didn't change the element type, AT has no qualifiers, so we
  // can just use the results in splitType.
  if (elementType == unqualElementType) {
    assert(quals.empty()); // from the recursive call
    quals = splitType.Quals;
    return QualType(splitType.Ty, 0);
  }

  // Otherwise, add in the qualifiers from the outermost type, then
  // build the type back up.
  quals.addConsistentQualifiers(qs: splitType.Quals);

  // Rebuild the array with the stripped element type, preserving the array
  // kind and its size information.
  if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: AT)) {
    return getConstantArrayType(EltTy: unqualElementType, ArySizeIn: CAT->getSize(),
                                SizeExpr: CAT->getSizeExpr(), ASM: CAT->getSizeModifier(), IndexTypeQuals: 0);
  }

  if (const auto *IAT = dyn_cast<IncompleteArrayType>(Val: AT)) {
    return getIncompleteArrayType(elementType: unqualElementType, ASM: IAT->getSizeModifier(), elementTypeQuals: 0);
  }

  if (const auto *VAT = dyn_cast<VariableArrayType>(Val: AT)) {
    return getVariableArrayType(EltTy: unqualElementType, NumElts: VAT->getSizeExpr(),
                                ASM: VAT->getSizeModifier(),
                                IndexTypeQuals: VAT->getIndexTypeCVRQualifiers());
  }

  const auto *DSAT = cast<DependentSizedArrayType>(Val: AT);
  return getDependentSizedArrayType(elementType: unqualElementType, numElements: DSAT->getSizeExpr(),
                                    ASM: DSAT->getSizeModifier(), elementTypeQuals: 0);
}
7115
/// Attempt to unwrap two types that may both be array types with the same bound
/// (or both be array types of unknown bound) for the purpose of comparing the
/// cv-decomposition of two types per C++ [conv.qual].
///
/// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in
///        C++20 [conv.qual], if permitted by the current language mode.
void ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2,
                                         bool AllowPiMismatch) const {
  // Peel matching array layers from both types in lockstep.
  while (true) {
    auto *AT1 = getAsArrayType(T: T1);
    if (!AT1)
      return;

    auto *AT2 = getAsArrayType(T: T2);
    if (!AT2)
      return;

    // If we don't have two array types with the same constant bound nor two
    // incomplete array types, we've unwrapped everything we can.
    // C++20 also permits one type to be a constant array type and the other
    // to be an incomplete array type.
    // FIXME: Consider also unwrapping array of unknown bound and VLA.
    if (auto *CAT1 = dyn_cast<ConstantArrayType>(Val: AT1)) {
      auto *CAT2 = dyn_cast<ConstantArrayType>(Val: AT2);
      if (!((CAT2 && CAT1->getSize() == CAT2->getSize()) ||
            (AllowPiMismatch && getLangOpts().CPlusPlus20 &&
             isa<IncompleteArrayType>(Val: AT2))))
        return;
    } else if (isa<IncompleteArrayType>(Val: AT1)) {
      if (!(isa<IncompleteArrayType>(Val: AT2) ||
            (AllowPiMismatch && getLangOpts().CPlusPlus20 &&
             isa<ConstantArrayType>(Val: AT2))))
        return;
    } else {
      return;
    }

    // The bounds are compatible: drop one array layer from each side and
    // continue with the element types.
    T1 = AT1->getElementType();
    T2 = AT2->getElementType();
  }
}
7157
/// Attempt to unwrap two types that may be similar (C++ [conv.qual]).
///
/// If T1 and T2 are both pointer types of the same kind, or both array types
/// with the same bound, unwraps layers from T1 and T2 until a pointer type is
/// unwrapped. Top-level qualifiers on T1 and T2 are ignored.
///
/// This function will typically be called in a loop that successively
/// "unwraps" pointer and pointer-to-member types to compare them at each
/// level.
///
/// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in
/// C++20 [conv.qual], if permitted by the current language mode.
///
/// \return \c true if a pointer type was unwrapped, \c false if we reached a
/// pair of types that can't be unwrapped further.
bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2,
                                    bool AllowPiMismatch) const {
  // First strip any matching array layers so a pointer (if any) is on top.
  UnwrapSimilarArrayTypes(T1, T2, AllowPiMismatch);

  // Matching object pointers: unwrap one level.
  const auto *T1PtrType = T1->getAs<PointerType>();
  const auto *T2PtrType = T2->getAs<PointerType>();
  if (T1PtrType && T2PtrType) {
    T1 = T1PtrType->getPointeeType();
    T2 = T2PtrType->getPointeeType();
    return true;
  }

  // Matching member pointers: the class and qualifier must agree (up to
  // canonicalization) before the pointee layers can be compared.
  if (const auto *T1MPType = T1->getAs<MemberPointerType>(),
          *T2MPType = T2->getAs<MemberPointerType>();
      T1MPType && T2MPType) {
    if (auto *RD1 = T1MPType->getMostRecentCXXRecordDecl(),
             *RD2 = T2MPType->getMostRecentCXXRecordDecl();
        RD1 != RD2 && RD1->getCanonicalDecl() != RD2->getCanonicalDecl())
      return false;
    if (T1MPType->getQualifier().getCanonical() !=
        T2MPType->getQualifier().getCanonical())
      return false;
    T1 = T1MPType->getPointeeType();
    T2 = T2MPType->getPointeeType();
    return true;
  }

  // Matching ObjC object pointers (only relevant in ObjC modes).
  if (getLangOpts().ObjC) {
    const auto *T1OPType = T1->getAs<ObjCObjectPointerType>();
    const auto *T2OPType = T2->getAs<ObjCObjectPointerType>();
    if (T1OPType && T2OPType) {
      T1 = T1OPType->getPointeeType();
      T2 = T2OPType->getPointeeType();
      return true;
    }
  }

  // FIXME: Block pointers, too?

  return false;
}
7214
7215bool ASTContext::hasSimilarType(QualType T1, QualType T2) const {
7216 while (true) {
7217 Qualifiers Quals;
7218 T1 = getUnqualifiedArrayType(type: T1, quals&: Quals);
7219 T2 = getUnqualifiedArrayType(type: T2, quals&: Quals);
7220 if (hasSameType(T1, T2))
7221 return true;
7222 if (!UnwrapSimilarTypes(T1, T2))
7223 return false;
7224 }
7225}
7226
7227bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) {
7228 while (true) {
7229 Qualifiers Quals1, Quals2;
7230 T1 = getUnqualifiedArrayType(type: T1, quals&: Quals1);
7231 T2 = getUnqualifiedArrayType(type: T2, quals&: Quals2);
7232
7233 Quals1.removeCVRQualifiers();
7234 Quals2.removeCVRQualifiers();
7235 if (Quals1 != Quals2)
7236 return false;
7237
7238 if (hasSameType(T1, T2))
7239 return true;
7240
7241 if (!UnwrapSimilarTypes(T1, T2, /*AllowPiMismatch*/ false))
7242 return false;
7243 }
7244}
7245
/// Compute the DeclarationNameInfo (declaration name plus location data)
/// used to refer to the given template name, handling each TemplateName
/// representation kind.
DeclarationNameInfo
ASTContext::getNameForTemplate(TemplateName Name,
                               SourceLocation NameLoc) const {
  switch (Name.getKind()) {
  case TemplateName::QualifiedTemplate:
  case TemplateName::Template:
    // DNInfo work in progress: CHECKME: what about DNLoc?
    return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(),
                               NameLoc);

  case TemplateName::OverloadedTemplate: {
    // All overloads share one name; use the first one's.
    OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate();
    // DNInfo work in progress: CHECKME: what about DNLoc?
    return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc);
  }

  case TemplateName::AssumedTemplate: {
    AssumedTemplateStorage *Storage = Name.getAsAssumedTemplateName();
    return DeclarationNameInfo(Storage->getDeclName(), NameLoc);
  }

  case TemplateName::DependentTemplate: {
    // The name is either a plain identifier or an overloaded operator name.
    DependentTemplateName *DTN = Name.getAsDependentTemplateName();
    IdentifierOrOverloadedOperator TN = DTN->getName();
    DeclarationName DName;
    if (const IdentifierInfo *II = TN.getIdentifier()) {
      DName = DeclarationNames.getIdentifier(ID: II);
      return DeclarationNameInfo(DName, NameLoc);
    } else {
      DName = DeclarationNames.getCXXOperatorName(Op: TN.getOperator());
      // DNInfo work in progress: FIXME: source locations?
      DeclarationNameLoc DNLoc =
          DeclarationNameLoc::makeCXXOperatorNameLoc(Range: SourceRange());
      return DeclarationNameInfo(DName, NameLoc, DNLoc);
    }
  }

  case TemplateName::SubstTemplateTemplateParm: {
    // Use the substituted template template parameter's own name.
    SubstTemplateTemplateParmStorage *subst
      = Name.getAsSubstTemplateTemplateParm();
    return DeclarationNameInfo(subst->getParameter()->getDeclName(),
                               NameLoc);
  }

  case TemplateName::SubstTemplateTemplateParmPack: {
    SubstTemplateTemplateParmPackStorage *subst
      = Name.getAsSubstTemplateTemplateParmPack();
    return DeclarationNameInfo(subst->getParameterPack()->getDeclName(),
                               NameLoc);
  }
  case TemplateName::UsingTemplate:
    return DeclarationNameInfo(Name.getAsUsingShadowDecl()->getDeclName(),
                               NameLoc);
  case TemplateName::DeducedTemplate: {
    // Deduced template names delegate to the underlying template's name.
    DeducedTemplateStorage *DTS = Name.getAsDeducedTemplateName();
    return getNameForTemplate(Name: DTS->getUnderlying(), NameLoc);
  }
  }

  llvm_unreachable("bad template name kind!");
}
7307
7308static const TemplateArgument *
7309getDefaultTemplateArgumentOrNone(const NamedDecl *P) {
7310 auto handleParam = [](auto *TP) -> const TemplateArgument * {
7311 if (!TP->hasDefaultArgument())
7312 return nullptr;
7313 return &TP->getDefaultArgument().getArgument();
7314 };
7315 switch (P->getKind()) {
7316 case NamedDecl::TemplateTypeParm:
7317 return handleParam(cast<TemplateTypeParmDecl>(Val: P));
7318 case NamedDecl::NonTypeTemplateParm:
7319 return handleParam(cast<NonTypeTemplateParmDecl>(Val: P));
7320 case NamedDecl::TemplateTemplateParm:
7321 return handleParam(cast<TemplateTemplateParmDecl>(Val: P));
7322 default:
7323 llvm_unreachable("Unexpected template parameter kind");
7324 }
7325}
7326
/// Produce the canonical form of the given template name, desugaring any
/// sugar nodes first (and deduced template names too when \p IgnoreDeduced
/// is set).
TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name,
                                                  bool IgnoreDeduced) const {
  // Peel off sugar until no further desugaring is possible.
  while (std::optional<TemplateName> UnderlyingOrNone =
             Name.desugar(IgnoreDeduced))
    Name = *UnderlyingOrNone;

  switch (Name.getKind()) {
  case TemplateName::Template: {
    TemplateDecl *Template = Name.getAsTemplateDecl();
    if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Val: Template))
      Template = getCanonicalTemplateTemplateParmDecl(TTP);

    // The canonical template name is the canonical template declaration.
    return TemplateName(cast<TemplateDecl>(Val: Template->getCanonicalDecl()));
  }

  case TemplateName::AssumedTemplate:
    // An assumed template is just a name, so it is already canonical.
    return Name;

  case TemplateName::OverloadedTemplate:
    llvm_unreachable("cannot canonicalize overloaded template");

  case TemplateName::DependentTemplate: {
    DependentTemplateName *DTN = Name.getAsDependentTemplateName();
    assert(DTN && "Non-dependent template names must refer to template decls.");
    NestedNameSpecifier Qualifier = DTN->getQualifier();
    NestedNameSpecifier CanonQualifier = Qualifier.getCanonical();
    // The canonical form carries a canonical qualifier and always uses the
    // 'template' keyword.
    if (Qualifier != CanonQualifier || !DTN->hasTemplateKeyword())
      return getDependentTemplateName(Name: {CanonQualifier, DTN->getName(),
                                      /*HasTemplateKeyword=*/true});
    return Name;
  }

  case TemplateName::SubstTemplateTemplateParmPack: {
    SubstTemplateTemplateParmPackStorage *subst =
        Name.getAsSubstTemplateTemplateParmPack();
    // Canonicalize the substituted argument pack and rebuild the node.
    TemplateArgument canonArgPack =
        getCanonicalTemplateArgument(Arg: subst->getArgumentPack());
    return getSubstTemplateTemplateParmPack(
        ArgPack: canonArgPack, AssociatedDecl: subst->getAssociatedDecl()->getCanonicalDecl(),
        Index: subst->getIndex(), Final: subst->getFinal());
  }
  case TemplateName::DeducedTemplate: {
    assert(IgnoreDeduced == false);
    DeducedTemplateStorage *DTS = Name.getAsDeducedTemplateName();
    DefaultArguments DefArgs = DTS->getDefaultArguments();
    TemplateName Underlying = DTS->getUnderlying();

    TemplateName CanonUnderlying =
        getCanonicalTemplateName(Name: Underlying, /*IgnoreDeduced=*/true);
    bool NonCanonical = CanonUnderlying != Underlying;
    auto CanonArgs =
        getCanonicalTemplateArguments(C: *this, Args: DefArgs.Args, AnyNonCanonArgs&: NonCanonical);

    ArrayRef<NamedDecl *> Params =
        CanonUnderlying.getAsTemplateDecl()->getTemplateParameters()->asArray();
    assert(CanonArgs.size() <= Params.size());
    // A deduced template name which deduces the same default arguments already
    // declared in the underlying template is the same template as the
    // underlying template. We need to note any arguments which differ from
    // the corresponding declaration. If any argument differs, we must build a
    // deduced template name.
    for (int I = CanonArgs.size() - 1; I >= 0; --I) {
      const TemplateArgument *A = getDefaultTemplateArgumentOrNone(P: Params[I]);
      if (!A)
        break;
      auto CanonParamDefArg = getCanonicalTemplateArgument(Arg: *A);
      TemplateArgument &CanonDefArg = CanonArgs[I];
      if (CanonDefArg.structurallyEquals(Other: CanonParamDefArg))
        continue;
      // Keep popping from the back any default arguments which are the same.
      if (I == int(CanonArgs.size() - 1))
        CanonArgs.pop_back();
      NonCanonical = true;
    }
    return NonCanonical ? getDeducedTemplateName(
                              Underlying: CanonUnderlying,
                              /*DefaultArgs=*/{.StartPos: DefArgs.StartPos, .Args: CanonArgs})
                        : Name;
  }
  case TemplateName::UsingTemplate:
  case TemplateName::QualifiedTemplate:
  case TemplateName::SubstTemplateTemplateParm:
    // These are sugar and were stripped by the desugaring loop above.
    llvm_unreachable("always sugar node");
  }

  llvm_unreachable("bad template name!");
}
7416
7417bool ASTContext::hasSameTemplateName(const TemplateName &X,
7418 const TemplateName &Y,
7419 bool IgnoreDeduced) const {
7420 return getCanonicalTemplateName(Name: X, IgnoreDeduced) ==
7421 getCanonicalTemplateName(Name: Y, IgnoreDeduced);
7422}
7423
7424bool ASTContext::isSameAssociatedConstraint(
7425 const AssociatedConstraint &ACX, const AssociatedConstraint &ACY) const {
7426 if (ACX.ArgPackSubstIndex != ACY.ArgPackSubstIndex)
7427 return false;
7428 if (!isSameConstraintExpr(XCE: ACX.ConstraintExpr, YCE: ACY.ConstraintExpr))
7429 return false;
7430 return true;
7431}
7432
7433bool ASTContext::isSameConstraintExpr(const Expr *XCE, const Expr *YCE) const {
7434 if (!XCE != !YCE)
7435 return false;
7436
7437 if (!XCE)
7438 return true;
7439
7440 llvm::FoldingSetNodeID XCEID, YCEID;
7441 XCE->Profile(ID&: XCEID, Context: *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true);
7442 YCE->Profile(ID&: YCEID, Context: *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true);
7443 return XCEID == YCEID;
7444}
7445
7446bool ASTContext::isSameTypeConstraint(const TypeConstraint *XTC,
7447 const TypeConstraint *YTC) const {
7448 if (!XTC != !YTC)
7449 return false;
7450
7451 if (!XTC)
7452 return true;
7453
7454 auto *NCX = XTC->getNamedConcept();
7455 auto *NCY = YTC->getNamedConcept();
7456 if (!NCX || !NCY || !isSameEntity(X: NCX, Y: NCY))
7457 return false;
7458 if (XTC->getConceptReference()->hasExplicitTemplateArgs() !=
7459 YTC->getConceptReference()->hasExplicitTemplateArgs())
7460 return false;
7461 if (XTC->getConceptReference()->hasExplicitTemplateArgs())
7462 if (XTC->getConceptReference()
7463 ->getTemplateArgsAsWritten()
7464 ->NumTemplateArgs !=
7465 YTC->getConceptReference()->getTemplateArgsAsWritten()->NumTemplateArgs)
7466 return false;
7467
7468 // Compare slowly by profiling.
7469 //
7470 // We couldn't compare the profiling result for the template
7471 // args here. Consider the following example in different modules:
7472 //
7473 // template <__integer_like _Tp, C<_Tp> Sentinel>
7474 // constexpr _Tp operator()(_Tp &&__t, Sentinel &&last) const {
7475 // return __t;
7476 // }
7477 //
7478 // When we compare the profiling result for `C<_Tp>` in different
7479 // modules, it will compare the type of `_Tp` in different modules.
7480 // However, the type of `_Tp` in different modules refer to different
7481 // types here naturally. So we couldn't compare the profiling result
7482 // for the template args directly.
7483 return isSameConstraintExpr(XCE: XTC->getImmediatelyDeclaredConstraint(),
7484 YCE: YTC->getImmediatelyDeclaredConstraint());
7485}
7486
7487bool ASTContext::isSameTemplateParameter(const NamedDecl *X,
7488 const NamedDecl *Y) const {
7489 if (X->getKind() != Y->getKind())
7490 return false;
7491
7492 if (auto *TX = dyn_cast<TemplateTypeParmDecl>(Val: X)) {
7493 auto *TY = cast<TemplateTypeParmDecl>(Val: Y);
7494 if (TX->isParameterPack() != TY->isParameterPack())
7495 return false;
7496 if (TX->hasTypeConstraint() != TY->hasTypeConstraint())
7497 return false;
7498 return isSameTypeConstraint(XTC: TX->getTypeConstraint(),
7499 YTC: TY->getTypeConstraint());
7500 }
7501
7502 if (auto *TX = dyn_cast<NonTypeTemplateParmDecl>(Val: X)) {
7503 auto *TY = cast<NonTypeTemplateParmDecl>(Val: Y);
7504 return TX->isParameterPack() == TY->isParameterPack() &&
7505 TX->getASTContext().hasSameType(T1: TX->getType(), T2: TY->getType()) &&
7506 isSameConstraintExpr(XCE: TX->getPlaceholderTypeConstraint(),
7507 YCE: TY->getPlaceholderTypeConstraint());
7508 }
7509
7510 auto *TX = cast<TemplateTemplateParmDecl>(Val: X);
7511 auto *TY = cast<TemplateTemplateParmDecl>(Val: Y);
7512 return TX->isParameterPack() == TY->isParameterPack() &&
7513 isSameTemplateParameterList(X: TX->getTemplateParameters(),
7514 Y: TY->getTemplateParameters());
7515}
7516
7517bool ASTContext::isSameTemplateParameterList(
7518 const TemplateParameterList *X, const TemplateParameterList *Y) const {
7519 if (X->size() != Y->size())
7520 return false;
7521
7522 for (unsigned I = 0, N = X->size(); I != N; ++I)
7523 if (!isSameTemplateParameter(X: X->getParam(Idx: I), Y: Y->getParam(Idx: I)))
7524 return false;
7525
7526 return isSameConstraintExpr(XCE: X->getRequiresClause(), YCE: Y->getRequiresClause());
7527}
7528
7529bool ASTContext::isSameDefaultTemplateArgument(const NamedDecl *X,
7530 const NamedDecl *Y) const {
7531 // If the type parameter isn't the same already, we don't need to check the
7532 // default argument further.
7533 if (!isSameTemplateParameter(X, Y))
7534 return false;
7535
7536 if (auto *TTPX = dyn_cast<TemplateTypeParmDecl>(Val: X)) {
7537 auto *TTPY = cast<TemplateTypeParmDecl>(Val: Y);
7538 if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument())
7539 return false;
7540
7541 return hasSameType(T1: TTPX->getDefaultArgument().getArgument().getAsType(),
7542 T2: TTPY->getDefaultArgument().getArgument().getAsType());
7543 }
7544
7545 if (auto *NTTPX = dyn_cast<NonTypeTemplateParmDecl>(Val: X)) {
7546 auto *NTTPY = cast<NonTypeTemplateParmDecl>(Val: Y);
7547 if (!NTTPX->hasDefaultArgument() || !NTTPY->hasDefaultArgument())
7548 return false;
7549
7550 Expr *DefaultArgumentX =
7551 NTTPX->getDefaultArgument().getArgument().getAsExpr()->IgnoreImpCasts();
7552 Expr *DefaultArgumentY =
7553 NTTPY->getDefaultArgument().getArgument().getAsExpr()->IgnoreImpCasts();
7554 llvm::FoldingSetNodeID XID, YID;
7555 DefaultArgumentX->Profile(ID&: XID, Context: *this, /*Canonical=*/true);
7556 DefaultArgumentY->Profile(ID&: YID, Context: *this, /*Canonical=*/true);
7557 return XID == YID;
7558 }
7559
7560 auto *TTPX = cast<TemplateTemplateParmDecl>(Val: X);
7561 auto *TTPY = cast<TemplateTemplateParmDecl>(Val: Y);
7562
7563 if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument())
7564 return false;
7565
7566 const TemplateArgument &TAX = TTPX->getDefaultArgument().getArgument();
7567 const TemplateArgument &TAY = TTPY->getDefaultArgument().getArgument();
7568 return hasSameTemplateName(X: TAX.getAsTemplate(), Y: TAY.getAsTemplate());
7569}
7570
7571static bool isSameQualifier(const NestedNameSpecifier X,
7572 const NestedNameSpecifier Y) {
7573 if (X == Y)
7574 return true;
7575 if (!X || !Y)
7576 return false;
7577
7578 auto Kind = X.getKind();
7579 if (Kind != Y.getKind())
7580 return false;
7581
7582 // FIXME: For namespaces and types, we're permitted to check that the entity
7583 // is named via the same tokens. We should probably do so.
7584 switch (Kind) {
7585 case NestedNameSpecifier::Kind::Namespace: {
7586 auto [NamespaceX, PrefixX] = X.getAsNamespaceAndPrefix();
7587 auto [NamespaceY, PrefixY] = Y.getAsNamespaceAndPrefix();
7588 if (!declaresSameEntity(D1: NamespaceX->getNamespace(),
7589 D2: NamespaceY->getNamespace()))
7590 return false;
7591 return isSameQualifier(X: PrefixX, Y: PrefixY);
7592 }
7593 case NestedNameSpecifier::Kind::Type: {
7594 const auto *TX = X.getAsType(), *TY = Y.getAsType();
7595 if (TX->getCanonicalTypeInternal() != TY->getCanonicalTypeInternal())
7596 return false;
7597 return isSameQualifier(X: TX->getPrefix(), Y: TY->getPrefix());
7598 }
7599 case NestedNameSpecifier::Kind::Null:
7600 case NestedNameSpecifier::Kind::Global:
7601 case NestedNameSpecifier::Kind::MicrosoftSuper:
7602 return true;
7603 }
7604 llvm_unreachable("unhandled qualifier kind");
7605}
7606
7607static bool hasSameCudaAttrs(const FunctionDecl *A, const FunctionDecl *B) {
7608 if (!A->getASTContext().getLangOpts().CUDA)
7609 return true; // Target attributes are overloadable in CUDA compilation only.
7610 if (A->hasAttr<CUDADeviceAttr>() != B->hasAttr<CUDADeviceAttr>())
7611 return false;
7612 if (A->hasAttr<CUDADeviceAttr>() && B->hasAttr<CUDADeviceAttr>())
7613 return A->hasAttr<CUDAHostAttr>() == B->hasAttr<CUDAHostAttr>();
7614 return true; // unattributed and __host__ functions are the same.
7615}
7616
7617/// Determine whether the attributes we can overload on are identical for A and
7618/// B. Will ignore any overloadable attrs represented in the type of A and B.
7619static bool hasSameOverloadableAttrs(const FunctionDecl *A,
7620 const FunctionDecl *B) {
7621 // Note that pass_object_size attributes are represented in the function's
7622 // ExtParameterInfo, so we don't need to check them here.
7623
7624 llvm::FoldingSetNodeID Cand1ID, Cand2ID;
7625 auto AEnableIfAttrs = A->specific_attrs<EnableIfAttr>();
7626 auto BEnableIfAttrs = B->specific_attrs<EnableIfAttr>();
7627
7628 for (auto Pair : zip_longest(t&: AEnableIfAttrs, u&: BEnableIfAttrs)) {
7629 std::optional<EnableIfAttr *> Cand1A = std::get<0>(t&: Pair);
7630 std::optional<EnableIfAttr *> Cand2A = std::get<1>(t&: Pair);
7631
7632 // Return false if the number of enable_if attributes is different.
7633 if (!Cand1A || !Cand2A)
7634 return false;
7635
7636 Cand1ID.clear();
7637 Cand2ID.clear();
7638
7639 (*Cand1A)->getCond()->Profile(ID&: Cand1ID, Context: A->getASTContext(), Canonical: true);
7640 (*Cand2A)->getCond()->Profile(ID&: Cand2ID, Context: B->getASTContext(), Canonical: true);
7641
7642 // Return false if any of the enable_if expressions of A and B are
7643 // different.
7644 if (Cand1ID != Cand2ID)
7645 return false;
7646 }
7647 return hasSameCudaAttrs(A, B);
7648}
7649
/// Determine whether the two declarations refer to the same entity. Used
/// primarily to decide whether declarations from different modules should be
/// merged.
bool ASTContext::isSameEntity(const NamedDecl *X, const NamedDecl *Y) const {
  // Caution: this function is called by the AST reader during deserialization,
  // so it cannot rely on AST invariants being met. Non-trivial accessors
  // should be avoided, along with any traversal of redeclaration chains.

  if (X == Y)
    return true;

  if (X->getDeclName() != Y->getDeclName())
    return false;

  // Must be in the same context.
  //
  // Note that we can't use DeclContext::Equals here, because the DeclContexts
  // could be two different declarations of the same function. (We will fix the
  // semantic DC to refer to the primary definition after merging.)
  if (!declaresSameEntity(D1: cast<Decl>(Val: X->getDeclContext()->getRedeclContext()),
                          D2: cast<Decl>(Val: Y->getDeclContext()->getRedeclContext())))
    return false;

  // If either X or Y are local to the owning module, they are only possible to
  // be the same entity if they are in the same module.
  if (X->isModuleLocal() || Y->isModuleLocal())
    if (!isInSameModule(M1: X->getOwningModule(), M2: Y->getOwningModule()))
      return false;

  // Two typedefs refer to the same entity if they have the same underlying
  // type. (Checked before the kind comparison below, so a typedef can match
  // an alias-declaration.)
  if (const auto *TypedefX = dyn_cast<TypedefNameDecl>(Val: X))
    if (const auto *TypedefY = dyn_cast<TypedefNameDecl>(Val: Y))
      return hasSameType(T1: TypedefX->getUnderlyingType(),
                         T2: TypedefY->getUnderlyingType());

  // Must have the same kind.
  if (X->getKind() != Y->getKind())
    return false;

  // Objective-C classes and protocols with the same name always match.
  if (isa<ObjCInterfaceDecl>(Val: X) || isa<ObjCProtocolDecl>(Val: X))
    return true;

  if (isa<ClassTemplateSpecializationDecl>(Val: X)) {
    // No need to handle these here: we merge them when adding them to the
    // template.
    return false;
  }

  // Compatible tags match. struct/class/__interface are interchangeable;
  // enum and union must match exactly.
  if (const auto *TagX = dyn_cast<TagDecl>(Val: X)) {
    const auto *TagY = cast<TagDecl>(Val: Y);
    return (TagX->getTagKind() == TagY->getTagKind()) ||
           ((TagX->getTagKind() == TagTypeKind::Struct ||
             TagX->getTagKind() == TagTypeKind::Class ||
             TagX->getTagKind() == TagTypeKind::Interface) &&
            (TagY->getTagKind() == TagTypeKind::Struct ||
             TagY->getTagKind() == TagTypeKind::Class ||
             TagY->getTagKind() == TagTypeKind::Interface));
  }

  // Functions with the same type and linkage match.
  // FIXME: This needs to cope with merging of prototyped/non-prototyped
  // functions, etc.
  if (const auto *FuncX = dyn_cast<FunctionDecl>(Val: X)) {
    const auto *FuncY = cast<FunctionDecl>(Val: Y);
    // Inheriting constructors must inherit from the same base constructor.
    if (const auto *CtorX = dyn_cast<CXXConstructorDecl>(Val: X)) {
      const auto *CtorY = cast<CXXConstructorDecl>(Val: Y);
      if (CtorX->getInheritedConstructor() &&
          !isSameEntity(X: CtorX->getInheritedConstructor().getConstructor(),
                        Y: CtorY->getInheritedConstructor().getConstructor()))
        return false;
    }

    if (FuncX->isMultiVersion() != FuncY->isMultiVersion())
      return false;

    // Multiversioned functions with different feature strings are represented
    // as separate declarations.
    if (FuncX->isMultiVersion()) {
      const auto *TAX = FuncX->getAttr<TargetAttr>();
      const auto *TAY = FuncY->getAttr<TargetAttr>();
      assert(TAX && TAY && "Multiversion Function without target attribute");

      if (TAX->getFeaturesStr() != TAY->getFeaturesStr())
        return false;
    }

    // Per C++20 [temp.over.link]/4, friends in different classes are sometimes
    // not the same entity if they are constrained.
    if ((FuncX->isMemberLikeConstrainedFriend() ||
         FuncY->isMemberLikeConstrainedFriend()) &&
        !FuncX->getLexicalDeclContext()->Equals(
            DC: FuncY->getLexicalDeclContext())) {
      return false;
    }

    if (!isSameAssociatedConstraint(ACX: FuncX->getTrailingRequiresClause(),
                                    ACY: FuncY->getTrailingRequiresClause()))
      return false;

    auto GetTypeAsWritten = [](const FunctionDecl *FD) {
      // Map to the first declaration that we've already merged into this one.
      // The TSI of redeclarations might not match (due to calling conventions
      // being inherited onto the type but not the TSI), but the TSI type of
      // the first declaration of the function should match across modules.
      FD = FD->getCanonicalDecl();
      return FD->getTypeSourceInfo() ? FD->getTypeSourceInfo()->getType()
                                     : FD->getType();
    };
    QualType XT = GetTypeAsWritten(FuncX), YT = GetTypeAsWritten(FuncY);
    if (!hasSameType(T1: XT, T2: YT)) {
      // We can get functions with different types on the redecl chain in C++17
      // if they have differing exception specifications and at least one of
      // the excpetion specs is unresolved.
      auto *XFPT = XT->getAs<FunctionProtoType>();
      auto *YFPT = YT->getAs<FunctionProtoType>();
      if (getLangOpts().CPlusPlus17 && XFPT && YFPT &&
          (isUnresolvedExceptionSpec(ESpecType: XFPT->getExceptionSpecType()) ||
           isUnresolvedExceptionSpec(ESpecType: YFPT->getExceptionSpecType())) &&
          hasSameFunctionTypeIgnoringExceptionSpec(T: XT, U: YT))
        return true;
      return false;
    }

    return FuncX->getLinkageInternal() == FuncY->getLinkageInternal() &&
           hasSameOverloadableAttrs(A: FuncX, B: FuncY);
  }

  // Variables with the same type and linkage match.
  if (const auto *VarX = dyn_cast<VarDecl>(Val: X)) {
    const auto *VarY = cast<VarDecl>(Val: Y);
    if (VarX->getLinkageInternal() == VarY->getLinkageInternal()) {
      // During deserialization, we might compare variables before we load
      // their types. Assume the types will end up being the same.
      if (VarX->getType().isNull() || VarY->getType().isNull())
        return true;

      if (hasSameType(T1: VarX->getType(), T2: VarY->getType()))
        return true;

      // We can get decls with different types on the redecl chain. Eg.
      // template <typename T> struct S { static T Var[]; }; // #1
      // template <typename T> T S<T>::Var[sizeof(T)]; // #2
      // Only? happens when completing an incomplete array type. In this case
      // when comparing #1 and #2 we should go through their element type.
      const ArrayType *VarXTy = getAsArrayType(T: VarX->getType());
      const ArrayType *VarYTy = getAsArrayType(T: VarY->getType());
      if (!VarXTy || !VarYTy)
        return false;
      if (VarXTy->isIncompleteArrayType() || VarYTy->isIncompleteArrayType())
        return hasSameType(T1: VarXTy->getElementType(), T2: VarYTy->getElementType());
    }
    return false;
  }

  // Namespaces with the same name and inlinedness match.
  if (const auto *NamespaceX = dyn_cast<NamespaceDecl>(Val: X)) {
    const auto *NamespaceY = cast<NamespaceDecl>(Val: Y);
    return NamespaceX->isInline() == NamespaceY->isInline();
  }

  // Identical template names and kinds match if their template parameter lists
  // and patterns match.
  if (const auto *TemplateX = dyn_cast<TemplateDecl>(Val: X)) {
    const auto *TemplateY = cast<TemplateDecl>(Val: Y);

    // ConceptDecl wouldn't be the same if their constraint expression differs.
    if (const auto *ConceptX = dyn_cast<ConceptDecl>(Val: X)) {
      const auto *ConceptY = cast<ConceptDecl>(Val: Y);
      if (!isSameConstraintExpr(XCE: ConceptX->getConstraintExpr(),
                                YCE: ConceptY->getConstraintExpr()))
        return false;
    }

    return isSameEntity(X: TemplateX->getTemplatedDecl(),
                        Y: TemplateY->getTemplatedDecl()) &&
           isSameTemplateParameterList(X: TemplateX->getTemplateParameters(),
                                       Y: TemplateY->getTemplateParameters());
  }

  // Fields with the same name and the same type match.
  if (const auto *FDX = dyn_cast<FieldDecl>(Val: X)) {
    const auto *FDY = cast<FieldDecl>(Val: Y);
    // FIXME: Also check the bitwidth is odr-equivalent, if any.
    return hasSameType(T1: FDX->getType(), T2: FDY->getType());
  }

  // Indirect fields with the same target field match.
  if (const auto *IFDX = dyn_cast<IndirectFieldDecl>(Val: X)) {
    const auto *IFDY = cast<IndirectFieldDecl>(Val: Y);
    return IFDX->getAnonField()->getCanonicalDecl() ==
           IFDY->getAnonField()->getCanonicalDecl();
  }

  // Enumerators with the same name match.
  if (isa<EnumConstantDecl>(Val: X))
    // FIXME: Also check the value is odr-equivalent.
    return true;

  // Using shadow declarations with the same target match.
  if (const auto *USX = dyn_cast<UsingShadowDecl>(Val: X)) {
    const auto *USY = cast<UsingShadowDecl>(Val: Y);
    return declaresSameEntity(D1: USX->getTargetDecl(), D2: USY->getTargetDecl());
  }

  // Using declarations with the same qualifier match. (We already know that
  // the name matches.)
  if (const auto *UX = dyn_cast<UsingDecl>(Val: X)) {
    const auto *UY = cast<UsingDecl>(Val: Y);
    return isSameQualifier(X: UX->getQualifier(), Y: UY->getQualifier()) &&
           UX->hasTypename() == UY->hasTypename() &&
           UX->isAccessDeclaration() == UY->isAccessDeclaration();
  }
  if (const auto *UX = dyn_cast<UnresolvedUsingValueDecl>(Val: X)) {
    const auto *UY = cast<UnresolvedUsingValueDecl>(Val: Y);
    return isSameQualifier(X: UX->getQualifier(), Y: UY->getQualifier()) &&
           UX->isAccessDeclaration() == UY->isAccessDeclaration();
  }
  if (const auto *UX = dyn_cast<UnresolvedUsingTypenameDecl>(Val: X)) {
    return isSameQualifier(
        X: UX->getQualifier(),
        Y: cast<UnresolvedUsingTypenameDecl>(Val: Y)->getQualifier());
  }

  // Using-pack declarations are only created by instantiation, and match if
  // they're instantiated from matching UnresolvedUsing...Decls.
  if (const auto *UX = dyn_cast<UsingPackDecl>(Val: X)) {
    return declaresSameEntity(
        D1: UX->getInstantiatedFromUsingDecl(),
        D2: cast<UsingPackDecl>(Val: Y)->getInstantiatedFromUsingDecl());
  }

  // Namespace alias definitions with the same target match.
  if (const auto *NAX = dyn_cast<NamespaceAliasDecl>(Val: X)) {
    const auto *NAY = cast<NamespaceAliasDecl>(Val: Y);
    return NAX->getNamespace()->Equals(DC: NAY->getNamespace());
  }

  // Any other kind of declaration is not considered mergeable here.
  return false;
}
7889
/// Produce the canonical form of the given template argument, recursively
/// canonicalizing any nested types, template names, expressions, and pack
/// elements while preserving the argument's "is defaulted" flag.
TemplateArgument
ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
  switch (Arg.getKind()) {
  case TemplateArgument::Null:
    // Nothing to canonicalize.
    return Arg;

  case TemplateArgument::Expression:
    // Rebuild the argument with the canonical bit set.
    return TemplateArgument(Arg.getAsExpr(), /*IsCanonical=*/true,
                            Arg.getIsDefaulted());

  case TemplateArgument::Declaration: {
    auto *D = cast<ValueDecl>(Val: Arg.getAsDecl()->getCanonicalDecl());
    return TemplateArgument(D, getCanonicalType(T: Arg.getParamTypeForDecl()),
                            Arg.getIsDefaulted());
  }

  case TemplateArgument::NullPtr:
    return TemplateArgument(getCanonicalType(T: Arg.getNullPtrType()),
                            /*isNullPtr*/ true, Arg.getIsDefaulted());

  case TemplateArgument::Template:
    return TemplateArgument(getCanonicalTemplateName(Name: Arg.getAsTemplate()),
                            Arg.getIsDefaulted());

  case TemplateArgument::TemplateExpansion:
    return TemplateArgument(
        getCanonicalTemplateName(Name: Arg.getAsTemplateOrTemplatePattern()),
        Arg.getNumTemplateExpansions(), Arg.getIsDefaulted());

  case TemplateArgument::Integral:
    return TemplateArgument(Arg, getCanonicalType(T: Arg.getIntegralType()));

  case TemplateArgument::StructuralValue:
    return TemplateArgument(*this,
                            getCanonicalType(T: Arg.getStructuralValueType()),
                            Arg.getAsStructuralValue(), Arg.getIsDefaulted());

  case TemplateArgument::Type:
    return TemplateArgument(getCanonicalType(T: Arg.getAsType()),
                            /*isNullPtr*/ false, Arg.getIsDefaulted());

  case TemplateArgument::Pack: {
    // Canonicalize each element; only allocate a new pack if something
    // actually changed.
    bool AnyNonCanonArgs = false;
    auto CanonArgs = ::getCanonicalTemplateArguments(
        C: *this, Args: Arg.pack_elements(), AnyNonCanonArgs);
    if (!AnyNonCanonArgs)
      return Arg;
    auto NewArg = TemplateArgument::CreatePackCopy(
        Context&: const_cast<ASTContext &>(*this), Args: CanonArgs);
    NewArg.setIsDefaulted(Arg.getIsDefaulted());
    return NewArg;
  }
  }

  // Silence GCC warning
  llvm_unreachable("Unhandled template argument kind");
}
7947
/// Determine whether two template arguments of the same kind represent the
/// same value, comparing canonical/structural forms as appropriate for each
/// kind.
bool ASTContext::isSameTemplateArgument(const TemplateArgument &Arg1,
                                        const TemplateArgument &Arg2) const {
  if (Arg1.getKind() != Arg2.getKind())
    return false;

  switch (Arg1.getKind()) {
  case TemplateArgument::Null:
    llvm_unreachable("Comparing NULL template argument");

  case TemplateArgument::Type:
    return hasSameType(T1: Arg1.getAsType(), T2: Arg2.getAsType());

  case TemplateArgument::Declaration:
    // Compare the canonical declarations of the underlying entities.
    return Arg1.getAsDecl()->getUnderlyingDecl()->getCanonicalDecl() ==
           Arg2.getAsDecl()->getUnderlyingDecl()->getCanonicalDecl();

  case TemplateArgument::NullPtr:
    return hasSameType(T1: Arg1.getNullPtrType(), T2: Arg2.getNullPtrType());

  case TemplateArgument::Template:
  case TemplateArgument::TemplateExpansion:
    return getCanonicalTemplateName(Name: Arg1.getAsTemplateOrTemplatePattern()) ==
           getCanonicalTemplateName(Name: Arg2.getAsTemplateOrTemplatePattern());

  case TemplateArgument::Integral:
    // Value comparison that tolerates differing bit-widths/signedness.
    return llvm::APSInt::isSameValue(I1: Arg1.getAsIntegral(),
                                     I2: Arg2.getAsIntegral());

  case TemplateArgument::StructuralValue:
    return Arg1.structurallyEquals(Other: Arg2);

  case TemplateArgument::Expression: {
    // Compare expressions by canonical profile.
    llvm::FoldingSetNodeID ID1, ID2;
    Arg1.getAsExpr()->Profile(ID&: ID1, Context: *this, /*Canonical=*/true);
    Arg2.getAsExpr()->Profile(ID&: ID2, Context: *this, /*Canonical=*/true);
    return ID1 == ID2;
  }

  case TemplateArgument::Pack:
    // Packs match element-wise (llvm::equal also requires equal lengths).
    return llvm::equal(
        LRange: Arg1.getPackAsArray(), RRange: Arg2.getPackAsArray(),
        P: [&](const TemplateArgument &Arg1, const TemplateArgument &Arg2) {
          return isSameTemplateArgument(Arg1, Arg2);
        });
  }

  llvm_unreachable("Unhandled template argument kind");
}
7996
/// Return T as an ArrayType (desugaring as needed), or null if T is not an
/// array. Any local qualifiers on T are pushed down into the element type of
/// the returned array, per C99 6.7.3p8.
const ArrayType *ASTContext::getAsArrayType(QualType T) const {
  // Handle the non-qualified case efficiently.
  if (!T.hasLocalQualifiers()) {
    // Handle the common positive case fast.
    if (const auto *AT = dyn_cast<ArrayType>(Val&: T))
      return AT;
  }

  // Handle the common negative case fast.
  if (!isa<ArrayType>(Val: T.getCanonicalType()))
    return nullptr;

  // Apply any qualifiers from the array type to the element type. This
  // implements C99 6.7.3p8: "If the specification of an array type includes
  // any type qualifiers, the element type is so qualified, not the array type."

  // If we get here, we either have type qualifiers on the type, or we have
  // sugar such as a typedef in the way. If we have type qualifiers on the type
  // we must propagate them down into the element type.

  SplitQualType split = T.getSplitDesugaredType();
  Qualifiers qs = split.Quals;

  // If we have a simple case, just return now.
  const auto *ATy = dyn_cast<ArrayType>(Val: split.Ty);
  if (!ATy || qs.empty())
    return ATy;

  // Otherwise, we have an array and we have qualifiers on it. Push the
  // qualifiers into the array element type and return a new array type.
  QualType NewEltTy = getQualifiedType(T: ATy->getElementType(), Qs: qs);

  // Rebuild the array with the newly-qualified element type, preserving the
  // specific array flavor (constant/incomplete/dependent-sized/variable).
  if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: ATy))
    return cast<ArrayType>(Val: getConstantArrayType(EltTy: NewEltTy, ArySizeIn: CAT->getSize(),
                                                SizeExpr: CAT->getSizeExpr(),
                                                ASM: CAT->getSizeModifier(),
                                                IndexTypeQuals: CAT->getIndexTypeCVRQualifiers()));
  if (const auto *IAT = dyn_cast<IncompleteArrayType>(Val: ATy))
    return cast<ArrayType>(Val: getIncompleteArrayType(elementType: NewEltTy,
                                                  ASM: IAT->getSizeModifier(),
                                                  elementTypeQuals: IAT->getIndexTypeCVRQualifiers()));

  if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(Val: ATy))
    return cast<ArrayType>(Val: getDependentSizedArrayType(
        elementType: NewEltTy, numElements: DSAT->getSizeExpr(), ASM: DSAT->getSizeModifier(),
        elementTypeQuals: DSAT->getIndexTypeCVRQualifiers()));

  const auto *VAT = cast<VariableArrayType>(Val: ATy);
  return cast<ArrayType>(
      Val: getVariableArrayType(EltTy: NewEltTy, NumElts: VAT->getSizeExpr(), ASM: VAT->getSizeModifier(),
                            IndexTypeQuals: VAT->getIndexTypeCVRQualifiers()));
}
8049
8050QualType ASTContext::getAdjustedParameterType(QualType T) const {
8051 if (getLangOpts().HLSL && T.getAddressSpace() == LangAS::hlsl_groupshared)
8052 return getLValueReferenceType(T);
8053 if (getLangOpts().HLSL && T->isConstantArrayType())
8054 return getArrayParameterType(Ty: T);
8055 if (T->isArrayType() || T->isFunctionType())
8056 return getDecayedType(T);
8057 return T;
8058}
8059
8060QualType ASTContext::getSignatureParameterType(QualType T) const {
8061 T = getVariableArrayDecayedType(type: T);
8062 T = getAdjustedParameterType(T);
8063 return T.getUnqualifiedType();
8064}
8065
8066QualType ASTContext::getExceptionObjectType(QualType T) const {
8067 // C++ [except.throw]p3:
8068 // A throw-expression initializes a temporary object, called the exception
8069 // object, the type of which is determined by removing any top-level
8070 // cv-qualifiers from the static type of the operand of throw and adjusting
8071 // the type from "array of T" or "function returning T" to "pointer to T"
8072 // or "pointer to function returning T", [...]
8073 T = getVariableArrayDecayedType(type: T);
8074 if (T->isArrayType() || T->isFunctionType())
8075 T = getDecayedType(T);
8076 return T.getUnqualifiedType();
8077}
8078
8079/// getArrayDecayedType - Return the properly qualified result of decaying the
8080/// specified array type to a pointer. This operation is non-trivial when
8081/// handling typedefs etc. The canonical type of "T" must be an array type,
8082/// this returns a pointer to a properly qualified element of the array.
8083///
8084/// See C99 6.7.5.3p7 and C99 6.3.2.1p3.
8085QualType ASTContext::getArrayDecayedType(QualType Ty) const {
8086 // Get the element type with 'getAsArrayType' so that we don't lose any
8087 // typedefs in the element type of the array. This also handles propagation
8088 // of type qualifiers from the array type into the element type if present
8089 // (C99 6.7.3p8).
8090 const ArrayType *PrettyArrayType = getAsArrayType(T: Ty);
8091 assert(PrettyArrayType && "Not an array type!");
8092
8093 QualType PtrTy = getPointerType(T: PrettyArrayType->getElementType());
8094
8095 // int x[restrict 4] -> int *restrict
8096 QualType Result = getQualifiedType(T: PtrTy,
8097 Qs: PrettyArrayType->getIndexTypeQualifiers());
8098
8099 // int x[_Nullable] -> int * _Nullable
8100 if (auto Nullability = Ty->getNullability()) {
8101 Result = const_cast<ASTContext *>(this)->getAttributedType(nullability: *Nullability,
8102 modifiedType: Result, equivalentType: Result);
8103 }
8104 return Result;
8105}
8106
8107QualType ASTContext::getBaseElementType(const ArrayType *array) const {
8108 return getBaseElementType(QT: array->getElementType());
8109}
8110
QualType ASTContext::getBaseElementType(QualType type) const {
  // Strip away every level of array-ness, collecting the qualifiers found at
  // each level so they can be reapplied to the innermost element type.
  Qualifiers qs;
  while (true) {
    SplitQualType split = type.getSplitDesugaredType();
    const ArrayType *array = split.Ty->getAsArrayTypeUnsafe();
    if (!array) break;

    type = array->getElementType();
    qs.addConsistentQualifiers(qs: split.Quals);
  }

  // Reapply the accumulated qualifiers onto the non-array element type.
  return getQualifiedType(T: type, Qs: qs);
}
8124
8125/// getConstantArrayElementCount - Returns number of constant array elements.
8126uint64_t
8127ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const {
8128 uint64_t ElementCount = 1;
8129 do {
8130 ElementCount *= CA->getZExtSize();
8131 CA = dyn_cast_or_null<ConstantArrayType>(
8132 Val: CA->getElementType()->getAsArrayTypeUnsafe());
8133 } while (CA);
8134 return ElementCount;
8135}
8136
8137uint64_t ASTContext::getArrayInitLoopExprElementCount(
8138 const ArrayInitLoopExpr *AILE) const {
8139 if (!AILE)
8140 return 0;
8141
8142 uint64_t ElementCount = 1;
8143
8144 do {
8145 ElementCount *= AILE->getArraySize().getZExtValue();
8146 AILE = dyn_cast<ArrayInitLoopExpr>(Val: AILE->getSubExpr());
8147 } while (AILE);
8148
8149 return ElementCount;
8150}
8151
/// getFloatingRank - Return a relative rank for floating point types.
/// This routine will assert if passed a built-in type that isn't a float.
static FloatingRank getFloatingRank(QualType T) {
  // A complex type ranks the same as its element type (the domain is
  // ignored for ranking purposes).
  if (const auto *CT = T->getAs<ComplexType>())
    return getFloatingRank(T: CT->getElementType());

  switch (T->castAs<BuiltinType>()->getKind()) {
  default: llvm_unreachable("getFloatingRank(): not a floating type");
  case BuiltinType::Float16: return Float16Rank;
  case BuiltinType::Half: return HalfRank;
  case BuiltinType::Float: return FloatRank;
  case BuiltinType::Double: return DoubleRank;
  case BuiltinType::LongDouble: return LongDoubleRank;
  case BuiltinType::Float128: return Float128Rank;
  case BuiltinType::BFloat16: return BFloat16Rank;
  case BuiltinType::Ibm128: return Ibm128Rank;
  }
}
8170
8171/// getFloatingTypeOrder - Compare the rank of the two specified floating
8172/// point types, ignoring the domain of the type (i.e. 'double' ==
8173/// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If
8174/// LHS < RHS, return -1.
8175int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const {
8176 FloatingRank LHSR = getFloatingRank(T: LHS);
8177 FloatingRank RHSR = getFloatingRank(T: RHS);
8178
8179 if (LHSR == RHSR)
8180 return 0;
8181 if (LHSR > RHSR)
8182 return 1;
8183 return -1;
8184}
8185
8186int ASTContext::getFloatingTypeSemanticOrder(QualType LHS, QualType RHS) const {
8187 if (&getFloatTypeSemantics(T: LHS) == &getFloatTypeSemantics(T: RHS))
8188 return 0;
8189 return getFloatingTypeOrder(LHS, RHS);
8190}
8191
/// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This
/// routine will assert if passed a built-in type that isn't an integer or enum,
/// or if it is not canonicalized.
unsigned ASTContext::getIntegerRank(const Type *T) const {
  assert(T->isCanonicalUnqualified() && "T should be canonicalized");

  // The rank is encoded as (bit-width << 3) + a small per-kind tiebreaker,
  // so a wider type always outranks a narrower one, and among same-width
  // types the low 3 bits impose the standard ordering.

  // Results in this 'losing' to any type of the same size, but winning if
  // larger.
  if (const auto *EIT = dyn_cast<BitIntType>(Val: T))
    return 0 + (EIT->getNumBits() << 3);

  // Overflow-behavior wrappers rank the same as their underlying type.
  if (const auto *OBT = dyn_cast<OverflowBehaviorType>(Val: T))
    return getIntegerRank(T: OBT->getUnderlyingType().getTypePtr());

  switch (cast<BuiltinType>(Val: T)->getKind()) {
  default: llvm_unreachable("getIntegerRank(): not a built-in integer");
  case BuiltinType::Bool:
    return 1 + (getIntWidth(T: BoolTy) << 3);
  case BuiltinType::Char_S:
  case BuiltinType::Char_U:
  case BuiltinType::SChar:
  case BuiltinType::UChar:
    return 2 + (getIntWidth(T: CharTy) << 3);
  case BuiltinType::Short:
  case BuiltinType::UShort:
    return 3 + (getIntWidth(T: ShortTy) << 3);
  case BuiltinType::Int:
  case BuiltinType::UInt:
    return 4 + (getIntWidth(T: IntTy) << 3);
  case BuiltinType::Long:
  case BuiltinType::ULong:
    return 5 + (getIntWidth(T: LongTy) << 3);
  case BuiltinType::LongLong:
  case BuiltinType::ULongLong:
    return 6 + (getIntWidth(T: LongLongTy) << 3);
  case BuiltinType::Int128:
  case BuiltinType::UInt128:
    return 7 + (getIntWidth(T: Int128Ty) << 3);

  // "The ranks of char8_t, char16_t, char32_t, and wchar_t equal the ranks of
  // their underlying types" [c++20 conv.rank]
  case BuiltinType::Char8:
    return getIntegerRank(T: UnsignedCharTy.getTypePtr());
  case BuiltinType::Char16:
    return getIntegerRank(
        T: getFromTargetType(Type: Target->getChar16Type()).getTypePtr());
  case BuiltinType::Char32:
    return getIntegerRank(
        T: getFromTargetType(Type: Target->getChar32Type()).getTypePtr());
  case BuiltinType::WChar_S:
  case BuiltinType::WChar_U:
    return getIntegerRank(
        T: getFromTargetType(Type: Target->getWCharType()).getTypePtr());
  }
}
8247
/// Whether this is a promotable bitfield reference according
/// to C99 6.3.1.1p2, bullet 2 (and GCC extensions).
///
/// \returns the type this bit-field will promote to, or NULL if no
/// promotion occurs.
QualType ASTContext::isPromotableBitField(Expr *E) const {
  // Dependent expressions cannot be classified yet.
  if (E->isTypeDependent() || E->isValueDependent())
    return {};

  // C++ [conv.prom]p5:
  //   If the bit-field has an enumerated type, it is treated as any other
  //   value of that type for promotion purposes.
  if (getLangOpts().CPlusPlus && E->getType()->isEnumeralType())
    return {};

  // FIXME: We should not do this unless E->refersToBitField() is true. This
  // matters in C where getSourceBitField() will find bit-fields for various
  // cases where the source expression is not a bit-field designator.

  FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields?
  if (!Field)
    return {};

  // Declared type of the bit-field; consulted below for its signedness.
  QualType FT = Field->getType();

  uint64_t BitWidth = Field->getBitWidthValue();
  uint64_t IntSize = getTypeSize(T: IntTy);
  // C++ [conv.prom]p5:
  //   A prvalue for an integral bit-field can be converted to a prvalue of type
  //   int if int can represent all the values of the bit-field; otherwise, it
  //   can be converted to unsigned int if unsigned int can represent all the
  //   values of the bit-field. If the bit-field is larger yet, no integral
  //   promotion applies to it.
  // C11 6.3.1.1/2:
  //   [For a bit-field of type _Bool, int, signed int, or unsigned int:]
  //   If an int can represent all values of the original type (as restricted by
  //   the width, for a bit-field), the value is converted to an int; otherwise,
  //   it is converted to an unsigned int.
  //
  // FIXME: C does not permit promotion of a 'long : 3' bitfield to int.
  //        We perform that promotion here to match GCC and C++.
  // FIXME: C does not permit promotion of an enum bit-field whose rank is
  //        greater than that of 'int'. We perform that promotion to match GCC.
  //
  // C23 6.3.1.1p2:
  //   The value from a bit-field of a bit-precise integer type is converted to
  //   the corresponding bit-precise integer type. (The rest is the same as in
  //   C11.)
  if (QualType QT = Field->getType(); QT->isBitIntType())
    return QT;

  if (BitWidth < IntSize)
    return IntTy;

  if (BitWidth == IntSize)
    return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy;

  // Bit-fields wider than int are not subject to promotions, and therefore act
  // like the base type. GCC has some weird bugs in this area that we
  // deliberately do not follow (GCC follows a pre-standard resolution to
  // C's DR315 which treats bit-width as being part of the type, and this leaks
  // into their semantics in some cases).
  return {};
}
8312
/// getPromotedIntegerType - Returns the type that Promotable will
/// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable
/// integer type.
QualType ASTContext::getPromotedIntegerType(QualType Promotable) const {
  assert(!Promotable.isNull());
  assert(isPromotableIntegerType(Promotable));
  // Enums promote to their (precomputed) promotion type.
  if (const auto *ED = Promotable->getAsEnumDecl())
    return ED->getPromotionType();

  // OverflowBehaviorTypes promote their underlying type and preserve OBT
  // qualifier.
  if (const auto *OBT = Promotable->getAs<OverflowBehaviorType>()) {
    QualType PromotedUnderlying =
        getPromotedIntegerType(Promotable: OBT->getUnderlyingType());
    return getOverflowBehaviorType(Kind: OBT->getBehaviorKind(), Underlying: PromotedUnderlying);
  }

  if (const auto *BT = Promotable->getAs<BuiltinType>()) {
    // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t
    // (3.9.1) can be converted to a prvalue of the first of the following
    // types that can represent all the values of its underlying type:
    // int, unsigned int, long int, unsigned long int, long long int, or
    // unsigned long long int [...]
    // FIXME: Is there some better way to compute this?
    if (BT->getKind() == BuiltinType::WChar_S ||
        BT->getKind() == BuiltinType::WChar_U ||
        BT->getKind() == BuiltinType::Char8 ||
        BT->getKind() == BuiltinType::Char16 ||
        BT->getKind() == BuiltinType::Char32) {
      bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S;
      uint64_t FromSize = getTypeSize(T: BT);
      QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy,
                                  LongLongTy, UnsignedLongLongTy };
      // Pick the first candidate wide enough to represent every value of
      // the source (strictly wider, or same width and same signedness).
      for (const auto &PT : PromoteTypes) {
        uint64_t ToSize = getTypeSize(T: PT);
        if (FromSize < ToSize ||
            (FromSize == ToSize && FromIsSigned == PT->isSignedIntegerType()))
          return PT;
      }
      llvm_unreachable("char type should fit into long long");
    }
  }

  // At this point, we should have a signed or unsigned integer type.
  if (Promotable->isSignedIntegerType())
    return IntTy;
  uint64_t PromotableSize = getIntWidth(T: Promotable);
  uint64_t IntSize = getIntWidth(T: IntTy);
  assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize);
  // An unsigned type narrower than int fits in int; one exactly as wide as
  // int must promote to unsigned int.
  return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy;
}
8364
8365/// Recurses in pointer/array types until it finds an objc retainable
8366/// type and returns its ownership.
8367Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const {
8368 while (!T.isNull()) {
8369 if (T.getObjCLifetime() != Qualifiers::OCL_None)
8370 return T.getObjCLifetime();
8371 if (T->isArrayType())
8372 T = getBaseElementType(type: T);
8373 else if (const auto *PT = T->getAs<PointerType>())
8374 T = PT->getPointeeType();
8375 else if (const auto *RT = T->getAs<ReferenceType>())
8376 T = RT->getPointeeType();
8377 else
8378 break;
8379 }
8380
8381 return Qualifiers::OCL_None;
8382}
8383
8384static const Type *getIntegerTypeForEnum(const EnumType *ET) {
8385 // Incomplete enum types are not treated as integer types.
8386 // FIXME: In C++, enum types are never integer types.
8387 const EnumDecl *ED = ET->getDecl()->getDefinitionOrSelf();
8388 if (ED->isComplete() && !ED->isScoped())
8389 return ED->getIntegerType().getTypePtr();
8390 return nullptr;
8391}
8392
/// getIntegerTypeOrder - Returns the highest ranked integer type:
/// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If
/// LHS < RHS, return -1.
int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const {
  // Compare canonical types so type sugar (typedefs etc.) is ignored.
  const Type *LHSC = getCanonicalType(T: LHS).getTypePtr();
  const Type *RHSC = getCanonicalType(T: RHS).getTypePtr();

  // Unwrap enums to their underlying type.
  if (const auto *ET = dyn_cast<EnumType>(Val: LHSC))
    LHSC = getIntegerTypeForEnum(ET);
  if (const auto *ET = dyn_cast<EnumType>(Val: RHSC))
    RHSC = getIntegerTypeForEnum(ET);

  if (LHSC == RHSC) return 0;

  bool LHSUnsigned = LHSC->isUnsignedIntegerType();
  bool RHSUnsigned = RHSC->isUnsignedIntegerType();

  unsigned LHSRank = getIntegerRank(T: LHSC);
  unsigned RHSRank = getIntegerRank(T: RHSC);

  if (LHSUnsigned == RHSUnsigned) { // Both signed or both unsigned.
    if (LHSRank == RHSRank) return 0;
    return LHSRank > RHSRank ? 1 : -1;
  }

  // Otherwise, the LHS is signed and the RHS is unsigned or visa versa.
  if (LHSUnsigned) {
    // If the unsigned [LHS] type is larger, return it.
    if (LHSRank >= RHSRank)
      return 1;

    // If the signed type can represent all values of the unsigned type, it
    // wins. Because we are dealing with 2's complement and types that are
    // powers of two larger than each other, this is always safe.
    return -1;
  }

  // If the unsigned [RHS] type is larger, return it.
  if (RHSRank >= LHSRank)
    return -1;

  // If the signed type can represent all values of the unsigned type, it
  // wins. Because we are dealing with 2's complement and types that are
  // powers of two larger than each other, this is always safe.
  return 1;
}
8440
8441TypedefDecl *ASTContext::getCFConstantStringDecl() const {
8442 if (CFConstantStringTypeDecl)
8443 return CFConstantStringTypeDecl;
8444
8445 assert(!CFConstantStringTagDecl &&
8446 "tag and typedef should be initialized together");
8447 CFConstantStringTagDecl = buildImplicitRecord(Name: "__NSConstantString_tag");
8448 CFConstantStringTagDecl->startDefinition();
8449
8450 struct {
8451 QualType Type;
8452 const char *Name;
8453 } Fields[5];
8454 unsigned Count = 0;
8455
8456 /// Objective-C ABI
8457 ///
8458 /// typedef struct __NSConstantString_tag {
8459 /// const int *isa;
8460 /// int flags;
8461 /// const char *str;
8462 /// long length;
8463 /// } __NSConstantString;
8464 ///
8465 /// Swift ABI (4.1, 4.2)
8466 ///
8467 /// typedef struct __NSConstantString_tag {
8468 /// uintptr_t _cfisa;
8469 /// uintptr_t _swift_rc;
8470 /// _Atomic(uint64_t) _cfinfoa;
8471 /// const char *_ptr;
8472 /// uint32_t _length;
8473 /// } __NSConstantString;
8474 ///
8475 /// Swift ABI (5.0)
8476 ///
8477 /// typedef struct __NSConstantString_tag {
8478 /// uintptr_t _cfisa;
8479 /// uintptr_t _swift_rc;
8480 /// _Atomic(uint64_t) _cfinfoa;
8481 /// const char *_ptr;
8482 /// uintptr_t _length;
8483 /// } __NSConstantString;
8484
8485 const auto CFRuntime = getLangOpts().CFRuntime;
8486 if (static_cast<unsigned>(CFRuntime) <
8487 static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift)) {
8488 Fields[Count++] = { .Type: getPointerType(T: IntTy.withConst()), .Name: "isa" };
8489 Fields[Count++] = { .Type: IntTy, .Name: "flags" };
8490 Fields[Count++] = { .Type: getPointerType(T: CharTy.withConst()), .Name: "str" };
8491 Fields[Count++] = { .Type: LongTy, .Name: "length" };
8492 } else {
8493 Fields[Count++] = { .Type: getUIntPtrType(), .Name: "_cfisa" };
8494 Fields[Count++] = { .Type: getUIntPtrType(), .Name: "_swift_rc" };
8495 Fields[Count++] = { .Type: getFromTargetType(Type: Target->getUInt64Type()), .Name: "_swift_rc" };
8496 Fields[Count++] = { .Type: getPointerType(T: CharTy.withConst()), .Name: "_ptr" };
8497 if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 ||
8498 CFRuntime == LangOptions::CoreFoundationABI::Swift4_2)
8499 Fields[Count++] = { .Type: IntTy, .Name: "_ptr" };
8500 else
8501 Fields[Count++] = { .Type: getUIntPtrType(), .Name: "_ptr" };
8502 }
8503
8504 // Create fields
8505 for (unsigned i = 0; i < Count; ++i) {
8506 FieldDecl *Field =
8507 FieldDecl::Create(C: *this, DC: CFConstantStringTagDecl, StartLoc: SourceLocation(),
8508 IdLoc: SourceLocation(), Id: &Idents.get(Name: Fields[i].Name),
8509 T: Fields[i].Type, /*TInfo=*/nullptr,
8510 /*BitWidth=*/BW: nullptr, /*Mutable=*/false, InitStyle: ICIS_NoInit);
8511 Field->setAccess(AS_public);
8512 CFConstantStringTagDecl->addDecl(D: Field);
8513 }
8514
8515 CFConstantStringTagDecl->completeDefinition();
8516 // This type is designed to be compatible with NSConstantString, but cannot
8517 // use the same name, since NSConstantString is an interface.
8518 CanQualType tagType = getCanonicalTagType(TD: CFConstantStringTagDecl);
8519 CFConstantStringTypeDecl =
8520 buildImplicitTypedef(T: tagType, Name: "__NSConstantString");
8521
8522 return CFConstantStringTypeDecl;
8523}
8524
8525RecordDecl *ASTContext::getCFConstantStringTagDecl() const {
8526 if (!CFConstantStringTagDecl)
8527 getCFConstantStringDecl(); // Build the tag and the typedef.
8528 return CFConstantStringTagDecl;
8529}
8530
8531// getCFConstantStringType - Return the type used for constant CFStrings.
8532QualType ASTContext::getCFConstantStringType() const {
8533 return getTypedefType(Keyword: ElaboratedTypeKeyword::None, /*Qualifier=*/std::nullopt,
8534 Decl: getCFConstantStringDecl());
8535}
8536
8537QualType ASTContext::getObjCSuperType() const {
8538 if (ObjCSuperType.isNull()) {
8539 RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord(Name: "objc_super");
8540 getTranslationUnitDecl()->addDecl(D: ObjCSuperTypeDecl);
8541 ObjCSuperType = getCanonicalTagType(TD: ObjCSuperTypeDecl);
8542 }
8543 return ObjCSuperType;
8544}
8545
8546void ASTContext::setCFConstantStringType(QualType T) {
8547 const auto *TT = T->castAs<TypedefType>();
8548 CFConstantStringTypeDecl = cast<TypedefDecl>(Val: TT->getDecl());
8549 CFConstantStringTagDecl = TT->castAsRecordDecl();
8550}
8551
QualType ASTContext::getBlockDescriptorType() const {
  // Lazily build and cache the implicit
  //   struct __block_descriptor { unsigned long reserved; unsigned long Size; }
  // record used by the blocks runtime.
  if (BlockDescriptorType)
    return getCanonicalTagType(TD: BlockDescriptorType);

  RecordDecl *RD;
  // FIXME: Needs the FlagAppleBlock bit.
  RD = buildImplicitRecord(Name: "__block_descriptor");
  RD->startDefinition();

  QualType FieldTypes[] = {
    UnsignedLongTy,
    UnsignedLongTy,
  };

  static const char *const FieldNames[] = {
    "reserved",
    "Size"
  };

  // Add the two fields, both public.
  for (size_t i = 0; i < 2; ++i) {
    FieldDecl *Field = FieldDecl::Create(
        C: *this, DC: RD, StartLoc: SourceLocation(), IdLoc: SourceLocation(),
        Id: &Idents.get(Name: FieldNames[i]), T: FieldTypes[i], /*TInfo=*/nullptr,
        /*BitWidth=*/BW: nullptr, /*Mutable=*/false, InitStyle: ICIS_NoInit);
    Field->setAccess(AS_public);
    RD->addDecl(D: Field);
  }

  RD->completeDefinition();

  BlockDescriptorType = RD;

  return getCanonicalTagType(TD: BlockDescriptorType);
}
8586
QualType ASTContext::getBlockDescriptorExtendedType() const {
  // Lazily build and cache the extended block descriptor record, which adds
  // copy/dispose helper function pointers to the basic descriptor layout.
  if (BlockDescriptorExtendedType)
    return getCanonicalTagType(TD: BlockDescriptorExtendedType);

  RecordDecl *RD;
  // FIXME: Needs the FlagAppleBlock bit.
  RD = buildImplicitRecord(Name: "__block_descriptor_withcopydispose");
  RD->startDefinition();

  QualType FieldTypes[] = {
    UnsignedLongTy,
    UnsignedLongTy,
    getPointerType(T: VoidPtrTy),
    getPointerType(T: VoidPtrTy)
  };

  static const char *const FieldNames[] = {
    "reserved",
    "Size",
    "CopyFuncPtr",
    "DestroyFuncPtr"
  };

  // Add the four fields, all public.
  for (size_t i = 0; i < 4; ++i) {
    FieldDecl *Field = FieldDecl::Create(
        C: *this, DC: RD, StartLoc: SourceLocation(), IdLoc: SourceLocation(),
        Id: &Idents.get(Name: FieldNames[i]), T: FieldTypes[i], /*TInfo=*/nullptr,
        /*BitWidth=*/BW: nullptr,
        /*Mutable=*/false, InitStyle: ICIS_NoInit);
    Field->setAccess(AS_public);
    RD->addDecl(D: Field);
  }

  RD->completeDefinition();

  BlockDescriptorExtendedType = RD;
  return getCanonicalTagType(TD: BlockDescriptorExtendedType);
}
8625
// Classify a type into one of the OpenCL type kinds (image, pipe, sampler,
// etc.), returning OCLTK_Default for anything that is not an OpenCL type.
OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const {
  const auto *BT = dyn_cast<BuiltinType>(Val: T);

  if (!BT) {
    // Pipes are the only non-builtin types with a dedicated OpenCL kind.
    if (isa<PipeType>(Val: T))
      return OCLTK_Pipe;

    return OCLTK_Default;
  }

  switch (BT->getKind()) {
// Every OpenCL image builtin maps to the single Image kind.
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
  case BuiltinType::Id: \
    return OCLTK_Image;
#include "clang/Basic/OpenCLImageTypes.def"

  case BuiltinType::OCLClkEvent:
    return OCLTK_ClkEvent;

  case BuiltinType::OCLEvent:
    return OCLTK_Event;

  case BuiltinType::OCLQueue:
    return OCLTK_Queue;

  case BuiltinType::OCLReserveID:
    return OCLTK_ReserveID;

  case BuiltinType::OCLSampler:
    return OCLTK_Sampler;

  default:
    return OCLTK_Default;
  }
}
8661
8662LangAS ASTContext::getOpenCLTypeAddrSpace(const Type *T) const {
8663 return Target->getOpenCLTypeAddrSpace(TK: getOpenCLTypeKind(T));
8664}
8665
/// BlockRequiresCopying - Returns true if byref variable "D" of type "Ty"
/// requires copy/dispose. Note that this must match the logic
/// in buildByrefHelpers.
bool ASTContext::BlockRequiresCopying(QualType Ty,
                                      const VarDecl *D) {
  // C++ records need helpers unless they have no copy expression and a
  // trivial destructor.
  if (const CXXRecordDecl *record = Ty->getAsCXXRecordDecl()) {
    const Expr *copyExpr = getBlockVarCopyInit(VD: D).getCopyExpr();
    if (!copyExpr && record->hasTrivialDestructor()) return false;

    return true;
  }

  // Address-discriminated pointer-auth values must be re-signed on copy.
  if (Ty.hasAddressDiscriminatedPointerAuth())
    return true;

  // The block needs copy/destroy helpers if Ty is non-trivial to destructively
  // move or destroy.
  if (Ty.isNonTrivialToPrimitiveDestructiveMove() || Ty.isDestructedType())
    return true;

  if (!Ty->isObjCRetainableType()) return false;

  Qualifiers qs = Ty.getQualifiers();

  // If we have lifetime, that dominates.
  if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) {
    switch (lifetime) {
      case Qualifiers::OCL_None: llvm_unreachable("impossible");

      // These are just bits as far as the runtime is concerned.
      case Qualifiers::OCL_ExplicitNone:
      case Qualifiers::OCL_Autoreleasing:
        return false;

      // These cases should have been taken care of when checking the type's
      // non-triviality.
      case Qualifiers::OCL_Weak:
      case Qualifiers::OCL_Strong:
        llvm_unreachable("impossible");
    }
    llvm_unreachable("fell out of lifetime switch!");
  }
  // No lifetime qualifier: retainable object pointers still need helpers.
  return (Ty->isBlockPointerType() || isObjCNSObjectType(Ty) ||
          Ty->isObjCObjectPointerType());
}
8711
// Determine the ObjC lifetime and layout kind for a __block (byref)
// variable of type Ty. Returns false when lifetime tracking does not apply
// (non-ObjC modes, or when GC is enabled).
bool ASTContext::getByrefLifetime(QualType Ty,
                                  Qualifiers::ObjCLifetime &LifeTime,
                                  bool &HasByrefExtendedLayout) const {
  if (!getLangOpts().ObjC ||
      getLangOpts().getGC() != LangOptions::NonGC)
    return false;

  HasByrefExtendedLayout = false;
  if (Ty->isRecordType()) {
    // Records get the extended byref layout and no tracked lifetime.
    HasByrefExtendedLayout = true;
    LifeTime = Qualifiers::OCL_None;
  } else if ((LifeTime = Ty.getObjCLifetime())) {
    // Honor the ARC qualifiers.
  } else if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) {
    // The MRR rule.
    LifeTime = Qualifiers::OCL_ExplicitNone;
  } else {
    LifeTime = Qualifiers::OCL_None;
  }
  return true;
}
8733
8734CanQualType ASTContext::getNSUIntegerType() const {
8735 assert(Target && "Expected target to be initialized");
8736 const llvm::Triple &T = Target->getTriple();
8737 // Windows is LLP64 rather than LP64
8738 if (T.isOSWindows() && T.isArch64Bit())
8739 return UnsignedLongLongTy;
8740 return UnsignedLongTy;
8741}
8742
8743CanQualType ASTContext::getNSIntegerType() const {
8744 assert(Target && "Expected target to be initialized");
8745 const llvm::Triple &T = Target->getTriple();
8746 // Windows is LLP64 rather than LP64
8747 if (T.isOSWindows() && T.isArch64Bit())
8748 return LongLongTy;
8749 return LongTy;
8750}
8751
8752TypedefDecl *ASTContext::getObjCInstanceTypeDecl() {
8753 if (!ObjCInstanceTypeDecl)
8754 ObjCInstanceTypeDecl =
8755 buildImplicitTypedef(T: getObjCIdType(), Name: "instancetype");
8756 return ObjCInstanceTypeDecl;
8757}
8758
8759// This returns true if a type has been typedefed to BOOL:
8760// typedef <type> BOOL;
8761static bool isTypeTypedefedAsBOOL(QualType T) {
8762 if (const auto *TT = dyn_cast<TypedefType>(Val&: T))
8763 if (IdentifierInfo *II = TT->getDecl()->getIdentifier())
8764 return II->isStr(Str: "BOOL");
8765
8766 return false;
8767}
8768
/// getObjCEncodingTypeSize returns size of type for objective-c encoding
/// purpose.
CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const {
  // Incomplete types (other than incomplete arrays) contribute no size.
  if (!type->isIncompleteArrayType() && type->isIncompleteType())
    return CharUnits::Zero();

  CharUnits sz = getTypeSizeInChars(T: type);

  // Make all integer and enum types at least as large as an int
  if (sz.isPositive() && type->isIntegralOrEnumerationType())
    sz = std::max(a: sz, b: getTypeSizeInChars(T: IntTy));
  // Treat arrays as pointers, since that's how they're passed in.
  else if (type->isArrayType())
    sz = getTypeSizeInChars(T: VoidPtrTy);
  return sz;
}
8785
8786bool ASTContext::isMSStaticDataMemberInlineDefinition(const VarDecl *VD) const {
8787 return getTargetInfo().getCXXABI().isMicrosoft() &&
8788 VD->isStaticDataMember() &&
8789 VD->getType()->isIntegralOrEnumerationType() &&
8790 !VD->getFirstDecl()->isOutOfLine() && VD->getFirstDecl()->hasInit();
8791}
8792
// Classify how an inline variable's definition should be emitted: not
// inline at all, a weak (discardable) definition, or a strong one.
ASTContext::InlineVariableDefinitionKind
ASTContext::getInlineVariableDefinitionKind(const VarDecl *VD) const {
  if (!VD->isInline())
    return InlineVariableDefinitionKind::None;

  // In almost all cases, it's a weak definition.
  auto *First = VD->getFirstDecl();
  if (First->isInlineSpecified() || !First->isStaticDataMember())
    return InlineVariableDefinitionKind::Weak;

  // If there's a file-context declaration in this translation unit, it's a
  // non-discardable definition.
  for (auto *D : VD->redecls())
    if (D->getLexicalDeclContext()->isFileContext() &&
        !D->isInlineSpecified() && (D->isConstexpr() || First->isConstexpr()))
      return InlineVariableDefinitionKind::Strong;

  // If we've not seen one yet, we don't know.
  return InlineVariableDefinitionKind::WeakUnknown;
}
8813
8814static std::string charUnitsToString(const CharUnits &CU) {
8815 return llvm::itostr(X: CU.getQuantity());
8816}
8817
/// getObjCEncodingForBlock - Return the encoded type for this block
/// declaration.
std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const {
  std::string S;

  const BlockDecl *Decl = Expr->getBlockDecl();
  QualType BlockTy =
      Expr->getType()->castAs<BlockPointerType>()->getPointeeType();
  QualType BlockReturnTy = BlockTy->castAs<FunctionType>()->getReturnType();
  // Encode result type.
  if (getLangOpts().EncodeExtendedBlockSig)
    getObjCEncodingForMethodParameter(QT: Decl::OBJC_TQ_None, T: BlockReturnTy, S,
                                      Extended: true /*Extended*/);
  else
    getObjCEncodingForType(T: BlockReturnTy, S);
  // Compute size of all parameters.
  // Start with computing size of a pointer in number of bytes.
  // FIXME: There might(should) be a better way of doing this computation!
  CharUnits PtrSize = getTypeSizeInChars(T: VoidPtrTy);
  CharUnits ParmOffset = PtrSize;
  // First pass: total up the frame size so it can be emitted before the
  // per-parameter encodings. Zero-size parameters are skipped entirely.
  for (auto *PI : Decl->parameters()) {
    QualType PType = PI->getType();
    CharUnits sz = getObjCEncodingTypeSize(type: PType);
    if (sz.isZero())
      continue;
    assert(sz.isPositive() && "BlockExpr - Incomplete param type");
    ParmOffset += sz;
  }
  // Size of the argument frame
  S += charUnitsToString(CU: ParmOffset);
  // Block pointer and offset.
  S += "@?0";

  // Argument types.
  // Second pass: emit each parameter's encoding followed by its byte offset.
  ParmOffset = PtrSize;
  for (auto *PVDecl : Decl->parameters()) {
    QualType PType = PVDecl->getOriginalType();
    if (const auto *AT =
            dyn_cast<ArrayType>(Val: PType->getCanonicalTypeInternal())) {
      // Use array's original type only if it has known number of
      // elements.
      if (!isa<ConstantArrayType>(Val: AT))
        PType = PVDecl->getType();
    } else if (PType->isFunctionType())
      PType = PVDecl->getType();
    if (getLangOpts().EncodeExtendedBlockSig)
      getObjCEncodingForMethodParameter(QT: Decl::OBJC_TQ_None, T: PType,
                                      S, Extended: true /*Extended*/);
    else
      getObjCEncodingForType(T: PType, S);
    S += charUnitsToString(CU: ParmOffset);
    ParmOffset += getObjCEncodingTypeSize(type: PType);
  }

  return S;
}
8874
// Return the Objective-C type encoding for a function declaration: the
// encoded return type, the total argument frame size, then each parameter's
// encoding followed by its byte offset.
std::string
ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const {
  std::string S;
  // Encode result type.
  getObjCEncodingForType(T: Decl->getReturnType(), S);
  CharUnits ParmOffset;
  // Compute size of all parameters.
  for (auto *PI : Decl->parameters()) {
    QualType PType = PI->getType();
    CharUnits sz = getObjCEncodingTypeSize(type: PType);
    if (sz.isZero())
      continue;

    assert(sz.isPositive() &&
           "getObjCEncodingForFunctionDecl - Incomplete param type");
    ParmOffset += sz;
  }
  S += charUnitsToString(CU: ParmOffset);
  ParmOffset = CharUnits::Zero();

  // Argument types.
  for (auto *PVDecl : Decl->parameters()) {
    QualType PType = PVDecl->getOriginalType();
    if (const auto *AT =
            dyn_cast<ArrayType>(Val: PType->getCanonicalTypeInternal())) {
      // Use array's original type only if it has known number of
      // elements.
      if (!isa<ConstantArrayType>(Val: AT))
        PType = PVDecl->getType();
    } else if (PType->isFunctionType())
      PType = PVDecl->getType();
    getObjCEncodingForType(T: PType, S);
    S += charUnitsToString(CU: ParmOffset);
    ParmOffset += getObjCEncodingTypeSize(type: PType);
  }

  return S;
}
8913
/// getObjCEncodingForMethodParameter - Return the encoded type for a single
/// method parameter or return type. If Extended, include class names and
/// block object types.
void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT,
                                                   QualType T, std::string& S,
                                                   bool Extended) const {
  // Encode type qualifier, 'in', 'inout', etc. for the parameter.
  getObjCEncodingForTypeQualifier(QT, S);
  // Encode parameter type.
  ObjCEncOptions Options = ObjCEncOptions()
                               .setExpandPointedToStructures()
                               .setExpandStructures()
                               .setIsOutermostType();
  // Extended encodings additionally describe block signatures and class
  // names.
  if (Extended)
    Options.setEncodeBlockParameters().setEncodeClassNames();
  getObjCEncodingForTypeImpl(t: T, S, Options, /*Field=*/nullptr);
}
8931
/// getObjCEncodingForMethodDecl - Return the encoded type for this method
/// declaration.
std::string ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl,
                                                     bool Extended) const {
  // FIXME: This is not very efficient.
  // Encode return type.
  std::string S;
  getObjCEncodingForMethodParameter(QT: Decl->getObjCDeclQualifier(),
                                    T: Decl->getReturnType(), S, Extended);
  // Compute size of all parameters.
  // Start with computing size of a pointer in number of bytes.
  // FIXME: There might(should) be a better way of doing this computation!
  CharUnits PtrSize = getTypeSizeInChars(T: VoidPtrTy);
  // The first two arguments (self and _cmd) are pointers; account for
  // their size.
  CharUnits ParmOffset = 2 * PtrSize;
  // First pass: compute the total argument frame size (zero-size parameters
  // are skipped entirely).
  for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(),
       E = Decl->sel_param_end(); PI != E; ++PI) {
    QualType PType = (*PI)->getType();
    CharUnits sz = getObjCEncodingTypeSize(type: PType);
    if (sz.isZero())
      continue;

    assert(sz.isPositive() &&
           "getObjCEncodingForMethodDecl - Incomplete param type");
    ParmOffset += sz;
  }
  // Frame size, then the implicit self ('@' at offset 0) and _cmd (':' at
  // offset PtrSize) arguments.
  S += charUnitsToString(CU: ParmOffset);
  S += "@0:";
  S += charUnitsToString(CU: PtrSize);

  // Argument types.
  // Second pass: emit each declared parameter's encoding and byte offset.
  ParmOffset = 2 * PtrSize;
  for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(),
       E = Decl->sel_param_end(); PI != E; ++PI) {
    const ParmVarDecl *PVDecl = *PI;
    QualType PType = PVDecl->getOriginalType();
    if (const auto *AT =
            dyn_cast<ArrayType>(Val: PType->getCanonicalTypeInternal())) {
      // Use array's original type only if it has known number of
      // elements.
      if (!isa<ConstantArrayType>(Val: AT))
        PType = PVDecl->getType();
    } else if (PType->isFunctionType())
      PType = PVDecl->getType();
    getObjCEncodingForMethodParameter(QT: PVDecl->getObjCDeclQualifier(),
                                      T: PType, S, Extended);
    S += charUnitsToString(CU: ParmOffset);
    ParmOffset += getObjCEncodingTypeSize(type: PType);
  }

  return S;
}
8985
8986ObjCPropertyImplDecl *
8987ASTContext::getObjCPropertyImplDeclForPropertyDecl(
8988 const ObjCPropertyDecl *PD,
8989 const Decl *Container) const {
8990 if (!Container)
8991 return nullptr;
8992 if (const auto *CID = dyn_cast<ObjCCategoryImplDecl>(Val: Container)) {
8993 for (auto *PID : CID->property_impls())
8994 if (PID->getPropertyDecl() == PD)
8995 return PID;
8996 } else {
8997 const auto *OID = cast<ObjCImplementationDecl>(Val: Container);
8998 for (auto *PID : OID->property_impls())
8999 if (PID->getPropertyDecl() == PD)
9000 return PID;
9001 }
9002 return nullptr;
9003}
9004
/// getObjCEncodingForPropertyDecl - Return the encoded type for this
/// property declaration. If non-NULL, Container must be either an
/// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be
/// NULL when getting encodings for protocol properties.
/// Property attributes are stored as a comma-delimited C string. The simple
/// attributes readonly and bycopy are encoded as single characters. The
/// parametrized attributes, getter=name, setter=name, and ivar=name, are
/// encoded as single characters, followed by an identifier. Property types
/// are also encoded as a parametrized attribute. The characters used to encode
/// these attributes are defined by the following enumeration:
/// @code
/// enum PropertyAttributes {
/// kPropertyReadOnly = 'R', // property is read-only.
/// kPropertyBycopy = 'C', // property is a copy of the value last assigned
/// kPropertyByref = '&', // property is a reference to the value last assigned
/// kPropertyDynamic = 'D', // property is dynamic
/// kPropertyGetter = 'G', // followed by getter selector name
/// kPropertySetter = 'S', // followed by setter selector name
/// kPropertyInstanceVariable = 'V' // followed by instance variable name
/// kPropertyType = 'T' // followed by old-style type encoding.
/// kPropertyWeak = 'W' // 'weak' property
/// kPropertyStrong = 'P' // property GC'able
/// kPropertyNonAtomic = 'N' // property non-atomic
/// kPropertyOptional = '?' // property optional
/// };
/// @endcode
std::string
ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
                                           const Decl *Container) const {
  // Collect information from the property implementation decl(s):
  // whether the property is @dynamic, or which @synthesize backs it.
  bool Dynamic = false;
  ObjCPropertyImplDecl *SynthesizePID = nullptr;

  if (ObjCPropertyImplDecl *PropertyImpDecl =
      getObjCPropertyImplDeclForPropertyDecl(PD, Container)) {
    if (PropertyImpDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
      Dynamic = true;
    else
      SynthesizePID = PropertyImpDecl;
  }

  // FIXME: This is not very efficient.
  // The encoding always starts with the 'T' (type) attribute.
  std::string S = "T";

  // Encode result type.
  // GCC has some special rules regarding encoding of properties which
  // closely resembles encoding of ivars.
  getObjCEncodingForPropertyType(T: PD->getType(), S);

  if (PD->isOptional())
    S += ",?";

  // Memory-management attribute: for read-only properties GCC still emits
  // the ownership marker alongside 'R'; otherwise it follows the setter
  // semantics.
  if (PD->isReadOnly()) {
    S += ",R";
    if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_copy)
      S += ",C";
    if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_retain)
      S += ",&";
    if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak)
      S += ",W";
  } else {
    switch (PD->getSetterKind()) {
    case ObjCPropertyDecl::Assign: break;
    case ObjCPropertyDecl::Copy: S += ",C"; break;
    case ObjCPropertyDecl::Retain: S += ",&"; break;
    case ObjCPropertyDecl::Weak: S += ",W"; break;
    }
  }

  // It really isn't clear at all what this means, since properties
  // are "dynamic by default".
  if (Dynamic)
    S += ",D";

  if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_nonatomic)
    S += ",N";

  // Parametrized attributes: custom getter/setter selector names.
  if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_getter) {
    S += ",G";
    S += PD->getGetterName().getAsString();
  }

  if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_setter) {
    S += ",S";
    S += PD->getSetterName().getAsString();
  }

  // Backing instance variable, only known for synthesized properties.
  if (SynthesizePID) {
    const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl();
    S += ",V";
    S += OID->getNameAsString();
  }

  // FIXME: OBJCGC: weak & strong
  return S;
}
9101
9102/// getLegacyIntegralTypeEncoding -
9103/// Another legacy compatibility encoding: 32-bit longs are encoded as
9104/// 'l' or 'L' , but not always. For typedefs, we need to use
9105/// 'i' or 'I' instead if encoding a struct field, or a pointer!
9106void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const {
9107 if (PointeeTy->getAs<TypedefType>()) {
9108 if (const auto *BT = PointeeTy->getAs<BuiltinType>()) {
9109 if (BT->getKind() == BuiltinType::ULong && getIntWidth(T: PointeeTy) == 32)
9110 PointeeTy = UnsignedIntTy;
9111 else
9112 if (BT->getKind() == BuiltinType::Long && getIntWidth(T: PointeeTy) == 32)
9113 PointeeTy = IntTy;
9114 }
9115 }
9116}
9117
9118void ASTContext::getObjCEncodingForType(QualType T, std::string& S,
9119 const FieldDecl *Field,
9120 QualType *NotEncodedT) const {
9121 // We follow the behavior of gcc, expanding structures which are
9122 // directly pointed to, and expanding embedded structures. Note that
9123 // these rules are sufficient to prevent recursive encoding of the
9124 // same type.
9125 getObjCEncodingForTypeImpl(t: T, S,
9126 Options: ObjCEncOptions()
9127 .setExpandPointedToStructures()
9128 .setExpandStructures()
9129 .setIsOutermostType(),
9130 Field, NotEncodedT);
9131}
9132
9133void ASTContext::getObjCEncodingForPropertyType(QualType T,
9134 std::string& S) const {
9135 // Encode result type.
9136 // GCC has some special rules regarding encoding of properties which
9137 // closely resembles encoding of ivars.
9138 getObjCEncodingForTypeImpl(t: T, S,
9139 Options: ObjCEncOptions()
9140 .setExpandPointedToStructures()
9141 .setExpandStructures()
9142 .setIsOutermostType()
9143 .setEncodingProperty(),
9144 /*Field=*/nullptr);
9145}
9146
/// Map a builtin (primitive) type to its single-character Objective-C
/// @encode() code, e.g. 'i' for int. Returns ' ' (and, for vector builtins,
/// emits a diagnostic) for types that have no defined encoding; types that
/// can never appear in an @encode are unreachable.
static char getObjCEncodingForPrimitiveType(const ASTContext *C,
                                            const BuiltinType *BT) {
    BuiltinType::Kind kind = BT->getKind();
    switch (kind) {
    case BuiltinType::Void: return 'v';
    case BuiltinType::Bool: return 'B';
    case BuiltinType::Char8:
    case BuiltinType::Char_U:
    case BuiltinType::UChar: return 'C';
    case BuiltinType::Char16:
    case BuiltinType::UShort: return 'S';
    case BuiltinType::Char32:
    case BuiltinType::UInt: return 'I';
    // (unsigned) long encodes as (unsigned) long long on LP64 targets.
    case BuiltinType::ULong:
      return C->getTargetInfo().getLongWidth() == 32 ? 'L' : 'Q';
    case BuiltinType::UInt128: return 'T';
    case BuiltinType::ULongLong: return 'Q';
    case BuiltinType::Char_S:
    case BuiltinType::SChar: return 'c';
    case BuiltinType::Short: return 's';
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Int: return 'i';
    case BuiltinType::Long:
      return C->getTargetInfo().getLongWidth() == 32 ? 'l' : 'q';
    case BuiltinType::LongLong: return 'q';
    case BuiltinType::Int128: return 't';
    case BuiltinType::Float: return 'f';
    case BuiltinType::Double: return 'd';
    case BuiltinType::LongDouble: return 'D';
    case BuiltinType::NullPtr: return '*'; // like char*

    // No encoding is defined for these; return a placeholder blank.
    case BuiltinType::BFloat16:
    case BuiltinType::Float16:
    case BuiltinType::Float128:
    case BuiltinType::Ibm128:
    case BuiltinType::Half:
    case BuiltinType::ShortAccum:
    case BuiltinType::Accum:
    case BuiltinType::LongAccum:
    case BuiltinType::UShortAccum:
    case BuiltinType::UAccum:
    case BuiltinType::ULongAccum:
    case BuiltinType::ShortFract:
    case BuiltinType::Fract:
    case BuiltinType::LongFract:
    case BuiltinType::UShortFract:
    case BuiltinType::UFract:
    case BuiltinType::ULongFract:
    case BuiltinType::SatShortAccum:
    case BuiltinType::SatAccum:
    case BuiltinType::SatLongAccum:
    case BuiltinType::SatUShortAccum:
    case BuiltinType::SatUAccum:
    case BuiltinType::SatULongAccum:
    case BuiltinType::SatShortFract:
    case BuiltinType::SatFract:
    case BuiltinType::SatLongFract:
    case BuiltinType::SatUShortFract:
    case BuiltinType::SatUFract:
    case BuiltinType::SatULongFract:
      // FIXME: potentially need @encodes for these!
      return ' ';

    // Target-specific vector/reference builtins: diagnose, since a user can
    // legitimately write @encode of one but there is no defined encoding.
#define SVE_TYPE(Name, Id, SingletonId) \
    case BuiltinType::Id:
#include "clang/Basic/AArch64ACLETypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) case BuiltinType::Id:
#include "clang/Basic/AMDGPUTypes.def"
    {
      DiagnosticsEngine &Diags = C->getDiagnostics();
      Diags.Report(DiagID: diag::err_unsupported_objc_primitive_encoding)
          << QualType(BT, 0);
      return ' ';
    }

    // These are handled before this function is reached (they have their own
    // dedicated encodings: '@', '#', ':').
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      llvm_unreachable("@encoding ObjC primitive type");

    // OpenCL and placeholder types don't need @encodings.
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
    case BuiltinType::OCLSampler:
    case BuiltinType::Dependent:
#define PPC_VECTOR_TYPE(Name, Id, Size) \
    case BuiltinType::Id:
#include "clang/Basic/PPCTypes.def"
#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/HLSLIntangibleTypes.def"
#define BUILTIN_TYPE(KIND, ID)
#define PLACEHOLDER_TYPE(KIND, ID) \
    case BuiltinType::KIND:
#include "clang/AST/BuiltinTypes.def"
      llvm_unreachable("invalid builtin type for @encode");
    }
    llvm_unreachable("invalid BuiltinType::Kind value");
}
9258
9259static char ObjCEncodingForEnumDecl(const ASTContext *C, const EnumDecl *ED) {
9260 EnumDecl *Enum = ED->getDefinitionOrSelf();
9261
9262 // The encoding of an non-fixed enum type is always 'i', regardless of size.
9263 if (!Enum->isFixed())
9264 return 'i';
9265
9266 // The encoding of a fixed enum type matches its fixed underlying type.
9267 const auto *BT = Enum->getIntegerType()->castAs<BuiltinType>();
9268 return getObjCEncodingForPrimitiveType(C, BT);
9269}
9270
/// Append the @encode string for a bit-field to \p S. \p FD must be a
/// bit-field (field or ivar); \p T is its declared type. The emitted form
/// depends on the Objective-C runtime family (see below).
static void EncodeBitField(const ASTContext *Ctx, std::string& S,
                           QualType T, const FieldDecl *FD) {
  assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl");
  S += 'b';
  // The NeXT runtime encodes bit fields as b followed by the number of bits.
  // The GNU runtime requires more information; bitfields are encoded as b,
  // then the offset (in bits) of the first element, then the type of the
  // bitfield, then the size in bits. For example, in this structure:
  //
  // struct
  // {
  //    int integer;
  //    int flags:2;
  // };
  // On a 32-bit system, the encoding for flags would be b2 for the NeXT
  // runtime, but b32i2 for the GNU runtime. The reason for this extra
  // information is not especially sensible, but we're stuck with it for
  // compatibility with GCC, although providing it breaks anything that
  // actually uses runtime introspection and wants to work on both runtimes...
  if (Ctx->getLangOpts().ObjCRuntime.isGNUFamily()) {
    uint64_t Offset;

    // The bit offset comes from the interface layout for ivars, and from
    // the record layout for ordinary struct fields.
    if (const auto *IVD = dyn_cast<ObjCIvarDecl>(Val: FD)) {
      Offset = Ctx->lookupFieldBitOffset(OID: IVD->getContainingInterface(), Ivar: IVD);
    } else {
      const RecordDecl *RD = FD->getParent();
      const ASTRecordLayout &RL = Ctx->getASTRecordLayout(D: RD);
      Offset = RL.getFieldOffset(FieldNo: FD->getFieldIndex());
    }

    S += llvm::utostr(X: Offset);

    // GNU runtime also embeds the element type: enums use their enum
    // encoding, everything else must be a builtin.
    if (const auto *ET = T->getAsCanonical<EnumType>())
      S += ObjCEncodingForEnumDecl(C: Ctx, ED: ET->getDecl());
    else {
      const auto *BT = T->castAs<BuiltinType>();
      S += getObjCEncodingForPrimitiveType(C: Ctx, BT);
    }
  }
  // Both runtimes end with the bit width.
  S += llvm::utostr(X: FD->getBitWidthValue());
}
9312
9313// Helper function for determining whether the encoded type string would include
9314// a template specialization type.
9315static bool hasTemplateSpecializationInEncodedString(const Type *T,
9316 bool VisitBasesAndFields) {
9317 T = T->getBaseElementTypeUnsafe();
9318
9319 if (auto *PT = T->getAs<PointerType>())
9320 return hasTemplateSpecializationInEncodedString(
9321 T: PT->getPointeeType().getTypePtr(), VisitBasesAndFields: false);
9322
9323 auto *CXXRD = T->getAsCXXRecordDecl();
9324
9325 if (!CXXRD)
9326 return false;
9327
9328 if (isa<ClassTemplateSpecializationDecl>(Val: CXXRD))
9329 return true;
9330
9331 if (!CXXRD->hasDefinition() || !VisitBasesAndFields)
9332 return false;
9333
9334 for (const auto &B : CXXRD->bases())
9335 if (hasTemplateSpecializationInEncodedString(T: B.getType().getTypePtr(),
9336 VisitBasesAndFields: true))
9337 return true;
9338
9339 for (auto *FD : CXXRD->fields())
9340 if (hasTemplateSpecializationInEncodedString(T: FD->getType().getTypePtr(),
9341 VisitBasesAndFields: true))
9342 return true;
9343
9344 return false;
9345}
9346
// FIXME: Use SmallString for accumulating string.
/// Core worker for all @encode entry points: appends the encoding of \p T
/// to \p S. \p Options controls context-dependent behavior (whether
/// structures are expanded, whether this is the outermost type, etc.).
/// \p FD, when non-null, is the field/ivar being encoded, which enables
/// bit-field handling and embedding of field/class names. \p NotEncodedT,
/// when non-null, receives the first subtype that has no encoding
/// (vectors, matrices, _BitInt, member pointers).
void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
                                            const ObjCEncOptions Options,
                                            const FieldDecl *FD,
                                            QualType *NotEncodedT) const {
  // Dispatch on the canonical type; sugar never affects the encoding.
  CanQualType CT = getCanonicalType(T);
  switch (CT->getTypeClass()) {
  case Type::Builtin:
  case Type::Enum:
    if (FD && FD->isBitField())
      return EncodeBitField(Ctx: this, S, T, FD);
    if (const auto *BT = dyn_cast<BuiltinType>(Val&: CT))
      S += getObjCEncodingForPrimitiveType(C: this, BT);
    else
      S += ObjCEncodingForEnumDecl(C: this, ED: cast<EnumType>(Val&: CT)->getDecl());
    return;

  case Type::Complex:
    // 'j' prefix followed by the element type.
    S += 'j';
    getObjCEncodingForTypeImpl(T: T->castAs<ComplexType>()->getElementType(), S,
                               Options: ObjCEncOptions(),
                               /*Field=*/FD: nullptr);
    return;

  case Type::Atomic:
    // 'A' prefix followed by the value type.
    S += 'A';
    getObjCEncodingForTypeImpl(T: T->castAs<AtomicType>()->getValueType(), S,
                               Options: ObjCEncOptions(),
                               /*Field=*/FD: nullptr);
    return;

  // encoding for pointer or reference types.
  case Type::Pointer:
  case Type::LValueReference:
  case Type::RValueReference: {
    QualType PointeeTy;
    if (isa<PointerType>(Val: CT)) {
      const auto *PT = T->castAs<PointerType>();
      if (PT->isObjCSelType()) {
        S += ':';
        return;
      }
      PointeeTy = PT->getPointeeType();
    } else {
      // References encode exactly like pointers.
      PointeeTy = T->castAs<ReferenceType>()->getPointeeType();
    }

    bool isReadOnly = false;
    // For historical/compatibility reasons, the read-only qualifier of the
    // pointee gets emitted _before_ the '^'. The read-only qualifier of
    // the pointer itself gets ignored, _unless_ we are looking at a typedef!
    // Also, do not emit the 'r' for anything but the outermost type!
    if (T->getAs<TypedefType>()) {
      if (Options.IsOutermostType() && T.isConstQualified()) {
        isReadOnly = true;
        S += 'r';
      }
    } else if (Options.IsOutermostType()) {
      QualType P = PointeeTy;
      // Look through any chain of pointers to find the ultimate pointee.
      while (auto PT = P->getAs<PointerType>())
        P = PT->getPointeeType();
      if (P.isConstQualified()) {
        isReadOnly = true;
        S += 'r';
      }
    }
    if (isReadOnly) {
      // Another legacy compatibility encoding. Some ObjC qualifier and type
      // combinations need to be rearranged.
      // Rewrite "in const" from "nr" to "rn"
      if (StringRef(S).ends_with(Suffix: "nr"))
        S.replace(i1: S.end()-2, i2: S.end(), s: "rn");
    }

    if (PointeeTy->isCharType()) {
      // char pointer types should be encoded as '*' unless it is a
      // type that has been typedef'd to 'BOOL'.
      if (!isTypeTypedefedAsBOOL(T: PointeeTy)) {
        S += '*';
        return;
      }
    } else if (const auto *RTy = PointeeTy->getAsCanonical<RecordType>()) {
      const IdentifierInfo *II = RTy->getDecl()->getIdentifier();
      // GCC binary compat: Need to convert "struct objc_class *" to "#".
      if (II == &Idents.get(Name: "objc_class")) {
        S += '#';
        return;
      }
      // GCC binary compat: Need to convert "struct objc_object *" to "@".
      if (II == &Idents.get(Name: "objc_object")) {
        S += '@';
        return;
      }
      // If the encoded string for the class includes template names, just emit
      // "^v" for pointers to the class.
      if (getLangOpts().CPlusPlus &&
          (!getLangOpts().EncodeCXXClassTemplateSpec &&
           hasTemplateSpecializationInEncodedString(
               T: RTy, VisitBasesAndFields: Options.ExpandPointedToStructures()))) {
        S += "^v";
        return;
      }
      // fall through...
    }
    S += '^';
    getLegacyIntegralTypeEncoding(PointeeTy);

    // Only the first level of pointee is expanded; deeper levels get bare
    // structure names.
    ObjCEncOptions NewOptions;
    if (Options.ExpandPointedToStructures())
      NewOptions.setExpandStructures();
    getObjCEncodingForTypeImpl(T: PointeeTy, S, Options: NewOptions,
                               /*Field=*/FD: nullptr, NotEncodedT);
    return;
  }

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray: {
    const auto *AT = cast<ArrayType>(Val&: CT);

    if (isa<IncompleteArrayType>(Val: AT) && !Options.IsStructField()) {
      // Incomplete arrays are encoded as a pointer to the array element.
      S += '^';

      getObjCEncodingForTypeImpl(
          T: AT->getElementType(), S,
          Options: Options.keepingOnly(Mask: ObjCEncOptions().setExpandStructures()), FD);
    } else {
      S += '[';

      if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: AT))
        S += llvm::utostr(X: CAT->getZExtSize());
      else {
        //Variable length arrays are encoded as a regular array with 0 elements.
        assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) &&
               "Unknown array type!");
        S += '0';
      }

      getObjCEncodingForTypeImpl(
          T: AT->getElementType(), S,
          Options: Options.keepingOnly(Mask: ObjCEncOptions().setExpandStructures()), FD,
          NotEncodedT);
      S += ']';
    }
    return;
  }

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // Function types encode as the unknown-type marker.
    S += '?';
    return;

  case Type::Record: {
    RecordDecl *RDecl = cast<RecordType>(Val&: CT)->getDecl();
    S += RDecl->isUnion() ? '(' : '{';
    // Anonymous structures print as '?'
    if (const IdentifierInfo *II = RDecl->getIdentifier()) {
      S += II->getName();
      if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(Val: RDecl)) {
        const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
        llvm::raw_string_ostream OS(S);
        printTemplateArgumentList(OS, Args: TemplateArgs.asArray(),
                                  Policy: getPrintingPolicy());
      }
    } else {
      S += '?';
    }
    if (Options.ExpandStructures()) {
      S += '=';
      if (!RDecl->isUnion()) {
        getObjCEncodingForStructureImpl(RD: RDecl, S, Field: FD, includeVBases: true, NotEncodedT);
      } else {
        // Unions are expanded inline here rather than via the structure
        // helper (no layout ordering is needed).
        for (const auto *Field : RDecl->fields()) {
          if (FD) {
            S += '"';
            S += Field->getNameAsString();
            S += '"';
          }

          // Special case bit-fields.
          if (Field->isBitField()) {
            getObjCEncodingForTypeImpl(T: Field->getType(), S,
                                       Options: ObjCEncOptions().setExpandStructures(),
                                       FD: Field);
          } else {
            QualType qt = Field->getType();
            getLegacyIntegralTypeEncoding(PointeeTy&: qt);
            getObjCEncodingForTypeImpl(
                T: qt, S,
                Options: ObjCEncOptions().setExpandStructures().setIsStructField(), FD,
                NotEncodedT);
          }
        }
      }
    }
    S += RDecl->isUnion() ? ')' : '}';
    return;
  }

  case Type::BlockPointer: {
    const auto *BT = T->castAs<BlockPointerType>();
    S += "@?"; // Unlike a pointer-to-function, which is "^?".
    if (Options.EncodeBlockParameters()) {
      const auto *FT = BT->getPointeeType()->castAs<FunctionType>();

      S += '<';
      // Block return type
      getObjCEncodingForTypeImpl(T: FT->getReturnType(), S,
                                 Options: Options.forComponentType(), FD, NotEncodedT);
      // Block self
      S += "@?";
      // Block parameters
      if (const auto *FPT = dyn_cast<FunctionProtoType>(Val: FT)) {
        for (const auto &I : FPT->param_types())
          getObjCEncodingForTypeImpl(T: I, S, Options: Options.forComponentType(), FD,
                                     NotEncodedT);
      }
      S += '>';
    }
    return;
  }

  case Type::ObjCObject: {
    // hack to match legacy encoding of *id and *Class
    QualType Ty = getObjCObjectPointerType(ObjectT: CT);
    if (Ty->isObjCIdType()) {
      S += "{objc_object=}";
      return;
    }
    else if (Ty->isObjCClassType()) {
      S += "{objc_class=}";
      return;
    }
    // TODO: Double check to make sure this intentionally falls through.
    [[fallthrough]];
  }

  case Type::ObjCInterface: {
    // Ignore protocol qualifiers when mangling at this level.
    // @encode(class_name)
    ObjCInterfaceDecl *OI = T->castAs<ObjCObjectType>()->getInterface();
    S += '{';
    S += OI->getObjCRuntimeNameAsString();
    if (Options.ExpandStructures()) {
      S += '=';
      // Collect and encode every ivar, including those of superclasses.
      SmallVector<const ObjCIvarDecl*, 32> Ivars;
      DeepCollectObjCIvars(OI, leafClass: true, Ivars);
      for (unsigned i = 0, e = Ivars.size(); i != e; ++i) {
        const FieldDecl *Field = Ivars[i];
        if (Field->isBitField())
          getObjCEncodingForTypeImpl(T: Field->getType(), S,
                                     Options: ObjCEncOptions().setExpandStructures(),
                                     FD: Field);
        else
          getObjCEncodingForTypeImpl(T: Field->getType(), S,
                                     Options: ObjCEncOptions().setExpandStructures(), FD,
                                     NotEncodedT);
      }
    }
    S += '}';
    return;
  }

  case Type::ObjCObjectPointer: {
    const auto *OPT = T->castAs<ObjCObjectPointerType>();
    if (OPT->isObjCIdType()) {
      S += '@';
      return;
    }

    if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) {
      // FIXME: Consider if we need to output qualifiers for 'Class<p>'.
      // Since this is a binary compatibility issue, need to consult with
      // runtime folks. Fortunately, this is a *very* obscure construct.
      S += '#';
      return;
    }

    if (OPT->isObjCQualifiedIdType()) {
      getObjCEncodingForTypeImpl(
          T: getObjCIdType(), S,
          Options: Options.keepingOnly(Mask: ObjCEncOptions()
                                  .setExpandPointedToStructures()
                                  .setExpandStructures()),
          FD);
      if (FD || Options.EncodingProperty() || Options.EncodeClassNames()) {
        // Note that we do extended encoding of protocol qualifier list
        // Only when doing ivar or property encoding.
        S += '"';
        for (const auto *I : OPT->quals()) {
          S += '<';
          S += I->getObjCRuntimeNameAsString();
          S += '>';
        }
        S += '"';
      }
      return;
    }

    S += '@';
    // Embed the class (and protocol) names in quotes for ivar/property/
    // extended encodings.
    if (OPT->getInterfaceDecl() &&
        (FD || Options.EncodingProperty() || Options.EncodeClassNames())) {
      S += '"';
      S += OPT->getInterfaceDecl()->getObjCRuntimeNameAsString();
      for (const auto *I : OPT->quals()) {
        S += '<';
        S += I->getObjCRuntimeNameAsString();
        S += '>';
      }
      S += '"';
    }
    return;
  }

  // gcc just blithely ignores member pointers.
  // FIXME: we should do better than that. 'M' is available.
  case Type::MemberPointer:
  // This matches gcc's encoding, even though technically it is insufficient.
  //FIXME. We should do a better job than gcc.
  case Type::Vector:
  case Type::ExtVector:
    // Until we have a coherent encoding of these three types, issue warning.
    if (NotEncodedT)
      *NotEncodedT = T;
    return;

  case Type::ConstantMatrix:
    if (NotEncodedT)
      *NotEncodedT = T;
    return;

  case Type::BitInt:
    if (NotEncodedT)
      *NotEncodedT = T;
    return;

  // We could see an undeduced auto type here during error recovery.
  // Just ignore it.
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    return;

  case Type::HLSLAttributedResource:
  case Type::HLSLInlineSpirv:
  case Type::OverflowBehavior:
    llvm_unreachable("unexpected type");

  case Type::ArrayParameter:
  case Type::Pipe:
#define ABSTRACT_TYPE(KIND, BASE)
#define TYPE(KIND, BASE)
#define DEPENDENT_TYPE(KIND, BASE) \
  case Type::KIND:
#define NON_CANONICAL_TYPE(KIND, BASE) \
  case Type::KIND:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(KIND, BASE) \
  case Type::KIND:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("@encode for dependent type!");
  }
  llvm_unreachable("bad type kind!");
}
9710
/// Append the @encode expansion of a (non-union) record's members to \p S.
/// Non-virtual bases and fields are emitted in layout order (sorted by bit
/// offset); virtual bases are appended last and only when \p includeVBases
/// is true. A non-null \p FD causes field names to be embedded in quotes
/// (ivar-style extended encoding).
void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl,
                                                 std::string &S,
                                                 const FieldDecl *FD,
                                                 bool includeVBases,
                                                 QualType *NotEncodedT) const {
  assert(RDecl && "Expected non-null RecordDecl");
  assert(!RDecl->isUnion() && "Should not be called for unions");
  // Nothing to expand for an incomplete or invalid record.
  if (!RDecl->getDefinition() || RDecl->getDefinition()->isInvalidDecl())
    return;

  const auto *CXXRec = dyn_cast<CXXRecordDecl>(Val: RDecl);
  // Bases and fields, keyed and ordered by their bit offset in the layout.
  std::multimap<uint64_t, NamedDecl *> FieldOrBaseOffsets;
  const ASTRecordLayout &layout = getASTRecordLayout(D: RDecl);

  // Collect the non-virtual, non-empty bases at their layout offsets.
  if (CXXRec) {
    for (const auto &BI : CXXRec->bases()) {
      if (!BI.isVirtual()) {
        CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl();
        if (base->isEmpty())
          continue;
        uint64_t offs = toBits(CharSize: layout.getBaseClassOffset(Base: base));
        FieldOrBaseOffsets.insert(position: FieldOrBaseOffsets.upper_bound(x: offs),
                                  x: std::make_pair(x&: offs, y&: base));
      }
    }
  }

  // Collect the fields at their layout offsets, skipping zero-size fields
  // (except explicit zero-length bit-fields, which still get encoded).
  for (FieldDecl *Field : RDecl->fields()) {
    if (!Field->isZeroLengthBitField() && Field->isZeroSize(Ctx: *this))
      continue;
    uint64_t offs = layout.getFieldOffset(FieldNo: Field->getFieldIndex());
    FieldOrBaseOffsets.insert(position: FieldOrBaseOffsets.upper_bound(x: offs),
                              x: std::make_pair(x&: offs, y&: Field));
  }

  // Append virtual bases that live past the non-virtual part and are not
  // already present at the same offset.
  if (CXXRec && includeVBases) {
    for (const auto &BI : CXXRec->vbases()) {
      CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl();
      if (base->isEmpty())
        continue;
      uint64_t offs = toBits(CharSize: layout.getVBaseClassOffset(VBase: base));
      if (offs >= uint64_t(toBits(CharSize: layout.getNonVirtualSize())) &&
          FieldOrBaseOffsets.find(x: offs) == FieldOrBaseOffsets.end())
        FieldOrBaseOffsets.insert(position: FieldOrBaseOffsets.end(),
                                  x: std::make_pair(x&: offs, y&: base));
    }
  }

  CharUnits size;
  if (CXXRec) {
    size = includeVBases ? layout.getSize() : layout.getNonVirtualSize();
  } else {
    size = layout.getSize();
  }

#ifndef NDEBUG
  // Debug-only running bit offset, used to sanity-check against the layout.
  uint64_t CurOffs = 0;
#endif
  std::multimap<uint64_t, NamedDecl *>::iterator
    CurLayObj = FieldOrBaseOffsets.begin();

  // A dynamic class with nothing at offset 0 starts with its vtable
  // pointer; encode it as "^^?" (pointer to pointer to unknown).
  if (CXXRec && CXXRec->isDynamicClass() &&
      (CurLayObj == FieldOrBaseOffsets.end() || CurLayObj->first != 0)) {
    if (FD) {
      S += "\"_vptr$";
      std::string recname = CXXRec->getNameAsString();
      if (recname.empty()) recname = "?";
      S += recname;
      S += '"';
    }
    S += "^^?";
#ifndef NDEBUG
    CurOffs += getTypeSize(VoidPtrTy);
#endif
  }

  if (!RDecl->hasFlexibleArrayMember()) {
    // Mark the end of the structure.
    uint64_t offs = toBits(CharSize: size);
    FieldOrBaseOffsets.insert(position: FieldOrBaseOffsets.upper_bound(x: offs),
                              x: std::make_pair(x&: offs, y: nullptr));
  }

  for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) {
#ifndef NDEBUG
    assert(CurOffs <= CurLayObj->first);
    if (CurOffs < CurLayObj->first) {
      uint64_t padding = CurLayObj->first - CurOffs;
      // FIXME: There doesn't seem to be a way to indicate in the encoding that
      // packing/alignment of members is different that normal, in which case
      // the encoding will be out-of-sync with the real layout.
      // If the runtime switches to just consider the size of types without
      // taking into account alignment, we could make padding explicit in the
      // encoding (e.g. using arrays of chars). The encoding strings would be
      // longer then though.
      CurOffs += padding;
    }
#endif

    NamedDecl *dcl = CurLayObj->second;
    if (!dcl)
      break; // reached end of structure.

    if (auto *base = dyn_cast<CXXRecordDecl>(Val: dcl)) {
      // We expand the bases without their virtual bases since those are going
      // in the initial structure. Note that this differs from gcc which
      // expands virtual bases each time one is encountered in the hierarchy,
      // making the encoding type bigger than it really is.
      getObjCEncodingForStructureImpl(RDecl: base, S, FD, /*includeVBases*/false,
                                      NotEncodedT);
      assert(!base->isEmpty());
#ifndef NDEBUG
      CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize());
#endif
    } else {
      const auto *field = cast<FieldDecl>(Val: dcl);
      if (FD) {
        S += '"';
        S += field->getNameAsString();
        S += '"';
      }

      if (field->isBitField()) {
        EncodeBitField(Ctx: this, S, T: field->getType(), FD: field);
#ifndef NDEBUG
        CurOffs += field->getBitWidthValue();
#endif
      } else {
        QualType qt = field->getType();
        getLegacyIntegralTypeEncoding(PointeeTy&: qt);
        getObjCEncodingForTypeImpl(
            T: qt, S, Options: ObjCEncOptions().setExpandStructures().setIsStructField(),
            FD, NotEncodedT);
#ifndef NDEBUG
        CurOffs += getTypeSize(field->getType());
#endif
      }
    }
  }
}
9851
9852void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT,
9853 std::string& S) const {
9854 if (QT & Decl::OBJC_TQ_In)
9855 S += 'n';
9856 if (QT & Decl::OBJC_TQ_Inout)
9857 S += 'N';
9858 if (QT & Decl::OBJC_TQ_Out)
9859 S += 'o';
9860 if (QT & Decl::OBJC_TQ_Bycopy)
9861 S += 'O';
9862 if (QT & Decl::OBJC_TQ_Byref)
9863 S += 'R';
9864 if (QT & Decl::OBJC_TQ_Oneway)
9865 S += 'V';
9866}
9867
9868TypedefDecl *ASTContext::getObjCIdDecl() const {
9869 if (!ObjCIdDecl) {
9870 QualType T = getObjCObjectType(BaseType: ObjCBuiltinIdTy, Protocols: {}, NumProtocols: {});
9871 T = getObjCObjectPointerType(ObjectT: T);
9872 ObjCIdDecl = buildImplicitTypedef(T, Name: "id");
9873 }
9874 return ObjCIdDecl;
9875}
9876
9877TypedefDecl *ASTContext::getObjCSelDecl() const {
9878 if (!ObjCSelDecl) {
9879 QualType T = getPointerType(T: ObjCBuiltinSelTy);
9880 ObjCSelDecl = buildImplicitTypedef(T, Name: "SEL");
9881 }
9882 return ObjCSelDecl;
9883}
9884
9885TypedefDecl *ASTContext::getObjCClassDecl() const {
9886 if (!ObjCClassDecl) {
9887 QualType T = getObjCObjectType(BaseType: ObjCBuiltinClassTy, Protocols: {}, NumProtocols: {});
9888 T = getObjCObjectPointerType(ObjectT: T);
9889 ObjCClassDecl = buildImplicitTypedef(T, Name: "Class");
9890 }
9891 return ObjCClassDecl;
9892}
9893
9894ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const {
9895 if (!ObjCProtocolClassDecl) {
9896 ObjCProtocolClassDecl
9897 = ObjCInterfaceDecl::Create(C: *this, DC: getTranslationUnitDecl(),
9898 atLoc: SourceLocation(),
9899 Id: &Idents.get(Name: "Protocol"),
9900 /*typeParamList=*/nullptr,
9901 /*PrevDecl=*/nullptr,
9902 ClassLoc: SourceLocation(), isInternal: true);
9903 }
9904
9905 return ObjCProtocolClassDecl;
9906}
9907
9908PointerAuthQualifier ASTContext::getObjCMemberSelTypePtrAuth() {
9909 if (!getLangOpts().PointerAuthObjcInterfaceSel)
9910 return PointerAuthQualifier();
9911 return PointerAuthQualifier::Create(
9912 Key: getLangOpts().PointerAuthObjcInterfaceSelKey,
9913 /*isAddressDiscriminated=*/IsAddressDiscriminated: true, ExtraDiscriminator: SelPointerConstantDiscriminator,
9914 AuthenticationMode: PointerAuthenticationMode::SignAndAuth,
9915 /*isIsaPointer=*/IsIsaPointer: false,
9916 /*authenticatesNullValues=*/AuthenticatesNullValues: false);
9917}
9918
9919//===----------------------------------------------------------------------===//
9920// __builtin_va_list Construction Functions
9921//===----------------------------------------------------------------------===//
9922
9923static TypedefDecl *CreateCharPtrNamedVaListDecl(const ASTContext *Context,
9924 StringRef Name) {
9925 // typedef char* __builtin[_ms]_va_list;
9926 QualType T = Context->getPointerType(T: Context->CharTy);
9927 return Context->buildImplicitTypedef(T, Name);
9928}
9929
9930static TypedefDecl *CreateMSVaListDecl(const ASTContext *Context) {
9931 return CreateCharPtrNamedVaListDecl(Context, Name: "__builtin_ms_va_list");
9932}
9933
9934static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) {
9935 return CreateCharPtrNamedVaListDecl(Context, Name: "__builtin_va_list");
9936}
9937
9938static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) {
9939 // typedef void* __builtin_va_list;
9940 QualType T = Context->getPointerType(T: Context->VoidTy);
9941 return Context->buildImplicitTypedef(T, Name: "__builtin_va_list");
9942}
9943
9944static TypedefDecl *
9945CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) {
9946 // struct __va_list
9947 RecordDecl *VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list");
9948 if (Context->getLangOpts().CPlusPlus) {
9949 // namespace std { struct __va_list {
9950 auto *NS = NamespaceDecl::Create(
9951 C&: const_cast<ASTContext &>(*Context), DC: Context->getTranslationUnitDecl(),
9952 /*Inline=*/false, StartLoc: SourceLocation(), IdLoc: SourceLocation(),
9953 Id: &Context->Idents.get(Name: "std"),
9954 /*PrevDecl=*/nullptr, /*Nested=*/false);
9955 NS->setImplicit();
9956 VaListTagDecl->setDeclContext(NS);
9957 }
9958
9959 VaListTagDecl->startDefinition();
9960
9961 const size_t NumFields = 5;
9962 QualType FieldTypes[NumFields];
9963 const char *FieldNames[NumFields];
9964
9965 // void *__stack;
9966 FieldTypes[0] = Context->getPointerType(T: Context->VoidTy);
9967 FieldNames[0] = "__stack";
9968
9969 // void *__gr_top;
9970 FieldTypes[1] = Context->getPointerType(T: Context->VoidTy);
9971 FieldNames[1] = "__gr_top";
9972
9973 // void *__vr_top;
9974 FieldTypes[2] = Context->getPointerType(T: Context->VoidTy);
9975 FieldNames[2] = "__vr_top";
9976
9977 // int __gr_offs;
9978 FieldTypes[3] = Context->IntTy;
9979 FieldNames[3] = "__gr_offs";
9980
9981 // int __vr_offs;
9982 FieldTypes[4] = Context->IntTy;
9983 FieldNames[4] = "__vr_offs";
9984
9985 // Create fields
9986 for (unsigned i = 0; i < NumFields; ++i) {
9987 FieldDecl *Field = FieldDecl::Create(C: const_cast<ASTContext &>(*Context),
9988 DC: VaListTagDecl,
9989 StartLoc: SourceLocation(),
9990 IdLoc: SourceLocation(),
9991 Id: &Context->Idents.get(Name: FieldNames[i]),
9992 T: FieldTypes[i], /*TInfo=*/nullptr,
9993 /*BitWidth=*/BW: nullptr,
9994 /*Mutable=*/false,
9995 InitStyle: ICIS_NoInit);
9996 Field->setAccess(AS_public);
9997 VaListTagDecl->addDecl(D: Field);
9998 }
9999 VaListTagDecl->completeDefinition();
10000 Context->VaListTagDecl = VaListTagDecl;
10001 CanQualType VaListTagType = Context->getCanonicalTagType(TD: VaListTagDecl);
10002
10003 // } __builtin_va_list;
10004 return Context->buildImplicitTypedef(T: VaListTagType, Name: "__builtin_va_list");
10005}
10006
10007static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) {
10008 // typedef struct __va_list_tag {
10009 RecordDecl *VaListTagDecl;
10010
10011 VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list_tag");
10012 VaListTagDecl->startDefinition();
10013
10014 const size_t NumFields = 5;
10015 QualType FieldTypes[NumFields];
10016 const char *FieldNames[NumFields];
10017
10018 // unsigned char gpr;
10019 FieldTypes[0] = Context->UnsignedCharTy;
10020 FieldNames[0] = "gpr";
10021
10022 // unsigned char fpr;
10023 FieldTypes[1] = Context->UnsignedCharTy;
10024 FieldNames[1] = "fpr";
10025
10026 // unsigned short reserved;
10027 FieldTypes[2] = Context->UnsignedShortTy;
10028 FieldNames[2] = "reserved";
10029
10030 // void* overflow_arg_area;
10031 FieldTypes[3] = Context->getPointerType(T: Context->VoidTy);
10032 FieldNames[3] = "overflow_arg_area";
10033
10034 // void* reg_save_area;
10035 FieldTypes[4] = Context->getPointerType(T: Context->VoidTy);
10036 FieldNames[4] = "reg_save_area";
10037
10038 // Create fields
10039 for (unsigned i = 0; i < NumFields; ++i) {
10040 FieldDecl *Field = FieldDecl::Create(C: *Context, DC: VaListTagDecl,
10041 StartLoc: SourceLocation(),
10042 IdLoc: SourceLocation(),
10043 Id: &Context->Idents.get(Name: FieldNames[i]),
10044 T: FieldTypes[i], /*TInfo=*/nullptr,
10045 /*BitWidth=*/BW: nullptr,
10046 /*Mutable=*/false,
10047 InitStyle: ICIS_NoInit);
10048 Field->setAccess(AS_public);
10049 VaListTagDecl->addDecl(D: Field);
10050 }
10051 VaListTagDecl->completeDefinition();
10052 Context->VaListTagDecl = VaListTagDecl;
10053 CanQualType VaListTagType = Context->getCanonicalTagType(TD: VaListTagDecl);
10054
10055 // } __va_list_tag;
10056 TypedefDecl *VaListTagTypedefDecl =
10057 Context->buildImplicitTypedef(T: VaListTagType, Name: "__va_list_tag");
10058
10059 QualType VaListTagTypedefType =
10060 Context->getTypedefType(Keyword: ElaboratedTypeKeyword::None,
10061 /*Qualifier=*/std::nullopt, Decl: VaListTagTypedefDecl);
10062
10063 // typedef __va_list_tag __builtin_va_list[1];
10064 llvm::APInt Size(Context->getTypeSize(T: Context->getSizeType()), 1);
10065 QualType VaListTagArrayType = Context->getConstantArrayType(
10066 EltTy: VaListTagTypedefType, ArySizeIn: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
10067 return Context->buildImplicitTypedef(T: VaListTagArrayType, Name: "__builtin_va_list");
10068}
10069
10070static TypedefDecl *
10071CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) {
10072 // struct __va_list_tag {
10073 RecordDecl *VaListTagDecl;
10074 VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list_tag");
10075 VaListTagDecl->startDefinition();
10076
10077 const size_t NumFields = 4;
10078 QualType FieldTypes[NumFields];
10079 const char *FieldNames[NumFields];
10080
10081 // unsigned gp_offset;
10082 FieldTypes[0] = Context->UnsignedIntTy;
10083 FieldNames[0] = "gp_offset";
10084
10085 // unsigned fp_offset;
10086 FieldTypes[1] = Context->UnsignedIntTy;
10087 FieldNames[1] = "fp_offset";
10088
10089 // void* overflow_arg_area;
10090 FieldTypes[2] = Context->getPointerType(T: Context->VoidTy);
10091 FieldNames[2] = "overflow_arg_area";
10092
10093 // void* reg_save_area;
10094 FieldTypes[3] = Context->getPointerType(T: Context->VoidTy);
10095 FieldNames[3] = "reg_save_area";
10096
10097 // Create fields
10098 for (unsigned i = 0; i < NumFields; ++i) {
10099 FieldDecl *Field = FieldDecl::Create(C: const_cast<ASTContext &>(*Context),
10100 DC: VaListTagDecl,
10101 StartLoc: SourceLocation(),
10102 IdLoc: SourceLocation(),
10103 Id: &Context->Idents.get(Name: FieldNames[i]),
10104 T: FieldTypes[i], /*TInfo=*/nullptr,
10105 /*BitWidth=*/BW: nullptr,
10106 /*Mutable=*/false,
10107 InitStyle: ICIS_NoInit);
10108 Field->setAccess(AS_public);
10109 VaListTagDecl->addDecl(D: Field);
10110 }
10111 VaListTagDecl->completeDefinition();
10112 Context->VaListTagDecl = VaListTagDecl;
10113 CanQualType VaListTagType = Context->getCanonicalTagType(TD: VaListTagDecl);
10114
10115 // };
10116
10117 // typedef struct __va_list_tag __builtin_va_list[1];
10118 llvm::APInt Size(Context->getTypeSize(T: Context->getSizeType()), 1);
10119 QualType VaListTagArrayType = Context->getConstantArrayType(
10120 EltTy: VaListTagType, ArySizeIn: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
10121 return Context->buildImplicitTypedef(T: VaListTagArrayType, Name: "__builtin_va_list");
10122}
10123
10124static TypedefDecl *
10125CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) {
10126 // struct __va_list
10127 RecordDecl *VaListDecl = Context->buildImplicitRecord(Name: "__va_list");
10128 if (Context->getLangOpts().CPlusPlus) {
10129 // namespace std { struct __va_list {
10130 NamespaceDecl *NS;
10131 NS = NamespaceDecl::Create(C&: const_cast<ASTContext &>(*Context),
10132 DC: Context->getTranslationUnitDecl(),
10133 /*Inline=*/false, StartLoc: SourceLocation(),
10134 IdLoc: SourceLocation(), Id: &Context->Idents.get(Name: "std"),
10135 /*PrevDecl=*/nullptr, /*Nested=*/false);
10136 NS->setImplicit();
10137 VaListDecl->setDeclContext(NS);
10138 }
10139
10140 VaListDecl->startDefinition();
10141
10142 // void * __ap;
10143 FieldDecl *Field = FieldDecl::Create(C: const_cast<ASTContext &>(*Context),
10144 DC: VaListDecl,
10145 StartLoc: SourceLocation(),
10146 IdLoc: SourceLocation(),
10147 Id: &Context->Idents.get(Name: "__ap"),
10148 T: Context->getPointerType(T: Context->VoidTy),
10149 /*TInfo=*/nullptr,
10150 /*BitWidth=*/BW: nullptr,
10151 /*Mutable=*/false,
10152 InitStyle: ICIS_NoInit);
10153 Field->setAccess(AS_public);
10154 VaListDecl->addDecl(D: Field);
10155
10156 // };
10157 VaListDecl->completeDefinition();
10158 Context->VaListTagDecl = VaListDecl;
10159
10160 // typedef struct __va_list __builtin_va_list;
10161 CanQualType T = Context->getCanonicalTagType(TD: VaListDecl);
10162 return Context->buildImplicitTypedef(T, Name: "__builtin_va_list");
10163}
10164
10165static TypedefDecl *
10166CreateSystemZBuiltinVaListDecl(const ASTContext *Context) {
10167 // struct __va_list_tag {
10168 RecordDecl *VaListTagDecl;
10169 VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list_tag");
10170 VaListTagDecl->startDefinition();
10171
10172 const size_t NumFields = 4;
10173 QualType FieldTypes[NumFields];
10174 const char *FieldNames[NumFields];
10175
10176 // long __gpr;
10177 FieldTypes[0] = Context->LongTy;
10178 FieldNames[0] = "__gpr";
10179
10180 // long __fpr;
10181 FieldTypes[1] = Context->LongTy;
10182 FieldNames[1] = "__fpr";
10183
10184 // void *__overflow_arg_area;
10185 FieldTypes[2] = Context->getPointerType(T: Context->VoidTy);
10186 FieldNames[2] = "__overflow_arg_area";
10187
10188 // void *__reg_save_area;
10189 FieldTypes[3] = Context->getPointerType(T: Context->VoidTy);
10190 FieldNames[3] = "__reg_save_area";
10191
10192 // Create fields
10193 for (unsigned i = 0; i < NumFields; ++i) {
10194 FieldDecl *Field = FieldDecl::Create(C: const_cast<ASTContext &>(*Context),
10195 DC: VaListTagDecl,
10196 StartLoc: SourceLocation(),
10197 IdLoc: SourceLocation(),
10198 Id: &Context->Idents.get(Name: FieldNames[i]),
10199 T: FieldTypes[i], /*TInfo=*/nullptr,
10200 /*BitWidth=*/BW: nullptr,
10201 /*Mutable=*/false,
10202 InitStyle: ICIS_NoInit);
10203 Field->setAccess(AS_public);
10204 VaListTagDecl->addDecl(D: Field);
10205 }
10206 VaListTagDecl->completeDefinition();
10207 Context->VaListTagDecl = VaListTagDecl;
10208 CanQualType VaListTagType = Context->getCanonicalTagType(TD: VaListTagDecl);
10209
10210 // };
10211
10212 // typedef __va_list_tag __builtin_va_list[1];
10213 llvm::APInt Size(Context->getTypeSize(T: Context->getSizeType()), 1);
10214 QualType VaListTagArrayType = Context->getConstantArrayType(
10215 EltTy: VaListTagType, ArySizeIn: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
10216
10217 return Context->buildImplicitTypedef(T: VaListTagArrayType, Name: "__builtin_va_list");
10218}
10219
10220static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) {
10221 // typedef struct __va_list_tag {
10222 RecordDecl *VaListTagDecl;
10223 VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list_tag");
10224 VaListTagDecl->startDefinition();
10225
10226 const size_t NumFields = 3;
10227 QualType FieldTypes[NumFields];
10228 const char *FieldNames[NumFields];
10229
10230 // void *CurrentSavedRegisterArea;
10231 FieldTypes[0] = Context->getPointerType(T: Context->VoidTy);
10232 FieldNames[0] = "__current_saved_reg_area_pointer";
10233
10234 // void *SavedRegAreaEnd;
10235 FieldTypes[1] = Context->getPointerType(T: Context->VoidTy);
10236 FieldNames[1] = "__saved_reg_area_end_pointer";
10237
10238 // void *OverflowArea;
10239 FieldTypes[2] = Context->getPointerType(T: Context->VoidTy);
10240 FieldNames[2] = "__overflow_area_pointer";
10241
10242 // Create fields
10243 for (unsigned i = 0; i < NumFields; ++i) {
10244 FieldDecl *Field = FieldDecl::Create(
10245 C: const_cast<ASTContext &>(*Context), DC: VaListTagDecl, StartLoc: SourceLocation(),
10246 IdLoc: SourceLocation(), Id: &Context->Idents.get(Name: FieldNames[i]), T: FieldTypes[i],
10247 /*TInfo=*/nullptr,
10248 /*BitWidth=*/BW: nullptr,
10249 /*Mutable=*/false, InitStyle: ICIS_NoInit);
10250 Field->setAccess(AS_public);
10251 VaListTagDecl->addDecl(D: Field);
10252 }
10253 VaListTagDecl->completeDefinition();
10254 Context->VaListTagDecl = VaListTagDecl;
10255 CanQualType VaListTagType = Context->getCanonicalTagType(TD: VaListTagDecl);
10256
10257 // } __va_list_tag;
10258 TypedefDecl *VaListTagTypedefDecl =
10259 Context->buildImplicitTypedef(T: VaListTagType, Name: "__va_list_tag");
10260
10261 QualType VaListTagTypedefType =
10262 Context->getTypedefType(Keyword: ElaboratedTypeKeyword::None,
10263 /*Qualifier=*/std::nullopt, Decl: VaListTagTypedefDecl);
10264
10265 // typedef __va_list_tag __builtin_va_list[1];
10266 llvm::APInt Size(Context->getTypeSize(T: Context->getSizeType()), 1);
10267 QualType VaListTagArrayType = Context->getConstantArrayType(
10268 EltTy: VaListTagTypedefType, ArySizeIn: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
10269
10270 return Context->buildImplicitTypedef(T: VaListTagArrayType, Name: "__builtin_va_list");
10271}
10272
10273static TypedefDecl *
10274CreateXtensaABIBuiltinVaListDecl(const ASTContext *Context) {
10275 // typedef struct __va_list_tag {
10276 RecordDecl *VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list_tag");
10277
10278 VaListTagDecl->startDefinition();
10279
10280 // int* __va_stk;
10281 // int* __va_reg;
10282 // int __va_ndx;
10283 constexpr size_t NumFields = 3;
10284 QualType FieldTypes[NumFields] = {Context->getPointerType(T: Context->IntTy),
10285 Context->getPointerType(T: Context->IntTy),
10286 Context->IntTy};
10287 const char *FieldNames[NumFields] = {"__va_stk", "__va_reg", "__va_ndx"};
10288
10289 // Create fields
10290 for (unsigned i = 0; i < NumFields; ++i) {
10291 FieldDecl *Field = FieldDecl::Create(
10292 C: *Context, DC: VaListTagDecl, StartLoc: SourceLocation(), IdLoc: SourceLocation(),
10293 Id: &Context->Idents.get(Name: FieldNames[i]), T: FieldTypes[i], /*TInfo=*/nullptr,
10294 /*BitWidth=*/BW: nullptr,
10295 /*Mutable=*/false, InitStyle: ICIS_NoInit);
10296 Field->setAccess(AS_public);
10297 VaListTagDecl->addDecl(D: Field);
10298 }
10299 VaListTagDecl->completeDefinition();
10300 Context->VaListTagDecl = VaListTagDecl;
10301 CanQualType VaListTagType = Context->getCanonicalTagType(TD: VaListTagDecl);
10302
10303 // } __va_list_tag;
10304 TypedefDecl *VaListTagTypedefDecl =
10305 Context->buildImplicitTypedef(T: VaListTagType, Name: "__builtin_va_list");
10306
10307 return VaListTagTypedefDecl;
10308}
10309
10310static TypedefDecl *CreateVaListDecl(const ASTContext *Context,
10311 TargetInfo::BuiltinVaListKind Kind) {
10312 switch (Kind) {
10313 case TargetInfo::CharPtrBuiltinVaList:
10314 return CreateCharPtrBuiltinVaListDecl(Context);
10315 case TargetInfo::VoidPtrBuiltinVaList:
10316 return CreateVoidPtrBuiltinVaListDecl(Context);
10317 case TargetInfo::AArch64ABIBuiltinVaList:
10318 return CreateAArch64ABIBuiltinVaListDecl(Context);
10319 case TargetInfo::PowerABIBuiltinVaList:
10320 return CreatePowerABIBuiltinVaListDecl(Context);
10321 case TargetInfo::X86_64ABIBuiltinVaList:
10322 return CreateX86_64ABIBuiltinVaListDecl(Context);
10323 case TargetInfo::AAPCSABIBuiltinVaList:
10324 return CreateAAPCSABIBuiltinVaListDecl(Context);
10325 case TargetInfo::SystemZBuiltinVaList:
10326 return CreateSystemZBuiltinVaListDecl(Context);
10327 case TargetInfo::HexagonBuiltinVaList:
10328 return CreateHexagonBuiltinVaListDecl(Context);
10329 case TargetInfo::XtensaABIBuiltinVaList:
10330 return CreateXtensaABIBuiltinVaListDecl(Context);
10331 }
10332
10333 llvm_unreachable("Unhandled __builtin_va_list type kind");
10334}
10335
10336TypedefDecl *ASTContext::getBuiltinVaListDecl() const {
10337 if (!BuiltinVaListDecl) {
10338 BuiltinVaListDecl = CreateVaListDecl(Context: this, Kind: Target->getBuiltinVaListKind());
10339 assert(BuiltinVaListDecl->isImplicit());
10340 }
10341
10342 return BuiltinVaListDecl;
10343}
10344
10345Decl *ASTContext::getVaListTagDecl() const {
10346 // Force the creation of VaListTagDecl by building the __builtin_va_list
10347 // declaration.
10348 if (!VaListTagDecl)
10349 (void)getBuiltinVaListDecl();
10350
10351 return VaListTagDecl;
10352}
10353
10354TypedefDecl *ASTContext::getBuiltinMSVaListDecl() const {
10355 if (!BuiltinMSVaListDecl)
10356 BuiltinMSVaListDecl = CreateMSVaListDecl(Context: this);
10357
10358 return BuiltinMSVaListDecl;
10359}
10360
10361bool ASTContext::canBuiltinBeRedeclared(const FunctionDecl *FD) const {
10362 // Allow redecl custom type checking builtin for HLSL.
10363 if (LangOpts.HLSL && FD->getBuiltinID() != Builtin::NotBuiltin &&
10364 BuiltinInfo.hasCustomTypechecking(ID: FD->getBuiltinID()))
10365 return true;
10366 // Allow redecl custom type checking builtin for SPIR-V.
10367 if (getTargetInfo().getTriple().isSPIROrSPIRV() &&
10368 BuiltinInfo.isTSBuiltin(ID: FD->getBuiltinID()) &&
10369 BuiltinInfo.hasCustomTypechecking(ID: FD->getBuiltinID()))
10370 return true;
10371 return BuiltinInfo.canBeRedeclared(ID: FD->getBuiltinID());
10372}
10373
10374void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) {
10375 assert(ObjCConstantStringType.isNull() &&
10376 "'NSConstantString' type already set!");
10377
10378 ObjCConstantStringType = getObjCInterfaceType(Decl);
10379}
10380
10381/// Retrieve the template name that corresponds to a non-empty
10382/// lookup.
10383TemplateName
10384ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin,
10385 UnresolvedSetIterator End) const {
10386 unsigned size = End - Begin;
10387 assert(size > 1 && "set is not overloaded!");
10388
10389 void *memory = Allocate(Size: sizeof(OverloadedTemplateStorage) +
10390 size * sizeof(FunctionTemplateDecl*));
10391 auto *OT = new (memory) OverloadedTemplateStorage(size);
10392
10393 NamedDecl **Storage = OT->getStorage();
10394 for (UnresolvedSetIterator I = Begin; I != End; ++I) {
10395 NamedDecl *D = *I;
10396 assert(isa<FunctionTemplateDecl>(D) ||
10397 isa<UnresolvedUsingValueDecl>(D) ||
10398 (isa<UsingShadowDecl>(D) &&
10399 isa<FunctionTemplateDecl>(D->getUnderlyingDecl())));
10400 *Storage++ = D;
10401 }
10402
10403 return TemplateName(OT);
10404}
10405
10406/// Retrieve a template name representing an unqualified-id that has been
10407/// assumed to name a template for ADL purposes.
10408TemplateName ASTContext::getAssumedTemplateName(DeclarationName Name) const {
10409 auto *OT = new (*this) AssumedTemplateStorage(Name);
10410 return TemplateName(OT);
10411}
10412
10413/// Retrieve the template name that represents a qualified
10414/// template name such as \c std::vector.
10415TemplateName ASTContext::getQualifiedTemplateName(NestedNameSpecifier Qualifier,
10416 bool TemplateKeyword,
10417 TemplateName Template) const {
10418 assert(Template.getKind() == TemplateName::Template ||
10419 Template.getKind() == TemplateName::UsingTemplate);
10420
10421 if (Template.getAsTemplateDecl()->getKind() == Decl::TemplateTemplateParm) {
10422 assert(!Qualifier && "unexpected qualified template template parameter");
10423 assert(TemplateKeyword == false);
10424 return Template;
10425 }
10426
10427 // FIXME: Canonicalization?
10428 llvm::FoldingSetNodeID ID;
10429 QualifiedTemplateName::Profile(ID, NNS: Qualifier, TemplateKeyword, TN: Template);
10430
10431 void *InsertPos = nullptr;
10432 QualifiedTemplateName *QTN =
10433 QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
10434 if (!QTN) {
10435 QTN = new (*this, alignof(QualifiedTemplateName))
10436 QualifiedTemplateName(Qualifier, TemplateKeyword, Template);
10437 QualifiedTemplateNames.InsertNode(N: QTN, InsertPos);
10438 }
10439
10440 return TemplateName(QTN);
10441}
10442
10443/// Retrieve the template name that represents a dependent
10444/// template name such as \c MetaFun::template operator+.
10445TemplateName
10446ASTContext::getDependentTemplateName(const DependentTemplateStorage &S) const {
10447 llvm::FoldingSetNodeID ID;
10448 S.Profile(ID);
10449
10450 void *InsertPos = nullptr;
10451 if (DependentTemplateName *QTN =
10452 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos))
10453 return TemplateName(QTN);
10454
10455 DependentTemplateName *QTN =
10456 new (*this, alignof(DependentTemplateName)) DependentTemplateName(S);
10457 DependentTemplateNames.InsertNode(N: QTN, InsertPos);
10458 return TemplateName(QTN);
10459}
10460
10461TemplateName ASTContext::getSubstTemplateTemplateParm(TemplateName Replacement,
10462 Decl *AssociatedDecl,
10463 unsigned Index,
10464 UnsignedOrNone PackIndex,
10465 bool Final) const {
10466 llvm::FoldingSetNodeID ID;
10467 SubstTemplateTemplateParmStorage::Profile(ID, Replacement, AssociatedDecl,
10468 Index, PackIndex, Final);
10469
10470 void *insertPos = nullptr;
10471 SubstTemplateTemplateParmStorage *subst
10472 = SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos&: insertPos);
10473
10474 if (!subst) {
10475 subst = new (*this) SubstTemplateTemplateParmStorage(
10476 Replacement, AssociatedDecl, Index, PackIndex, Final);
10477 SubstTemplateTemplateParms.InsertNode(N: subst, InsertPos: insertPos);
10478 }
10479
10480 return TemplateName(subst);
10481}
10482
10483TemplateName
10484ASTContext::getSubstTemplateTemplateParmPack(const TemplateArgument &ArgPack,
10485 Decl *AssociatedDecl,
10486 unsigned Index, bool Final) const {
10487 auto &Self = const_cast<ASTContext &>(*this);
10488 llvm::FoldingSetNodeID ID;
10489 SubstTemplateTemplateParmPackStorage::Profile(ID, Context&: Self, ArgPack,
10490 AssociatedDecl, Index, Final);
10491
10492 void *InsertPos = nullptr;
10493 SubstTemplateTemplateParmPackStorage *Subst
10494 = SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos);
10495
10496 if (!Subst) {
10497 Subst = new (*this) SubstTemplateTemplateParmPackStorage(
10498 ArgPack.pack_elements(), AssociatedDecl, Index, Final);
10499 SubstTemplateTemplateParmPacks.InsertNode(N: Subst, InsertPos);
10500 }
10501
10502 return TemplateName(Subst);
10503}
10504
10505/// Retrieve the template name that represents a template name
10506/// deduced from a specialization.
10507TemplateName
10508ASTContext::getDeducedTemplateName(TemplateName Underlying,
10509 DefaultArguments DefaultArgs) const {
10510 if (!DefaultArgs)
10511 return Underlying;
10512
10513 llvm::FoldingSetNodeID ID;
10514 DeducedTemplateStorage::Profile(ID, Context: *this, Underlying, DefArgs: DefaultArgs);
10515
10516 void *InsertPos = nullptr;
10517 DeducedTemplateStorage *DTS =
10518 DeducedTemplates.FindNodeOrInsertPos(ID, InsertPos);
10519 if (!DTS) {
10520 void *Mem = Allocate(Size: sizeof(DeducedTemplateStorage) +
10521 sizeof(TemplateArgument) * DefaultArgs.Args.size(),
10522 Align: alignof(DeducedTemplateStorage));
10523 DTS = new (Mem) DeducedTemplateStorage(Underlying, DefaultArgs);
10524 DeducedTemplates.InsertNode(N: DTS, InsertPos);
10525 }
10526 return TemplateName(DTS);
10527}
10528
10529/// getFromTargetType - Given one of the integer types provided by
10530/// TargetInfo, produce the corresponding type. The unsigned @p Type
10531/// is actually a value of type @c TargetInfo::IntType.
10532CanQualType ASTContext::getFromTargetType(unsigned Type) const {
10533 switch (Type) {
10534 case TargetInfo::NoInt: return {};
10535 case TargetInfo::SignedChar: return SignedCharTy;
10536 case TargetInfo::UnsignedChar: return UnsignedCharTy;
10537 case TargetInfo::SignedShort: return ShortTy;
10538 case TargetInfo::UnsignedShort: return UnsignedShortTy;
10539 case TargetInfo::SignedInt: return IntTy;
10540 case TargetInfo::UnsignedInt: return UnsignedIntTy;
10541 case TargetInfo::SignedLong: return LongTy;
10542 case TargetInfo::UnsignedLong: return UnsignedLongTy;
10543 case TargetInfo::SignedLongLong: return LongLongTy;
10544 case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy;
10545 }
10546
10547 llvm_unreachable("Unhandled TargetInfo::IntType value");
10548}
10549
10550//===----------------------------------------------------------------------===//
10551// Type Predicates.
10552//===----------------------------------------------------------------------===//
10553
10554/// getObjCGCAttr - Returns one of GCNone, Weak or Strong objc's
10555/// garbage collection attribute.
10556///
Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const {
  // GC qualifiers are only meaningful when ObjC GC is enabled.
  if (getLangOpts().getGC() == LangOptions::NonGC)
    return Qualifiers::GCNone;

  assert(getLangOpts().ObjC);
  Qualifiers::GC GCAttrs = Ty.getObjCGCAttr();

  // Default behaviour under objective-C's gc is for ObjC pointers
  // (or pointers to them) be treated as though they were declared
  // as __strong.
  if (GCAttrs == Qualifiers::GCNone) {
    if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType())
      return Qualifiers::Strong;
    else if (Ty->isPointerType())
      // Recurse through C pointer levels to find the ultimate pointee.
      return getObjCGCAttrKind(Ty: Ty->castAs<PointerType>()->getPointeeType());
  } else {
    // It's not valid to set GC attributes on anything that isn't a
    // pointer.
#ifndef NDEBUG
    // Debug-only sanity check: strip array layers, then require the
    // canonical type to be some kind of pointer.
    QualType CT = Ty->getCanonicalTypeInternal();
    while (const auto *AT = dyn_cast<ArrayType>(CT))
      CT = AT->getElementType();
    assert(CT->isAnyPointerType() || CT->isBlockPointerType());
#endif
  }
  // An explicitly-specified GC attribute is returned as-is.
  return GCAttrs;
}
10584
10585//===----------------------------------------------------------------------===//
10586// Type Compatibility Testing
10587//===----------------------------------------------------------------------===//
10588
10589/// areCompatVectorTypes - Return true if the two specified vector types are
10590/// compatible.
10591static bool areCompatVectorTypes(const VectorType *LHS,
10592 const VectorType *RHS) {
10593 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified());
10594 return LHS->getElementType() == RHS->getElementType() &&
10595 LHS->getNumElements() == RHS->getNumElements();
10596}
10597
10598/// areCompatMatrixTypes - Return true if the two specified matrix types are
10599/// compatible.
10600static bool areCompatMatrixTypes(const ConstantMatrixType *LHS,
10601 const ConstantMatrixType *RHS) {
10602 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified());
10603 return LHS->getElementType() == RHS->getElementType() &&
10604 LHS->getNumRows() == RHS->getNumRows() &&
10605 LHS->getNumColumns() == RHS->getNumColumns();
10606}
10607
10608bool ASTContext::areCompatibleVectorTypes(QualType FirstVec,
10609 QualType SecondVec) {
10610 assert(FirstVec->isVectorType() && "FirstVec should be a vector type");
10611 assert(SecondVec->isVectorType() && "SecondVec should be a vector type");
10612
10613 if (hasSameUnqualifiedType(T1: FirstVec, T2: SecondVec))
10614 return true;
10615
10616 // Treat Neon vector types and most AltiVec vector types as if they are the
10617 // equivalent GCC vector types.
10618 const auto *First = FirstVec->castAs<VectorType>();
10619 const auto *Second = SecondVec->castAs<VectorType>();
10620 if (First->getNumElements() == Second->getNumElements() &&
10621 hasSameType(T1: First->getElementType(), T2: Second->getElementType()) &&
10622 First->getVectorKind() != VectorKind::AltiVecPixel &&
10623 First->getVectorKind() != VectorKind::AltiVecBool &&
10624 Second->getVectorKind() != VectorKind::AltiVecPixel &&
10625 Second->getVectorKind() != VectorKind::AltiVecBool &&
10626 First->getVectorKind() != VectorKind::SveFixedLengthData &&
10627 First->getVectorKind() != VectorKind::SveFixedLengthPredicate &&
10628 Second->getVectorKind() != VectorKind::SveFixedLengthData &&
10629 Second->getVectorKind() != VectorKind::SveFixedLengthPredicate &&
10630 First->getVectorKind() != VectorKind::RVVFixedLengthData &&
10631 Second->getVectorKind() != VectorKind::RVVFixedLengthData &&
10632 First->getVectorKind() != VectorKind::RVVFixedLengthMask &&
10633 Second->getVectorKind() != VectorKind::RVVFixedLengthMask &&
10634 First->getVectorKind() != VectorKind::RVVFixedLengthMask_1 &&
10635 Second->getVectorKind() != VectorKind::RVVFixedLengthMask_1 &&
10636 First->getVectorKind() != VectorKind::RVVFixedLengthMask_2 &&
10637 Second->getVectorKind() != VectorKind::RVVFixedLengthMask_2 &&
10638 First->getVectorKind() != VectorKind::RVVFixedLengthMask_4 &&
10639 Second->getVectorKind() != VectorKind::RVVFixedLengthMask_4)
10640 return true;
10641
10642 // In OpenCL, treat half and _Float16 vector types as compatible.
10643 if (getLangOpts().OpenCL &&
10644 First->getNumElements() == Second->getNumElements()) {
10645 QualType FirstElt = First->getElementType();
10646 QualType SecondElt = Second->getElementType();
10647
10648 if ((FirstElt->isFloat16Type() && SecondElt->isHalfType()) ||
10649 (FirstElt->isHalfType() && SecondElt->isFloat16Type())) {
10650 if (First->getVectorKind() != VectorKind::AltiVecPixel &&
10651 First->getVectorKind() != VectorKind::AltiVecBool &&
10652 Second->getVectorKind() != VectorKind::AltiVecPixel &&
10653 Second->getVectorKind() != VectorKind::AltiVecBool)
10654 return true;
10655 }
10656 }
10657 return false;
10658}
10659
10660bool ASTContext::areCompatibleOverflowBehaviorTypes(QualType LHS,
10661 QualType RHS) {
10662 auto Result = checkOBTAssignmentCompatibility(LHS, RHS);
10663 return Result != OBTAssignResult::IncompatibleKinds;
10664}
10665
10666ASTContext::OBTAssignResult
10667ASTContext::checkOBTAssignmentCompatibility(QualType LHS, QualType RHS) {
10668 const auto *LHSOBT = LHS->getAs<OverflowBehaviorType>();
10669 const auto *RHSOBT = RHS->getAs<OverflowBehaviorType>();
10670
10671 if (!LHSOBT && !RHSOBT)
10672 return OBTAssignResult::Compatible;
10673
10674 if (LHSOBT && RHSOBT) {
10675 if (LHSOBT->getBehaviorKind() != RHSOBT->getBehaviorKind())
10676 return OBTAssignResult::IncompatibleKinds;
10677 return OBTAssignResult::Compatible;
10678 }
10679
10680 QualType LHSUnderlying = LHSOBT ? LHSOBT->desugar() : LHS;
10681 QualType RHSUnderlying = RHSOBT ? RHSOBT->desugar() : RHS;
10682
10683 if (RHSOBT && !LHSOBT) {
10684 if (LHSUnderlying->isIntegerType() && RHSUnderlying->isIntegerType())
10685 return OBTAssignResult::Discards;
10686 }
10687
10688 return OBTAssignResult::NotApplicable;
10689}
10690
10691/// getRVVTypeSize - Return RVV vector register size.
10692static uint64_t getRVVTypeSize(ASTContext &Context, const BuiltinType *Ty) {
10693 assert(Ty->isRVVVLSBuiltinType() && "Invalid RVV Type");
10694 auto VScale = Context.getTargetInfo().getVScaleRange(
10695 LangOpts: Context.getLangOpts(), Mode: TargetInfo::ArmStreamingKind::NotStreaming);
10696 if (!VScale)
10697 return 0;
10698
10699 ASTContext::BuiltinVectorTypeInfo Info = Context.getBuiltinVectorTypeInfo(Ty);
10700
10701 uint64_t EltSize = Context.getTypeSize(T: Info.ElementType);
10702 if (Info.ElementType == Context.BoolTy)
10703 EltSize = 1;
10704
10705 uint64_t MinElts = Info.EC.getKnownMinValue();
10706 return VScale->first * MinElts * EltSize;
10707}
10708
10709bool ASTContext::areCompatibleRVVTypes(QualType FirstType,
10710 QualType SecondType) {
10711 assert(
10712 ((FirstType->isRVVSizelessBuiltinType() && SecondType->isVectorType()) ||
10713 (FirstType->isVectorType() && SecondType->isRVVSizelessBuiltinType())) &&
10714 "Expected RVV builtin type and vector type!");
10715
10716 auto IsValidCast = [this](QualType FirstType, QualType SecondType) {
10717 if (const auto *BT = FirstType->getAs<BuiltinType>()) {
10718 if (const auto *VT = SecondType->getAs<VectorType>()) {
10719 if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask) {
10720 BuiltinVectorTypeInfo Info = getBuiltinVectorTypeInfo(Ty: BT);
10721 return FirstType->isRVVVLSBuiltinType() &&
10722 Info.ElementType == BoolTy &&
10723 getTypeSize(T: SecondType) == ((getRVVTypeSize(Context&: *this, Ty: BT)));
10724 }
10725 if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask_1) {
10726 BuiltinVectorTypeInfo Info = getBuiltinVectorTypeInfo(Ty: BT);
10727 return FirstType->isRVVVLSBuiltinType() &&
10728 Info.ElementType == BoolTy &&
10729 getTypeSize(T: SecondType) == ((getRVVTypeSize(Context&: *this, Ty: BT) * 8));
10730 }
10731 if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask_2) {
10732 BuiltinVectorTypeInfo Info = getBuiltinVectorTypeInfo(Ty: BT);
10733 return FirstType->isRVVVLSBuiltinType() &&
10734 Info.ElementType == BoolTy &&
10735 getTypeSize(T: SecondType) == ((getRVVTypeSize(Context&: *this, Ty: BT)) * 4);
10736 }
10737 if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask_4) {
10738 BuiltinVectorTypeInfo Info = getBuiltinVectorTypeInfo(Ty: BT);
10739 return FirstType->isRVVVLSBuiltinType() &&
10740 Info.ElementType == BoolTy &&
10741 getTypeSize(T: SecondType) == ((getRVVTypeSize(Context&: *this, Ty: BT)) * 2);
10742 }
10743 if (VT->getVectorKind() == VectorKind::RVVFixedLengthData ||
10744 VT->getVectorKind() == VectorKind::Generic)
10745 return FirstType->isRVVVLSBuiltinType() &&
10746 getTypeSize(T: SecondType) == getRVVTypeSize(Context&: *this, Ty: BT) &&
10747 hasSameType(T1: VT->getElementType(),
10748 T2: getBuiltinVectorTypeInfo(Ty: BT).ElementType);
10749 }
10750 }
10751 return false;
10752 };
10753
10754 return IsValidCast(FirstType, SecondType) ||
10755 IsValidCast(SecondType, FirstType);
10756}
10757
10758bool ASTContext::areLaxCompatibleRVVTypes(QualType FirstType,
10759 QualType SecondType) {
10760 assert(
10761 ((FirstType->isRVVSizelessBuiltinType() && SecondType->isVectorType()) ||
10762 (FirstType->isVectorType() && SecondType->isRVVSizelessBuiltinType())) &&
10763 "Expected RVV builtin type and vector type!");
10764
10765 auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) {
10766 const auto *BT = FirstType->getAs<BuiltinType>();
10767 if (!BT)
10768 return false;
10769
10770 if (!BT->isRVVVLSBuiltinType())
10771 return false;
10772
10773 const auto *VecTy = SecondType->getAs<VectorType>();
10774 if (VecTy && VecTy->getVectorKind() == VectorKind::Generic) {
10775 const LangOptions::LaxVectorConversionKind LVCKind =
10776 getLangOpts().getLaxVectorConversions();
10777
10778 // If __riscv_v_fixed_vlen != N do not allow vector lax conversion.
10779 if (getTypeSize(T: SecondType) != getRVVTypeSize(Context&: *this, Ty: BT))
10780 return false;
10781
10782 // If -flax-vector-conversions=all is specified, the types are
10783 // certainly compatible.
10784 if (LVCKind == LangOptions::LaxVectorConversionKind::All)
10785 return true;
10786
10787 // If -flax-vector-conversions=integer is specified, the types are
10788 // compatible if the elements are integer types.
10789 if (LVCKind == LangOptions::LaxVectorConversionKind::Integer)
10790 return VecTy->getElementType().getCanonicalType()->isIntegerType() &&
10791 FirstType->getRVVEltType(Ctx: *this)->isIntegerType();
10792 }
10793
10794 return false;
10795 };
10796
10797 return IsLaxCompatible(FirstType, SecondType) ||
10798 IsLaxCompatible(SecondType, FirstType);
10799}
10800
10801bool ASTContext::hasDirectOwnershipQualifier(QualType Ty) const {
10802 while (true) {
10803 // __strong id
10804 if (const AttributedType *Attr = dyn_cast<AttributedType>(Val&: Ty)) {
10805 if (Attr->getAttrKind() == attr::ObjCOwnership)
10806 return true;
10807
10808 Ty = Attr->getModifiedType();
10809
10810 // X *__strong (...)
10811 } else if (const ParenType *Paren = dyn_cast<ParenType>(Val&: Ty)) {
10812 Ty = Paren->getInnerType();
10813
10814 // We do not want to look through typedefs, typeof(expr),
10815 // typeof(type), or any other way that the type is somehow
10816 // abstracted.
10817 } else {
10818 return false;
10819 }
10820 }
10821}
10822
10823//===----------------------------------------------------------------------===//
10824// ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's.
10825//===----------------------------------------------------------------------===//
10826
10827/// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the
10828/// inheritance hierarchy of 'rProto'.
10829bool
10830ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto,
10831 ObjCProtocolDecl *rProto) const {
10832 if (declaresSameEntity(D1: lProto, D2: rProto))
10833 return true;
10834 for (auto *PI : rProto->protocols())
10835 if (ProtocolCompatibleWithProtocol(lProto, rProto: PI))
10836 return true;
10837 return false;
10838}
10839
10840/// ObjCQualifiedClassTypesAreCompatible - compare Class<pr,...> and
10841/// Class<pr1, ...>.
10842bool ASTContext::ObjCQualifiedClassTypesAreCompatible(
10843 const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs) {
10844 for (auto *lhsProto : lhs->quals()) {
10845 bool match = false;
10846 for (auto *rhsProto : rhs->quals()) {
10847 if (ProtocolCompatibleWithProtocol(lProto: lhsProto, rProto: rhsProto)) {
10848 match = true;
10849 break;
10850 }
10851 }
10852 if (!match)
10853 return false;
10854 }
10855 return true;
10856}
10857
/// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an
/// ObjCQualifiedIDType. Returns true when the two pointer types are
/// compatible under Objective-C's qualified-id rules. When 'compare' is
/// true, a protocol match in either direction is accepted wherever both
/// sides carry protocol qualifier lists.
bool ASTContext::ObjCQualifiedIdTypesAreCompatible(
    const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs,
    bool compare) {
  // Allow id<P..> and an 'id' in all cases.
  if (lhs->isObjCIdType() || rhs->isObjCIdType())
    return true;

  // Don't allow id<P..> to convert to Class or Class<P..> in either direction.
  if (lhs->isObjCClassType() || lhs->isObjCQualifiedClassType() ||
      rhs->isObjCClassType() || rhs->isObjCQualifiedClassType())
    return false;

  if (lhs->isObjCQualifiedIdType()) {
    if (rhs->qual_empty()) {
      // If the RHS is a unqualified interface pointer "NSString*",
      // make sure we check the class hierarchy.
      if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) {
        for (auto *I : lhs->quals()) {
          // when comparing an id<P> on lhs with a static type on rhs,
          // see if static class implements all of id's protocols, directly or
          // through its super class and categories.
          if (!rhsID->ClassImplementsProtocol(lProto: I, lookupCategory: true))
            return false;
        }
      }
      // If there are no qualifiers and no interface, we have an 'id'.
      return true;
    }
    // Both the right and left sides have qualifiers.
    for (auto *lhsProto : lhs->quals()) {
      bool match = false;

      // when comparing an id<P> on lhs with a static type on rhs,
      // see if static class implements all of id's protocols, directly or
      // through its super class and categories.
      for (auto *rhsProto : rhs->quals()) {
        if (ProtocolCompatibleWithProtocol(lProto: lhsProto, rProto: rhsProto) ||
            (compare && ProtocolCompatibleWithProtocol(lProto: rhsProto, rProto: lhsProto))) {
          match = true;
          break;
        }
      }
      // If the RHS is a qualified interface pointer "NSString<P>*",
      // make sure we check the class hierarchy.
      // A protocol left unmatched above may still be satisfied by the RHS
      // interface itself implementing it (directly or via its super class
      // and categories).
      if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) {
        for (auto *I : lhs->quals()) {
          // when comparing an id<P> on lhs with a static type on rhs,
          // see if static class implements all of id's protocols, directly or
          // through its super class and categories.
          if (rhsID->ClassImplementsProtocol(lProto: I, lookupCategory: true)) {
            match = true;
            break;
          }
        }
      }
      if (!match)
        return false;
    }

    return true;
  }

  assert(rhs->isObjCQualifiedIdType() && "One of the LHS/RHS should be id<x>");

  if (lhs->getInterfaceType()) {
    // If both the right and left sides have qualifiers.
    for (auto *lhsProto : lhs->quals()) {
      bool match = false;

      // when comparing an id<P> on rhs with a static type on lhs,
      // see if static class implements all of id's protocols, directly or
      // through its super class and categories.
      // First, lhs protocols in the qualifier list must be found, direct
      // or indirect in rhs's qualifier list or it is a mismatch.
      for (auto *rhsProto : rhs->quals()) {
        if (ProtocolCompatibleWithProtocol(lProto: lhsProto, rProto: rhsProto) ||
            (compare && ProtocolCompatibleWithProtocol(lProto: rhsProto, rProto: lhsProto))) {
          match = true;
          break;
        }
      }
      if (!match)
        return false;
    }

    // Static class's protocols, or its super class or category protocols
    // must be found, direct or indirect in rhs's qualifier list or it is a mismatch.
    if (ObjCInterfaceDecl *lhsID = lhs->getInterfaceDecl()) {
      llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols;
      CollectInheritedProtocols(CDecl: lhsID, Protocols&: LHSInheritedProtocols);
      // This is rather dubious but matches gcc's behavior. If lhs has
      // no type qualifier and its class has no static protocol(s)
      // assume that it is mismatch.
      if (LHSInheritedProtocols.empty() && lhs->qual_empty())
        return false;
      for (auto *lhsProto : LHSInheritedProtocols) {
        bool match = false;
        for (auto *rhsProto : rhs->quals()) {
          if (ProtocolCompatibleWithProtocol(lProto: lhsProto, rProto: rhsProto) ||
              (compare && ProtocolCompatibleWithProtocol(lProto: rhsProto, rProto: lhsProto))) {
            match = true;
            break;
          }
        }
        if (!match)
          return false;
      }
    }
    return true;
  }
  return false;
}
10972
10973/// canAssignObjCInterfaces - Return true if the two interface types are
10974/// compatible for assignment from RHS to LHS. This handles validation of any
10975/// protocol qualifiers on the LHS or RHS.
10976bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT,
10977 const ObjCObjectPointerType *RHSOPT) {
10978 const ObjCObjectType* LHS = LHSOPT->getObjectType();
10979 const ObjCObjectType* RHS = RHSOPT->getObjectType();
10980
10981 // If either type represents the built-in 'id' type, return true.
10982 if (LHS->isObjCUnqualifiedId() || RHS->isObjCUnqualifiedId())
10983 return true;
10984
10985 // Function object that propagates a successful result or handles
10986 // __kindof types.
10987 auto finish = [&](bool succeeded) -> bool {
10988 if (succeeded)
10989 return true;
10990
10991 if (!RHS->isKindOfType())
10992 return false;
10993
10994 // Strip off __kindof and protocol qualifiers, then check whether
10995 // we can assign the other way.
10996 return canAssignObjCInterfaces(LHSOPT: RHSOPT->stripObjCKindOfTypeAndQuals(ctx: *this),
10997 RHSOPT: LHSOPT->stripObjCKindOfTypeAndQuals(ctx: *this));
10998 };
10999
11000 // Casts from or to id<P> are allowed when the other side has compatible
11001 // protocols.
11002 if (LHS->isObjCQualifiedId() || RHS->isObjCQualifiedId()) {
11003 return finish(ObjCQualifiedIdTypesAreCompatible(lhs: LHSOPT, rhs: RHSOPT, compare: false));
11004 }
11005
11006 // Verify protocol compatibility for casts from Class<P1> to Class<P2>.
11007 if (LHS->isObjCQualifiedClass() && RHS->isObjCQualifiedClass()) {
11008 return finish(ObjCQualifiedClassTypesAreCompatible(lhs: LHSOPT, rhs: RHSOPT));
11009 }
11010
11011 // Casts from Class to Class<Foo>, or vice-versa, are allowed.
11012 if (LHS->isObjCClass() && RHS->isObjCClass()) {
11013 return true;
11014 }
11015
11016 // If we have 2 user-defined types, fall into that path.
11017 if (LHS->getInterface() && RHS->getInterface()) {
11018 return finish(canAssignObjCInterfaces(LHS, RHS));
11019 }
11020
11021 return false;
11022}
11023
/// canAssignObjCInterfacesInBlockPointer - This routine is specifically written
/// for providing type-safety for objective-c pointers used to pass/return
/// arguments in block literals. When passed as arguments, passing 'A*' where
/// 'id' is expected is not OK. Passing 'Sub *" where 'Super *" is expected is
/// not OK. For the return type, the opposite is not OK.
bool ASTContext::canAssignObjCInterfacesInBlockPointer(
                                         const ObjCObjectPointerType *LHSOPT,
                                         const ObjCObjectPointerType *RHSOPT,
                                         bool BlockReturnType) {

  // Function object that propagates a successful result or handles
  // __kindof types.
  auto finish = [&](bool succeeded) -> bool {
    if (succeeded)
      return true;

    // The "expected" side is the one whose static type constrains the
    // conversion: the RHS for block return types, the LHS otherwise.
    const ObjCObjectPointerType *Expected = BlockReturnType ? RHSOPT : LHSOPT;
    if (!Expected->isKindOfType())
      return false;

    // Strip off __kindof and protocol qualifiers, then check whether
    // we can assign the other way.
    return canAssignObjCInterfacesInBlockPointer(
             LHSOPT: RHSOPT->stripObjCKindOfTypeAndQuals(ctx: *this),
             RHSOPT: LHSOPT->stripObjCKindOfTypeAndQuals(ctx: *this),
             BlockReturnType);
  };

  // Builtin RHS or plain 'id' LHS always succeeds.
  if (RHSOPT->isObjCBuiltinType() || LHSOPT->isObjCIdType())
    return true;

  if (LHSOPT->isObjCBuiltinType()) {
    return finish(RHSOPT->isObjCBuiltinType() ||
                  RHSOPT->isObjCQualifiedIdType());
  }

  if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType()) {
    if (getLangOpts().CompatibilityQualifiedIdBlockParamTypeChecking)
      // Use for block parameters previous type checking for compatibility.
      return finish(ObjCQualifiedIdTypesAreCompatible(lhs: LHSOPT, rhs: RHSOPT, compare: false) ||
                    // Or corrected type checking as in non-compat mode.
                    (!BlockReturnType &&
                     ObjCQualifiedIdTypesAreCompatible(lhs: RHSOPT, rhs: LHSOPT, compare: false)));
    else
      return finish(ObjCQualifiedIdTypesAreCompatible(
          lhs: (BlockReturnType ? LHSOPT : RHSOPT),
          rhs: (BlockReturnType ? RHSOPT : LHSOPT), compare: false));
  }

  const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType();
  const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType();
  if (LHS && RHS) { // We have 2 user-defined types.
    if (LHS != RHS) {
      // Super-to-sub is acceptable only for return types; sub-to-super only
      // for parameters — hence the BlockReturnType flip below.
      if (LHS->getDecl()->isSuperClassOf(I: RHS->getDecl()))
        return finish(BlockReturnType);
      if (RHS->getDecl()->isSuperClassOf(I: LHS->getDecl()))
        return finish(!BlockReturnType);
    }
    else
      return true;
  }
  return false;
}
11087
11088/// Comparison routine for Objective-C protocols to be used with
11089/// llvm::array_pod_sort.
11090static int compareObjCProtocolsByName(ObjCProtocolDecl * const *lhs,
11091 ObjCProtocolDecl * const *rhs) {
11092 return (*lhs)->getName().compare(RHS: (*rhs)->getName());
11093}
11094
11095/// getIntersectionOfProtocols - This routine finds the intersection of set
11096/// of protocols inherited from two distinct objective-c pointer objects with
11097/// the given common base.
11098/// It is used to build composite qualifier list of the composite type of
11099/// the conditional expression involving two objective-c pointer objects.
11100static
11101void getIntersectionOfProtocols(ASTContext &Context,
11102 const ObjCInterfaceDecl *CommonBase,
11103 const ObjCObjectPointerType *LHSOPT,
11104 const ObjCObjectPointerType *RHSOPT,
11105 SmallVectorImpl<ObjCProtocolDecl *> &IntersectionSet) {
11106
11107 const ObjCObjectType* LHS = LHSOPT->getObjectType();
11108 const ObjCObjectType* RHS = RHSOPT->getObjectType();
11109 assert(LHS->getInterface() && "LHS must have an interface base");
11110 assert(RHS->getInterface() && "RHS must have an interface base");
11111
11112 // Add all of the protocols for the LHS.
11113 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSProtocolSet;
11114
11115 // Start with the protocol qualifiers.
11116 for (auto *proto : LHS->quals()) {
11117 Context.CollectInheritedProtocols(CDecl: proto, Protocols&: LHSProtocolSet);
11118 }
11119
11120 // Also add the protocols associated with the LHS interface.
11121 Context.CollectInheritedProtocols(CDecl: LHS->getInterface(), Protocols&: LHSProtocolSet);
11122
11123 // Add all of the protocols for the RHS.
11124 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSProtocolSet;
11125
11126 // Start with the protocol qualifiers.
11127 for (auto *proto : RHS->quals()) {
11128 Context.CollectInheritedProtocols(CDecl: proto, Protocols&: RHSProtocolSet);
11129 }
11130
11131 // Also add the protocols associated with the RHS interface.
11132 Context.CollectInheritedProtocols(CDecl: RHS->getInterface(), Protocols&: RHSProtocolSet);
11133
11134 // Compute the intersection of the collected protocol sets.
11135 for (auto *proto : LHSProtocolSet) {
11136 if (RHSProtocolSet.count(Ptr: proto))
11137 IntersectionSet.push_back(Elt: proto);
11138 }
11139
11140 // Compute the set of protocols that is implied by either the common type or
11141 // the protocols within the intersection.
11142 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> ImpliedProtocols;
11143 Context.CollectInheritedProtocols(CDecl: CommonBase, Protocols&: ImpliedProtocols);
11144
11145 // Remove any implied protocols from the list of inherited protocols.
11146 if (!ImpliedProtocols.empty()) {
11147 llvm::erase_if(C&: IntersectionSet, P: [&](ObjCProtocolDecl *proto) -> bool {
11148 return ImpliedProtocols.contains(Ptr: proto);
11149 });
11150 }
11151
11152 // Sort the remaining protocols by name.
11153 llvm::array_pod_sort(Start: IntersectionSet.begin(), End: IntersectionSet.end(),
11154 Compare: compareObjCProtocolsByName);
11155}
11156
11157/// Determine whether the first type is a subtype of the second.
11158static bool canAssignObjCObjectTypes(ASTContext &ctx, QualType lhs,
11159 QualType rhs) {
11160 // Common case: two object pointers.
11161 const auto *lhsOPT = lhs->getAs<ObjCObjectPointerType>();
11162 const auto *rhsOPT = rhs->getAs<ObjCObjectPointerType>();
11163 if (lhsOPT && rhsOPT)
11164 return ctx.canAssignObjCInterfaces(LHSOPT: lhsOPT, RHSOPT: rhsOPT);
11165
11166 // Two block pointers.
11167 const auto *lhsBlock = lhs->getAs<BlockPointerType>();
11168 const auto *rhsBlock = rhs->getAs<BlockPointerType>();
11169 if (lhsBlock && rhsBlock)
11170 return ctx.typesAreBlockPointerCompatible(lhs, rhs);
11171
11172 // If either is an unqualified 'id' and the other is a block, it's
11173 // acceptable.
11174 if ((lhsOPT && lhsOPT->isObjCIdType() && rhsBlock) ||
11175 (rhsOPT && rhsOPT->isObjCIdType() && lhsBlock))
11176 return true;
11177
11178 return false;
11179}
11180
11181// Check that the given Objective-C type argument lists are equivalent.
11182static bool sameObjCTypeArgs(ASTContext &ctx,
11183 const ObjCInterfaceDecl *iface,
11184 ArrayRef<QualType> lhsArgs,
11185 ArrayRef<QualType> rhsArgs,
11186 bool stripKindOf) {
11187 if (lhsArgs.size() != rhsArgs.size())
11188 return false;
11189
11190 ObjCTypeParamList *typeParams = iface->getTypeParamList();
11191 if (!typeParams)
11192 return false;
11193
11194 for (unsigned i = 0, n = lhsArgs.size(); i != n; ++i) {
11195 if (ctx.hasSameType(T1: lhsArgs[i], T2: rhsArgs[i]))
11196 continue;
11197
11198 switch (typeParams->begin()[i]->getVariance()) {
11199 case ObjCTypeParamVariance::Invariant:
11200 if (!stripKindOf ||
11201 !ctx.hasSameType(T1: lhsArgs[i].stripObjCKindOfType(ctx),
11202 T2: rhsArgs[i].stripObjCKindOfType(ctx))) {
11203 return false;
11204 }
11205 break;
11206
11207 case ObjCTypeParamVariance::Covariant:
11208 if (!canAssignObjCObjectTypes(ctx, lhs: lhsArgs[i], rhs: rhsArgs[i]))
11209 return false;
11210 break;
11211
11212 case ObjCTypeParamVariance::Contravariant:
11213 if (!canAssignObjCObjectTypes(ctx, lhs: rhsArgs[i], rhs: lhsArgs[i]))
11214 return false;
11215 break;
11216 }
11217 }
11218
11219 return true;
11220}
11221
/// areCommonBaseCompatible - Compute the nearest common Objective-C base
/// type of the two object pointer types, preserving compatible type
/// arguments, the intersection of protocol qualifiers, and __kindof.
/// Returns a null QualType when no common base exists or the type
/// arguments conflict.
QualType ASTContext::areCommonBaseCompatible(
    const ObjCObjectPointerType *Lptr,
    const ObjCObjectPointerType *Rptr) {
  const ObjCObjectType *LHS = Lptr->getObjectType();
  const ObjCObjectType *RHS = Rptr->getObjectType();
  const ObjCInterfaceDecl* LDecl = LHS->getInterface();
  const ObjCInterfaceDecl* RDecl = RHS->getInterface();

  // Only interface types participate; anything else has no common base.
  if (!LDecl || !RDecl)
    return {};

  // When either LHS or RHS is a kindof type, we should return a kindof type.
  // For example, for common base of kindof(ASub1) and kindof(ASub2), we return
  // kindof(A).
  bool anyKindOf = LHS->isKindOfType() || RHS->isKindOfType();

  // Follow the left-hand side up the class hierarchy until we either hit a
  // root or find the RHS. Record the ancestors in case we don't find it.
  llvm::SmallDenseMap<const ObjCInterfaceDecl *, const ObjCObjectType *, 4>
    LHSAncestors;
  while (true) {
    // Record this ancestor. We'll need this if the common type isn't in the
    // path from the LHS to the root.
    LHSAncestors[LHS->getInterface()->getCanonicalDecl()] = LHS;

    if (declaresSameEntity(D1: LHS->getInterface(), D2: RDecl)) {
      // Get the type arguments.
      ArrayRef<QualType> LHSTypeArgs = LHS->getTypeArgsAsWritten();
      bool anyChanges = false;
      if (LHS->isSpecialized() && RHS->isSpecialized()) {
        // Both have type arguments, compare them.
        if (!sameObjCTypeArgs(ctx&: *this, iface: LHS->getInterface(),
                              lhsArgs: LHS->getTypeArgs(), rhsArgs: RHS->getTypeArgs(),
                              /*stripKindOf=*/true))
          return {};
      } else if (LHS->isSpecialized() != RHS->isSpecialized()) {
        // If only one has type arguments, the result will not have type
        // arguments.
        LHSTypeArgs = {};
        anyChanges = true;
      }

      // Compute the intersection of protocols.
      SmallVector<ObjCProtocolDecl *, 8> Protocols;
      getIntersectionOfProtocols(Context&: *this, CommonBase: LHS->getInterface(), LHSOPT: Lptr, RHSOPT: Rptr,
                                 IntersectionSet&: Protocols);
      if (!Protocols.empty())
        anyChanges = true;

      // If anything in the LHS will have changed, build a new result type.
      // If we need to return a kindof type but LHS is not a kindof type, we
      // build a new result type.
      if (anyChanges || LHS->isKindOfType() != anyKindOf) {
        QualType Result = getObjCInterfaceType(Decl: LHS->getInterface());
        Result = getObjCObjectType(baseType: Result, typeArgs: LHSTypeArgs, protocols: Protocols,
                                   isKindOf: anyKindOf || LHS->isKindOfType());
        return getObjCObjectPointerType(ObjectT: Result);
      }

      // Otherwise the LHS ancestor already is the common base.
      return getObjCObjectPointerType(ObjectT: QualType(LHS, 0));
    }

    // Find the superclass.
    QualType LHSSuperType = LHS->getSuperClassType();
    if (LHSSuperType.isNull())
      break;

    LHS = LHSSuperType->castAs<ObjCObjectType>();
  }

  // We didn't find anything by following the LHS to its root; now check
  // the RHS against the cached set of ancestors.
  while (true) {
    auto KnownLHS = LHSAncestors.find(Val: RHS->getInterface()->getCanonicalDecl());
    if (KnownLHS != LHSAncestors.end()) {
      LHS = KnownLHS->second;

      // Get the type arguments.
      ArrayRef<QualType> RHSTypeArgs = RHS->getTypeArgsAsWritten();
      bool anyChanges = false;
      if (LHS->isSpecialized() && RHS->isSpecialized()) {
        // Both have type arguments, compare them.
        if (!sameObjCTypeArgs(ctx&: *this, iface: LHS->getInterface(),
                              lhsArgs: LHS->getTypeArgs(), rhsArgs: RHS->getTypeArgs(),
                              /*stripKindOf=*/true))
          return {};
      } else if (LHS->isSpecialized() != RHS->isSpecialized()) {
        // If only one has type arguments, the result will not have type
        // arguments.
        RHSTypeArgs = {};
        anyChanges = true;
      }

      // Compute the intersection of protocols.
      SmallVector<ObjCProtocolDecl *, 8> Protocols;
      getIntersectionOfProtocols(Context&: *this, CommonBase: RHS->getInterface(), LHSOPT: Lptr, RHSOPT: Rptr,
                                 IntersectionSet&: Protocols);
      if (!Protocols.empty())
        anyChanges = true;

      // If we need to return a kindof type but RHS is not a kindof type, we
      // build a new result type.
      if (anyChanges || RHS->isKindOfType() != anyKindOf) {
        QualType Result = getObjCInterfaceType(Decl: RHS->getInterface());
        Result = getObjCObjectType(baseType: Result, typeArgs: RHSTypeArgs, protocols: Protocols,
                                   isKindOf: anyKindOf || RHS->isKindOfType());
        return getObjCObjectPointerType(ObjectT: Result);
      }

      // Otherwise the RHS ancestor already is the common base.
      return getObjCObjectPointerType(ObjectT: QualType(RHS, 0));
    }

    // Find the superclass of the RHS.
    QualType RHSSuperType = RHS->getSuperClassType();
    if (RHSSuperType.isNull())
      break;

    RHS = RHSSuperType->castAs<ObjCObjectType>();
  }

  // No common base was found along either chain.
  return {};
}
11344
/// canAssignObjCInterfaces - Return true when an object of the RHS interface
/// type can be assigned to the LHS interface type: the RHS must be a
/// subclass of the LHS, satisfy the LHS's protocol qualifiers, and have
/// matching type arguments when the LHS is specialized.
bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS,
                                         const ObjCObjectType *RHS) {
  assert(LHS->getInterface() && "LHS is not an interface type");
  assert(RHS->getInterface() && "RHS is not an interface type");

  // Verify that the base decls are compatible: the RHS must be a subclass of
  // the LHS.
  ObjCInterfaceDecl *LHSInterface = LHS->getInterface();
  bool IsSuperClass = LHSInterface->isSuperClassOf(I: RHS->getInterface());
  if (!IsSuperClass)
    return false;

  // If the LHS has protocol qualifiers, determine whether all of them are
  // satisfied by the RHS (i.e., the RHS has a superset of the protocols in the
  // LHS).
  if (LHS->getNumProtocols() > 0) {
    // OK if conversion of LHS to SuperClass results in narrowing of types
    // ; i.e., SuperClass may implement at least one of the protocols
    // in LHS's protocol list. Example, SuperObj<P1> = lhs<P1,P2> is ok.
    // But not SuperObj<P1,P2,P3> = lhs<P1,P2>.
    llvm::SmallPtrSet<ObjCProtocolDecl *, 8> SuperClassInheritedProtocols;
    CollectInheritedProtocols(CDecl: RHS->getInterface(), Protocols&: SuperClassInheritedProtocols);
    // Also, if RHS has explicit qualifiers, include them for comparing with
    // LHS's qualifiers.
    for (auto *RHSPI : RHS->quals())
      CollectInheritedProtocols(CDecl: RHSPI, Protocols&: SuperClassInheritedProtocols);
    // If there is no protocols associated with RHS, it is not a match.
    if (SuperClassInheritedProtocols.empty())
      return false;

    // Every LHS protocol must be implemented (possibly indirectly) by some
    // protocol the RHS carries or inherits.
    for (const auto *LHSProto : LHS->quals()) {
      bool SuperImplementsProtocol = false;
      for (auto *SuperClassProto : SuperClassInheritedProtocols)
        if (SuperClassProto->lookupProtocolNamed(PName: LHSProto->getIdentifier())) {
          SuperImplementsProtocol = true;
          break;
        }
      if (!SuperImplementsProtocol)
        return false;
    }
  }

  // If the LHS is specialized, we may need to check type arguments.
  if (LHS->isSpecialized()) {
    // Follow the superclass chain until we've matched the LHS class in the
    // hierarchy. This substitutes type arguments through.
    const ObjCObjectType *RHSSuper = RHS;
    while (!declaresSameEntity(D1: RHSSuper->getInterface(), D2: LHSInterface))
      RHSSuper = RHSSuper->getSuperClassType()->castAs<ObjCObjectType>();

    // If the RHS is specialized, compare type arguments.
    if (RHSSuper->isSpecialized() &&
        !sameObjCTypeArgs(ctx&: *this, iface: LHS->getInterface(),
                          lhsArgs: LHS->getTypeArgs(), rhsArgs: RHSSuper->getTypeArgs(),
                          /*stripKindOf=*/true)) {
      return false;
    }
  }

  return true;
}
11406
11407bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) {
11408 // get the "pointed to" types
11409 const auto *LHSOPT = LHS->getAs<ObjCObjectPointerType>();
11410 const auto *RHSOPT = RHS->getAs<ObjCObjectPointerType>();
11411
11412 if (!LHSOPT || !RHSOPT)
11413 return false;
11414
11415 return canAssignObjCInterfaces(LHSOPT, RHSOPT) ||
11416 canAssignObjCInterfaces(LHSOPT: RHSOPT, RHSOPT: LHSOPT);
11417}
11418
11419bool ASTContext::canBindObjCObjectType(QualType To, QualType From) {
11420 return canAssignObjCInterfaces(
11421 LHSOPT: getObjCObjectPointerType(ObjectT: To)->castAs<ObjCObjectPointerType>(),
11422 RHSOPT: getObjCObjectPointerType(ObjectT: From)->castAs<ObjCObjectPointerType>());
11423}
11424
11425/// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible,
11426/// both shall have the identically qualified version of a compatible type.
11427/// C99 6.2.7p1: Two types have compatible types if their types are the
11428/// same. See 6.7.[2,3,5] for additional rules.
11429bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS,
11430 bool CompareUnqualified) {
11431 if (getLangOpts().CPlusPlus)
11432 return hasSameType(T1: LHS, T2: RHS);
11433
11434 return !mergeTypes(LHS, RHS, OfBlockPointer: false, Unqualified: CompareUnqualified).isNull();
11435}
11436
/// propertyTypesAreCompatible - Objective-C property types are accepted
/// under the same rules as ordinary type compatibility.
bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) {
  return typesAreCompatible(LHS, RHS);
}
11440
/// typesAreBlockPointerCompatible - Two block pointer types are compatible
/// iff they can be merged under the block-pointer merging rules.
bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) {
  return !mergeTypes(LHS, RHS, OfBlockPointer: true).isNull();
}
11444
11445/// mergeTransparentUnionType - if T is a transparent union type and a member
11446/// of T is compatible with SubType, return the merged type, else return
11447/// QualType()
11448QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType,
11449 bool OfBlockPointer,
11450 bool Unqualified) {
11451 if (const RecordType *UT = T->getAsUnionType()) {
11452 RecordDecl *UD = UT->getDecl()->getMostRecentDecl();
11453 if (UD->hasAttr<TransparentUnionAttr>()) {
11454 for (const auto *I : UD->fields()) {
11455 QualType ET = I->getType().getUnqualifiedType();
11456 QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified);
11457 if (!MT.isNull())
11458 return MT;
11459 }
11460 }
11461 }
11462
11463 return {};
11464}
11465
11466/// mergeFunctionParameterTypes - merge two types which appear as function
11467/// parameter types
11468QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs,
11469 bool OfBlockPointer,
11470 bool Unqualified) {
11471 // GNU extension: two types are compatible if they appear as a function
11472 // argument, one of the types is a transparent union type and the other
11473 // type is compatible with a union member
11474 QualType lmerge = mergeTransparentUnionType(T: lhs, SubType: rhs, OfBlockPointer,
11475 Unqualified);
11476 if (!lmerge.isNull())
11477 return lmerge;
11478
11479 QualType rmerge = mergeTransparentUnionType(T: rhs, SubType: lhs, OfBlockPointer,
11480 Unqualified);
11481 if (!rmerge.isNull())
11482 return rmerge;
11483
11484 return mergeTypes(lhs, rhs, OfBlockPointer, Unqualified);
11485}
11486
/// mergeFunctionTypes - Merge two function types into a single composite
/// function type, returning a null QualType when they are not compatible.
///
/// \param lhs,rhs  The two function types; both must be FunctionType
///        (with or without a prototype).
/// \param OfBlockPointer  True when merging through a block pointer, which
///        uses more permissive return-type rules.
/// \param Unqualified  If true, merge return/parameter types without their
///        top-level qualifiers.
/// \param AllowCXX  Permit C++-only features (e.g. exception specs) in the
///        inputs; otherwise such inputs trip the asserts below.
/// \param IsConditionalOperator  Changes how noreturn and function effects
///        combine: union for declaration merging, intersection for the
///        composite type of a conditional expression (see comments below).
QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
                                        bool OfBlockPointer, bool Unqualified,
                                        bool AllowCXX,
                                        bool IsConditionalOperator) {
  const auto *lbase = lhs->castAs<FunctionType>();
  const auto *rbase = rhs->castAs<FunctionType>();
  const auto *lproto = dyn_cast<FunctionProtoType>(Val: lbase);
  const auto *rproto = dyn_cast<FunctionProtoType>(Val: rbase);
  // Track whether the merged type is canonically identical to the lhs/rhs
  // spelling; if so, the original sugared type can be returned unchanged.
  bool allLTypes = true;
  bool allRTypes = true;

  // Check return type
  QualType retType;
  if (OfBlockPointer) {
    QualType RHS = rbase->getReturnType();
    QualType LHS = lbase->getReturnType();
    bool UnqualifiedResult = Unqualified;
    // If only the LHS return type is qualified, drop qualifiers so the
    // block-return merge can still succeed.
    if (!UnqualifiedResult)
      UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers());
    retType = mergeTypes(LHS, RHS, OfBlockPointer: true, Unqualified: UnqualifiedResult, BlockReturnType: true);
  }
  else
    retType = mergeTypes(lbase->getReturnType(), rbase->getReturnType(), OfBlockPointer: false,
                         Unqualified);
  if (retType.isNull())
    return {};

  if (Unqualified)
    retType = retType.getUnqualifiedType();

  // Compare the merged return type against each operand's (canonical)
  // return type to decide whether lhs or rhs can be reused as-is.
  CanQualType LRetType = getCanonicalType(T: lbase->getReturnType());
  CanQualType RRetType = getCanonicalType(T: rbase->getReturnType());
  if (Unqualified) {
    LRetType = LRetType.getUnqualifiedType();
    RRetType = RRetType.getUnqualifiedType();
  }

  if (getCanonicalType(T: retType) != LRetType)
    allLTypes = false;
  if (getCanonicalType(T: retType) != RRetType)
    allRTypes = false;

  // FIXME: double check this
  // FIXME: should we error if lbase->getRegParmAttr() != 0 &&
  //                           rbase->getRegParmAttr() != 0 &&
  //                           lbase->getRegParmAttr() != rbase->getRegParmAttr()?
  FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo();
  FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo();

  // Compatible functions must have compatible calling conventions
  if (lbaseInfo.getCC() != rbaseInfo.getCC())
    return {};

  // Regparm is part of the calling convention.
  if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm())
    return {};
  if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm())
    return {};

  // These ABI-affecting attributes must also agree exactly.
  if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult())
    return {};
  if (lbaseInfo.getNoCallerSavedRegs() != rbaseInfo.getNoCallerSavedRegs())
    return {};
  if (lbaseInfo.getNoCfCheck() != rbaseInfo.getNoCfCheck())
    return {};

  // When merging declarations, it's common for supplemental information like
  // attributes to only be present in one of the declarations, and we generally
  // want type merging to preserve the union of information.  So a merged
  // function type should be noreturn if it was noreturn in *either* operand
  // type.
  //
  // But for the conditional operator, this is backwards.  The result of the
  // operator could be either operand, and its type should conservatively
  // reflect that.  So a function type in a composite type is noreturn only
  // if it's noreturn in *both* operand types.
  //
  // Arguably, noreturn is a kind of subtype, and the conditional operator
  // ought to produce the most specific common supertype of its operand types.
  // That would differ from this rule in contravariant positions.  However,
  // neither C nor C++ generally uses this kind of subtype reasoning.  Also,
  // as a practical matter, it would only affect C code that does abstraction of
  // higher-order functions (taking noreturn callbacks!), which is uncommon to
  // say the least.  So we use the simpler rule.
  bool NoReturn = IsConditionalOperator
                      ? lbaseInfo.getNoReturn() && rbaseInfo.getNoReturn()
                      : lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn();
  if (lbaseInfo.getNoReturn() != NoReturn)
    allLTypes = false;
  if (rbaseInfo.getNoReturn() != NoReturn)
    allRTypes = false;

  FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(noReturn: NoReturn);

  // Holds the merged effect set when the operands' effects differ; left
  // unset when they already agree.
  std::optional<FunctionEffectSet> MergedFX;

  if (lproto && rproto) { // two C99 style function prototypes
    assert((AllowCXX ||
            (!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec())) &&
           "C++ shouldn't be here");
    // Compatible functions must have the same number of parameters
    if (lproto->getNumParams() != rproto->getNumParams())
      return {};

    // Variadic and non-variadic functions aren't compatible
    if (lproto->isVariadic() != rproto->isVariadic())
      return {};

    if (lproto->getMethodQuals() != rproto->getMethodQuals())
      return {};

    // Function protos with different 'cfi_salt' values aren't compatible.
    if (lproto->getExtraAttributeInfo().CFISalt !=
        rproto->getExtraAttributeInfo().CFISalt)
      return {};

    // Function effects are handled similarly to noreturn, see above.
    FunctionEffectsRef LHSFX = lproto->getFunctionEffects();
    FunctionEffectsRef RHSFX = rproto->getFunctionEffects();
    if (LHSFX != RHSFX) {
      if (IsConditionalOperator)
        MergedFX = FunctionEffectSet::getIntersection(LHS: LHSFX, RHS: RHSFX);
      else {
        FunctionEffectSet::Conflicts Errs;
        MergedFX = FunctionEffectSet::getUnion(LHS: LHSFX, RHS: RHSFX, Errs);
        // Here we're discarding a possible error due to conflicts in the effect
        // sets. But we're not in a context where we can report it. The
        // operation does however guarantee maintenance of invariants.
      }
      if (*MergedFX != LHSFX)
        allLTypes = false;
      if (*MergedFX != RHSFX)
        allRTypes = false;
    }

    // Merge the per-parameter ExtParameterInfos (e.g. noescape); failure
    // here means the prototypes are incompatible.
    SmallVector<FunctionProtoType::ExtParameterInfo, 4> newParamInfos;
    bool canUseLeft, canUseRight;
    if (!mergeExtParameterInfo(FirstFnType: lproto, SecondFnType: rproto, CanUseFirst&: canUseLeft, CanUseSecond&: canUseRight,
                               NewParamInfos&: newParamInfos))
      return {};

    if (!canUseLeft)
      allLTypes = false;
    if (!canUseRight)
      allRTypes = false;

    // Check parameter type compatibility
    SmallVector<QualType, 10> types;
    for (unsigned i = 0, n = lproto->getNumParams(); i < n; i++) {
      QualType lParamType = lproto->getParamType(i).getUnqualifiedType();
      QualType rParamType = rproto->getParamType(i).getUnqualifiedType();
      QualType paramType = mergeFunctionParameterTypes(
          lhs: lParamType, rhs: rParamType, OfBlockPointer, Unqualified);
      if (paramType.isNull())
        return {};

      if (Unqualified)
        paramType = paramType.getUnqualifiedType();

      types.push_back(Elt: paramType);
      if (Unqualified) {
        lParamType = lParamType.getUnqualifiedType();
        rParamType = rParamType.getUnqualifiedType();
      }

      if (getCanonicalType(T: paramType) != getCanonicalType(T: lParamType))
        allLTypes = false;
      if (getCanonicalType(T: paramType) != getCanonicalType(T: rParamType))
        allRTypes = false;
    }

    // If either operand already spells the merged type exactly, reuse it to
    // preserve type sugar.
    if (allLTypes) return lhs;
    if (allRTypes) return rhs;

    FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo();
    EPI.ExtInfo = einfo;
    EPI.ExtParameterInfos =
        newParamInfos.empty() ? nullptr : newParamInfos.data();
    if (MergedFX)
      EPI.FunctionEffects = *MergedFX;
    return getFunctionType(ResultTy: retType, Args: types, EPI);
  }

  // Exactly one side has a prototype: the merged type gets that prototype,
  // so the other side's spelling cannot be reused.
  if (lproto) allRTypes = false;
  if (rproto) allLTypes = false;

  const FunctionProtoType *proto = lproto ? lproto : rproto;
  if (proto) {
    assert((AllowCXX || !proto->hasExceptionSpec()) && "C++ shouldn't be here");
    if (proto->isVariadic())
      return {};
    // Check that the types are compatible with the types that
    // would result from default argument promotions (C99 6.7.5.3p15).
    // The only types actually affected are promotable integer
    // types and floats, which would be passed as a different
    // type depending on whether the prototype is visible.
    for (unsigned i = 0, n = proto->getNumParams(); i < n; ++i) {
      QualType paramTy = proto->getParamType(i);

      // Look at the converted type of enum types, since that is the type used
      // to pass enum values.
      if (const auto *ED = paramTy->getAsEnumDecl()) {
        paramTy = ED->getIntegerType();
        if (paramTy.isNull())
          return {};
      }

      if (isPromotableIntegerType(T: paramTy) ||
          getCanonicalType(T: paramTy).getUnqualifiedType() == FloatTy)
        return {};
    }

    if (allLTypes) return lhs;
    if (allRTypes) return rhs;

    FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo();
    EPI.ExtInfo = einfo;
    if (MergedFX)
      EPI.FunctionEffects = *MergedFX;
    return getFunctionType(ResultTy: retType, Args: proto->getParamTypes(), EPI);
  }

  // Neither side has a prototype: merge as a no-proto function type.
  if (allLTypes) return lhs;
  if (allRTypes) return rhs;
  return getFunctionNoProtoType(ResultTy: retType, Info: einfo);
}
11713
11714/// Given that we have an enum type and a non-enum type, try to merge them.
11715static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET,
11716 QualType other, bool isBlockReturnType) {
11717 // C99 6.7.2.2p4: Each enumerated type shall be compatible with char,
11718 // a signed integer type, or an unsigned integer type.
11719 // Compatibility is based on the underlying type, not the promotion
11720 // type.
11721 QualType underlyingType =
11722 ET->getDecl()->getDefinitionOrSelf()->getIntegerType();
11723 if (underlyingType.isNull())
11724 return {};
11725 if (Context.hasSameType(T1: underlyingType, T2: other))
11726 return other;
11727
11728 // In block return types, we're more permissive and accept any
11729 // integral type of the same size.
11730 if (isBlockReturnType && other->isIntegerType() &&
11731 Context.getTypeSize(T: underlyingType) == Context.getTypeSize(T: other))
11732 return other;
11733
11734 return {};
11735}
11736
11737QualType ASTContext::mergeTagDefinitions(QualType LHS, QualType RHS) {
11738 // C17 and earlier and C++ disallow two tag definitions within the same TU
11739 // from being compatible.
11740 if (LangOpts.CPlusPlus || !LangOpts.C23)
11741 return {};
11742
11743 // Nameless tags are comparable only within outer definitions. At the top
11744 // level they are not comparable.
11745 const TagDecl *LTagD = LHS->castAsTagDecl(), *RTagD = RHS->castAsTagDecl();
11746 if (!LTagD->getIdentifier() || !RTagD->getIdentifier())
11747 return {};
11748
11749 // C23, on the other hand, requires the members to be "the same enough", so
11750 // we use a structural equivalence check.
11751 StructuralEquivalenceContext::NonEquivalentDeclSet NonEquivalentDecls;
11752 StructuralEquivalenceContext Ctx(
11753 getLangOpts(), *this, *this, NonEquivalentDecls,
11754 StructuralEquivalenceKind::Default, /*StrictTypeSpelling=*/false,
11755 /*Complain=*/false, /*ErrorOnTagTypeMismatch=*/true);
11756 return Ctx.IsEquivalent(T1: LHS, T2: RHS) ? LHS : QualType{};
11757}
11758
/// Attempt to merge LHS and RHS when at least one side is an
/// OverflowBehaviorType (OBT).
///
/// \returns std::nullopt when neither side is an OBT (the caller should fall
/// through to ordinary merging); a null QualType when the types cannot be
/// merged; otherwise the merged type. The remaining parameters are forwarded
/// to the recursive mergeTypes calls.
std::optional<QualType> ASTContext::tryMergeOverflowBehaviorTypes(
    QualType LHS, QualType RHS, bool OfBlockPointer, bool Unqualified,
    bool BlockReturnType, bool IsConditionalOperator) {
  const auto *LHSOBT = LHS->getAs<OverflowBehaviorType>();
  const auto *RHSOBT = RHS->getAs<OverflowBehaviorType>();

  // Neither side is an OBT: nothing for this routine to decide.
  if (!LHSOBT && !RHSOBT)
    return std::nullopt;

  if (LHSOBT) {
    if (RHSOBT) {
      // Both sides are OBTs: the behavior kinds must match exactly.
      if (LHSOBT->getBehaviorKind() != RHSOBT->getBehaviorKind())
        return QualType();

      // The wrapped types must themselves merge.
      QualType MergedUnderlying = mergeTypes(
          LHSOBT->getUnderlyingType(), RHSOBT->getUnderlyingType(),
          OfBlockPointer, Unqualified, BlockReturnType, IsConditionalOperator);

      if (MergedUnderlying.isNull())
        return QualType();

      // Canonically identical OBTs: preserve as much sugar as possible.
      // If even the sugared underlying types match, keep the common sugar;
      // otherwise rebuild on the canonical underlying type.
      if (getCanonicalType(T: LHSOBT) == getCanonicalType(T: RHSOBT)) {
        if (LHSOBT->getUnderlyingType() == RHSOBT->getUnderlyingType())
          return getCommonSugaredType(X: LHS, Y: RHS);
        return getOverflowBehaviorType(
            Kind: LHSOBT->getBehaviorKind(),
            Underlying: getCanonicalType(T: LHSOBT->getUnderlyingType()));
      }

      // For different underlying types that successfully merge, wrap the
      // merged underlying type with the common overflow behavior
      return getOverflowBehaviorType(Kind: LHSOBT->getBehaviorKind(),
                                     Underlying: MergedUnderlying);
    }
    // Only LHS is an OBT: merge its underlying type against RHS.
    return mergeTypes(LHSOBT->getUnderlyingType(), RHS, OfBlockPointer,
                      Unqualified, BlockReturnType, IsConditionalOperator);
  }

  // Only RHS is an OBT: merge LHS against its underlying type.
  return mergeTypes(LHS, RHSOBT->getUnderlyingType(), OfBlockPointer,
                    Unqualified, BlockReturnType, IsConditionalOperator);
}
11800
/// mergeTypes - Merge LHS and RHS into a single composite type (C99 6.2.7
/// and various extensions: ObjC, blocks, OpenMP variant overloading,
/// vectors, _BitInt, HLSL, ...), returning a null QualType when the two
/// types are not compatible.
///
/// \param OfBlockPointer  True when merging through a block pointer, which
///        enables more permissive ObjC/block rules.
/// \param Unqualified  Strip top-level qualifiers before comparing.
/// \param BlockReturnType  True when merging block return types (affects
///        enum-vs-integer and ObjC pointer handling).
/// \param IsConditionalOperator  Propagated into function-type merging,
///        where it flips noreturn/effect merging from union to intersection.
QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, bool OfBlockPointer,
                                bool Unqualified, bool BlockReturnType,
                                bool IsConditionalOperator) {
  // For C++ we will not reach this code with reference types (see below),
  // for OpenMP variant call overloading we might.
  //
  // C++ [expr]: If an expression initially has the type "reference to T", the
  // type is adjusted to "T" prior to any further analysis, the expression
  // designates the object or function denoted by the reference, and the
  // expression is an lvalue unless the reference is an rvalue reference and
  // the expression is a function call (possibly inside parentheses).
  auto *LHSRefTy = LHS->getAs<ReferenceType>();
  auto *RHSRefTy = RHS->getAs<ReferenceType>();
  if (LangOpts.OpenMP && LHSRefTy && RHSRefTy &&
      LHS->getTypeClass() == RHS->getTypeClass())
    return mergeTypes(LHS: LHSRefTy->getPointeeType(), RHS: RHSRefTy->getPointeeType(),
                      OfBlockPointer, Unqualified, BlockReturnType);
  if (LHSRefTy || RHSRefTy)
    return {};

  // OverflowBehaviorType handling is factored out; a populated optional is
  // the final answer (possibly null for "incompatible").
  if (std::optional<QualType> MergedOBT =
          tryMergeOverflowBehaviorTypes(LHS, RHS, OfBlockPointer, Unqualified,
                                        BlockReturnType, IsConditionalOperator))
    return *MergedOBT;

  if (Unqualified) {
    LHS = LHS.getUnqualifiedType();
    RHS = RHS.getUnqualifiedType();
  }

  QualType LHSCan = getCanonicalType(T: LHS),
           RHSCan = getCanonicalType(T: RHS);

  // If two types are identical, they are compatible.
  if (LHSCan == RHSCan)
    return LHS;

  // If the qualifiers are different, the types aren't compatible... mostly.
  Qualifiers LQuals = LHSCan.getLocalQualifiers();
  Qualifiers RQuals = RHSCan.getLocalQualifiers();
  if (LQuals != RQuals) {
    // If any of these qualifiers are different, we have a type
    // mismatch.
    if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() ||
        LQuals.getAddressSpace() != RQuals.getAddressSpace() ||
        LQuals.getObjCLifetime() != RQuals.getObjCLifetime() ||
        !LQuals.getPointerAuth().isEquivalent(Other: RQuals.getPointerAuth()) ||
        LQuals.hasUnaligned() != RQuals.hasUnaligned())
      return {};

    // Exactly one GC qualifier difference is allowed: __strong is
    // okay if the other type has no GC qualifier but is an Objective
    // C object pointer (i.e. implicitly strong by default).  We fix
    // this by pretending that the unqualified type was actually
    // qualified __strong.
    Qualifiers::GC GC_L = LQuals.getObjCGCAttr();
    Qualifiers::GC GC_R = RQuals.getObjCGCAttr();
    assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements");

    if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak)
      return {};

    if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) {
      return mergeTypes(LHS, RHS: getObjCGCQualType(T: RHS, GCAttr: Qualifiers::Strong));
    }
    if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) {
      return mergeTypes(LHS: getObjCGCQualType(T: LHS, GCAttr: Qualifiers::Strong), RHS);
    }
    return {};
  }

  // Okay, qualifiers are equal.

  Type::TypeClass LHSClass = LHSCan->getTypeClass();
  Type::TypeClass RHSClass = RHSCan->getTypeClass();

  // We want to consider the two function types to be the same for these
  // comparisons, just force one to the other.
  if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto;
  if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto;

  // Same as above for arrays
  if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray)
    LHSClass = Type::ConstantArray;
  if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray)
    RHSClass = Type::ConstantArray;

  // ObjCInterfaces are just specialized ObjCObjects.
  if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject;
  if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject;

  // Canonicalize ExtVector -> Vector.
  if (LHSClass == Type::ExtVector) LHSClass = Type::Vector;
  if (RHSClass == Type::ExtVector) RHSClass = Type::Vector;

  // If the canonical type classes don't match.
  if (LHSClass != RHSClass) {
    // Note that we only have special rules for turning block enum
    // returns into block int returns, not vice-versa.
    if (const auto *ETy = LHS->getAsCanonical<EnumType>()) {
      return mergeEnumWithInteger(Context&: *this, ET: ETy, other: RHS, isBlockReturnType: false);
    }
    if (const EnumType *ETy = RHS->getAsCanonical<EnumType>()) {
      return mergeEnumWithInteger(Context&: *this, ET: ETy, other: LHS, isBlockReturnType: BlockReturnType);
    }
    // allow block pointer type to match an 'id' type.
    if (OfBlockPointer && !BlockReturnType) {
       if (LHS->isObjCIdType() && RHS->isBlockPointerType())
         return LHS;
      if (RHS->isObjCIdType() && LHS->isBlockPointerType())
        return RHS;
    }
    // Allow __auto_type to match anything; it merges to the type with more
    // information.
    if (const auto *AT = LHS->getAs<AutoType>()) {
      if (!AT->isDeduced() && AT->isGNUAutoType())
        return RHS;
    }
    if (const auto *AT = RHS->getAs<AutoType>()) {
      if (!AT->isDeduced() && AT->isGNUAutoType())
        return LHS;
    }
    return {};
  }

  // The canonical type classes match.
  switch (LHSClass) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");

  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
  case Type::LValueReference:
  case Type::RValueReference:
  case Type::MemberPointer:
    llvm_unreachable("C++ should never be in mergeTypes");

  case Type::ObjCInterface:
  case Type::IncompleteArray:
  case Type::VariableArray:
  case Type::FunctionProto:
  case Type::ExtVector:
  case Type::OverflowBehavior:
    llvm_unreachable("Types are eliminated above");

  case Type::Pointer:
  {
    // Merge two pointer types, while trying to preserve typedef info
    QualType LHSPointee = LHS->castAs<PointerType>()->getPointeeType();
    QualType RHSPointee = RHS->castAs<PointerType>()->getPointeeType();
    if (Unqualified) {
      LHSPointee = LHSPointee.getUnqualifiedType();
      RHSPointee = RHSPointee.getUnqualifiedType();
    }
    QualType ResultType = mergeTypes(LHS: LHSPointee, RHS: RHSPointee, OfBlockPointer: false,
                                     Unqualified);
    if (ResultType.isNull())
      return {};
    if (getCanonicalType(T: LHSPointee) == getCanonicalType(T: ResultType))
      return LHS;
    if (getCanonicalType(T: RHSPointee) == getCanonicalType(T: ResultType))
      return RHS;
    return getPointerType(T: ResultType);
  }
  case Type::BlockPointer:
  {
    // Merge two block pointer types, while trying to preserve typedef info
    QualType LHSPointee = LHS->castAs<BlockPointerType>()->getPointeeType();
    QualType RHSPointee = RHS->castAs<BlockPointerType>()->getPointeeType();
    if (Unqualified) {
      LHSPointee = LHSPointee.getUnqualifiedType();
      RHSPointee = RHSPointee.getUnqualifiedType();
    }
    if (getLangOpts().OpenCL) {
      Qualifiers LHSPteeQual = LHSPointee.getQualifiers();
      Qualifiers RHSPteeQual = RHSPointee.getQualifiers();
      // Blocks can't be an expression in a ternary operator (OpenCL v2.0
      // 6.12.5) thus the following check is asymmetric.
      if (!LHSPteeQual.isAddressSpaceSupersetOf(other: RHSPteeQual, Ctx: *this))
        return {};
      LHSPteeQual.removeAddressSpace();
      RHSPteeQual.removeAddressSpace();
      LHSPointee =
          QualType(LHSPointee.getTypePtr(), LHSPteeQual.getAsOpaqueValue());
      RHSPointee =
          QualType(RHSPointee.getTypePtr(), RHSPteeQual.getAsOpaqueValue());
    }
    QualType ResultType = mergeTypes(LHS: LHSPointee, RHS: RHSPointee, OfBlockPointer,
                                     Unqualified);
    if (ResultType.isNull())
      return {};
    if (getCanonicalType(T: LHSPointee) == getCanonicalType(T: ResultType))
      return LHS;
    if (getCanonicalType(T: RHSPointee) == getCanonicalType(T: ResultType))
      return RHS;
    return getBlockPointerType(T: ResultType);
  }
  case Type::Atomic:
  {
    // Merge two atomic types, while trying to preserve typedef info
    QualType LHSValue = LHS->castAs<AtomicType>()->getValueType();
    QualType RHSValue = RHS->castAs<AtomicType>()->getValueType();
    if (Unqualified) {
      LHSValue = LHSValue.getUnqualifiedType();
      RHSValue = RHSValue.getUnqualifiedType();
    }
    QualType ResultType = mergeTypes(LHS: LHSValue, RHS: RHSValue, OfBlockPointer: false,
                                     Unqualified);
    if (ResultType.isNull())
      return {};
    if (getCanonicalType(T: LHSValue) == getCanonicalType(T: ResultType))
      return LHS;
    if (getCanonicalType(T: RHSValue) == getCanonicalType(T: ResultType))
      return RHS;
    return getAtomicType(T: ResultType);
  }
  case Type::ConstantArray:
  {
    // Note: VLA/incomplete arrays were canonicalized to ConstantArray above,
    // so all three array flavors flow through here.
    const ConstantArrayType* LCAT = getAsConstantArrayType(T: LHS);
    const ConstantArrayType* RCAT = getAsConstantArrayType(T: RHS);
    if (LCAT && RCAT && RCAT->getZExtSize() != LCAT->getZExtSize())
      return {};

    QualType LHSElem = getAsArrayType(T: LHS)->getElementType();
    QualType RHSElem = getAsArrayType(T: RHS)->getElementType();
    if (Unqualified) {
      LHSElem = LHSElem.getUnqualifiedType();
      RHSElem = RHSElem.getUnqualifiedType();
    }

    QualType ResultType = mergeTypes(LHS: LHSElem, RHS: RHSElem, OfBlockPointer: false, Unqualified);
    if (ResultType.isNull())
      return {};

    const VariableArrayType* LVAT = getAsVariableArrayType(T: LHS);
    const VariableArrayType* RVAT = getAsVariableArrayType(T: RHS);

    // If either side is a variable array, and both are complete, check whether
    // the current dimension is definite.
    if (LVAT || RVAT) {
      // Extract a known-constant dimension from either a VLA with a
      // constant-foldable size expression or a constant array.
      auto SizeFetch = [this](const VariableArrayType* VAT,
                              const ConstantArrayType* CAT)
          -> std::pair<bool,llvm::APInt> {
        if (VAT) {
          std::optional<llvm::APSInt> TheInt;
          Expr *E = VAT->getSizeExpr();
          if (E && (TheInt = E->getIntegerConstantExpr(Ctx: *this)))
            return std::make_pair(x: true, y&: *TheInt);
          return std::make_pair(x: false, y: llvm::APSInt());
        }
        if (CAT)
          return std::make_pair(x: true, y: CAT->getSize());
        return std::make_pair(x: false, y: llvm::APInt());
      };

      bool HaveLSize, HaveRSize;
      llvm::APInt LSize, RSize;
      std::tie(args&: HaveLSize, args&: LSize) = SizeFetch(LVAT, LCAT);
      std::tie(args&: HaveRSize, args&: RSize) = SizeFetch(RVAT, RCAT);
      if (HaveLSize && HaveRSize && !llvm::APInt::isSameValue(I1: LSize, I2: RSize))
        return {}; // Definite, but unequal, array dimension
    }

    // Prefer returning one of the operands (preserving sugar) when its
    // element type already matches the merged element type.
    if (LCAT && getCanonicalType(T: LHSElem) == getCanonicalType(T: ResultType))
      return LHS;
    if (RCAT && getCanonicalType(T: RHSElem) == getCanonicalType(T: ResultType))
      return RHS;
    if (LCAT)
      return getConstantArrayType(EltTy: ResultType, ArySizeIn: LCAT->getSize(),
                                  SizeExpr: LCAT->getSizeExpr(), ASM: ArraySizeModifier(), IndexTypeQuals: 0);
    if (RCAT)
      return getConstantArrayType(EltTy: ResultType, ArySizeIn: RCAT->getSize(),
                                  SizeExpr: RCAT->getSizeExpr(), ASM: ArraySizeModifier(), IndexTypeQuals: 0);
    if (LVAT && getCanonicalType(T: LHSElem) == getCanonicalType(T: ResultType))
      return LHS;
    if (RVAT && getCanonicalType(T: RHSElem) == getCanonicalType(T: ResultType))
      return RHS;
    if (LVAT) {
      // FIXME: This isn't correct! But tricky to implement because
      // the array's size has to be the size of LHS, but the type
      // has to be different.
      return LHS;
    }
    if (RVAT) {
      // FIXME: This isn't correct! But tricky to implement because
      // the array's size has to be the size of RHS, but the type
      // has to be different.
      return RHS;
    }
    if (getCanonicalType(T: LHSElem) == getCanonicalType(T: ResultType)) return LHS;
    if (getCanonicalType(T: RHSElem) == getCanonicalType(T: ResultType)) return RHS;
    return getIncompleteArrayType(elementType: ResultType, ASM: ArraySizeModifier(), elementTypeQuals: 0);
  }
  case Type::FunctionNoProto:
    return mergeFunctionTypes(lhs: LHS, rhs: RHS, OfBlockPointer, Unqualified,
                              /*AllowCXX=*/false, IsConditionalOperator);
  case Type::Record:
  case Type::Enum:
    return mergeTagDefinitions(LHS, RHS);
  case Type::Builtin:
    // Only exactly equal builtin types are compatible, which is tested above.
    return {};
  case Type::Complex:
    // Distinct complex types are incompatible.
    return {};
  case Type::Vector:
    // FIXME: The merged type should be an ExtVector!
    if (areCompatVectorTypes(LHS: LHSCan->castAs<VectorType>(),
                             RHS: RHSCan->castAs<VectorType>()))
      return LHS;
    return {};
  case Type::ConstantMatrix:
    if (areCompatMatrixTypes(LHS: LHSCan->castAs<ConstantMatrixType>(),
                             RHS: RHSCan->castAs<ConstantMatrixType>()))
      return LHS;
    return {};
  case Type::ObjCObject: {
    // Check if the types are assignment compatible.
    // FIXME: This should be type compatibility, e.g. whether
    // "LHS x; RHS x;" at global scope is legal.
    if (canAssignObjCInterfaces(LHS: LHS->castAs<ObjCObjectType>(),
                                RHS: RHS->castAs<ObjCObjectType>()))
      return LHS;
    return {};
  }
  case Type::ObjCObjectPointer:
    if (OfBlockPointer) {
      if (canAssignObjCInterfacesInBlockPointer(
              LHSOPT: LHS->castAs<ObjCObjectPointerType>(),
              RHSOPT: RHS->castAs<ObjCObjectPointerType>(), BlockReturnType))
        return LHS;
      return {};
    }
    if (canAssignObjCInterfaces(LHSOPT: LHS->castAs<ObjCObjectPointerType>(),
                                RHSOPT: RHS->castAs<ObjCObjectPointerType>()))
      return LHS;
    return {};
  case Type::Pipe:
    assert(LHS != RHS &&
           "Equivalent pipe types should have already been handled!");
    return {};
  case Type::ArrayParameter:
    assert(LHS != RHS &&
           "Equivalent ArrayParameter types should have already been handled!");
    return {};
  case Type::BitInt: {
    // Merge two bit-precise int types, while trying to preserve typedef info.
    bool LHSUnsigned = LHS->castAs<BitIntType>()->isUnsigned();
    bool RHSUnsigned = RHS->castAs<BitIntType>()->isUnsigned();
    unsigned LHSBits = LHS->castAs<BitIntType>()->getNumBits();
    unsigned RHSBits = RHS->castAs<BitIntType>()->getNumBits();

    // Like unsigned/int, shouldn't have a type if they don't match.
    if (LHSUnsigned != RHSUnsigned)
      return {};

    if (LHSBits != RHSBits)
      return {};
    return LHS;
  }
  case Type::HLSLAttributedResource: {
    const HLSLAttributedResourceType *LHSTy =
        LHS->castAs<HLSLAttributedResourceType>();
    const HLSLAttributedResourceType *RHSTy =
        RHS->castAs<HLSLAttributedResourceType>();
    assert(LHSTy->getWrappedType() == RHSTy->getWrappedType() &&
           LHSTy->getWrappedType()->isHLSLResourceType() &&
           "HLSLAttributedResourceType should always wrap __hlsl_resource_t");

    if (LHSTy->getAttrs() == RHSTy->getAttrs() &&
        LHSTy->getContainedType() == RHSTy->getContainedType())
      return LHS;
    return {};
  }
  case Type::HLSLInlineSpirv:
    const HLSLInlineSpirvType *LHSTy = LHS->castAs<HLSLInlineSpirvType>();
    const HLSLInlineSpirvType *RHSTy = RHS->castAs<HLSLInlineSpirvType>();

    // Inline SPIR-V types are compatible only when opcode, size, alignment,
    // and every operand agree.
    if (LHSTy->getOpcode() == RHSTy->getOpcode() &&
        LHSTy->getSize() == RHSTy->getSize() &&
        LHSTy->getAlignment() == RHSTy->getAlignment()) {
      for (size_t I = 0; I < LHSTy->getOperands().size(); I++)
        if (LHSTy->getOperands()[I] != RHSTy->getOperands()[I])
          return {};

      return LHS;
    }
    return {};
  }

  llvm_unreachable("Invalid Type::Class!");
}
12198
12199bool ASTContext::mergeExtParameterInfo(
12200 const FunctionProtoType *FirstFnType, const FunctionProtoType *SecondFnType,
12201 bool &CanUseFirst, bool &CanUseSecond,
12202 SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &NewParamInfos) {
12203 assert(NewParamInfos.empty() && "param info list not empty");
12204 CanUseFirst = CanUseSecond = true;
12205 bool FirstHasInfo = FirstFnType->hasExtParameterInfos();
12206 bool SecondHasInfo = SecondFnType->hasExtParameterInfos();
12207
12208 // Fast path: if the first type doesn't have ext parameter infos,
12209 // we match if and only if the second type also doesn't have them.
12210 if (!FirstHasInfo && !SecondHasInfo)
12211 return true;
12212
12213 bool NeedParamInfo = false;
12214 size_t E = FirstHasInfo ? FirstFnType->getExtParameterInfos().size()
12215 : SecondFnType->getExtParameterInfos().size();
12216
12217 for (size_t I = 0; I < E; ++I) {
12218 FunctionProtoType::ExtParameterInfo FirstParam, SecondParam;
12219 if (FirstHasInfo)
12220 FirstParam = FirstFnType->getExtParameterInfo(I);
12221 if (SecondHasInfo)
12222 SecondParam = SecondFnType->getExtParameterInfo(I);
12223
12224 // Cannot merge unless everything except the noescape flag matches.
12225 if (FirstParam.withIsNoEscape(NoEscape: false) != SecondParam.withIsNoEscape(NoEscape: false))
12226 return false;
12227
12228 bool FirstNoEscape = FirstParam.isNoEscape();
12229 bool SecondNoEscape = SecondParam.isNoEscape();
12230 bool IsNoEscape = FirstNoEscape && SecondNoEscape;
12231 NewParamInfos.push_back(Elt: FirstParam.withIsNoEscape(NoEscape: IsNoEscape));
12232 if (NewParamInfos.back().getOpaqueValue())
12233 NeedParamInfo = true;
12234 if (FirstNoEscape != IsNoEscape)
12235 CanUseFirst = false;
12236 if (SecondNoEscape != IsNoEscape)
12237 CanUseSecond = false;
12238 }
12239
12240 if (!NeedParamInfo)
12241 NewParamInfos.clear();
12242
12243 return true;
12244}
12245
12246void ASTContext::ResetObjCLayout(const ObjCInterfaceDecl *D) {
12247 if (auto It = ObjCLayouts.find(Val: D); It != ObjCLayouts.end()) {
12248 It->second = nullptr;
12249 for (auto *SubClass : ObjCSubClasses.lookup(Val: D))
12250 ResetObjCLayout(D: SubClass);
12251 }
12252}
12253
12254/// mergeObjCGCQualifiers - This routine merges ObjC's GC attribute of 'LHS' and
12255/// 'RHS' attributes and returns the merged version; including for function
12256/// return types.
12257QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) {
12258 QualType LHSCan = getCanonicalType(T: LHS),
12259 RHSCan = getCanonicalType(T: RHS);
12260 // If two types are identical, they are compatible.
12261 if (LHSCan == RHSCan)
12262 return LHS;
12263 if (RHSCan->isFunctionType()) {
12264 if (!LHSCan->isFunctionType())
12265 return {};
12266 QualType OldReturnType =
12267 cast<FunctionType>(Val: RHSCan.getTypePtr())->getReturnType();
12268 QualType NewReturnType =
12269 cast<FunctionType>(Val: LHSCan.getTypePtr())->getReturnType();
12270 QualType ResReturnType =
12271 mergeObjCGCQualifiers(LHS: NewReturnType, RHS: OldReturnType);
12272 if (ResReturnType.isNull())
12273 return {};
12274 if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) {
12275 // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo();
12276 // In either case, use OldReturnType to build the new function type.
12277 const auto *F = LHS->castAs<FunctionType>();
12278 if (const auto *FPT = cast<FunctionProtoType>(Val: F)) {
12279 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
12280 EPI.ExtInfo = getFunctionExtInfo(t: LHS);
12281 QualType ResultType =
12282 getFunctionType(ResultTy: OldReturnType, Args: FPT->getParamTypes(), EPI);
12283 return ResultType;
12284 }
12285 }
12286 return {};
12287 }
12288
12289 // If the qualifiers are different, the types can still be merged.
12290 Qualifiers LQuals = LHSCan.getLocalQualifiers();
12291 Qualifiers RQuals = RHSCan.getLocalQualifiers();
12292 if (LQuals != RQuals) {
12293 // If any of these qualifiers are different, we have a type mismatch.
12294 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() ||
12295 LQuals.getAddressSpace() != RQuals.getAddressSpace())
12296 return {};
12297
12298 // Exactly one GC qualifier difference is allowed: __strong is
12299 // okay if the other type has no GC qualifier but is an Objective
12300 // C object pointer (i.e. implicitly strong by default). We fix
12301 // this by pretending that the unqualified type was actually
12302 // qualified __strong.
12303 Qualifiers::GC GC_L = LQuals.getObjCGCAttr();
12304 Qualifiers::GC GC_R = RQuals.getObjCGCAttr();
12305 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements");
12306
12307 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak)
12308 return {};
12309
12310 if (GC_L == Qualifiers::Strong)
12311 return LHS;
12312 if (GC_R == Qualifiers::Strong)
12313 return RHS;
12314 return {};
12315 }
12316
12317 if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) {
12318 QualType LHSBaseQT = LHS->castAs<ObjCObjectPointerType>()->getPointeeType();
12319 QualType RHSBaseQT = RHS->castAs<ObjCObjectPointerType>()->getPointeeType();
12320 QualType ResQT = mergeObjCGCQualifiers(LHS: LHSBaseQT, RHS: RHSBaseQT);
12321 if (ResQT == LHSBaseQT)
12322 return LHS;
12323 if (ResQT == RHSBaseQT)
12324 return RHS;
12325 }
12326 return {};
12327}
12328
12329//===----------------------------------------------------------------------===//
12330// Integer Predicates
12331//===----------------------------------------------------------------------===//
12332
12333unsigned ASTContext::getIntWidth(QualType T) const {
12334 if (const auto *ED = T->getAsEnumDecl())
12335 T = ED->getIntegerType();
12336 if (T->isBooleanType())
12337 return 1;
12338 if (const auto *EIT = T->getAs<BitIntType>())
12339 return EIT->getNumBits();
12340 // For builtin types, just use the standard type sizing method
12341 return (unsigned)getTypeSize(T);
12342}
12343
/// Return the unsigned counterpart of an integer-like type: vectors map
/// elementwise, _BitInt keeps its width, overflow-behavior types wrap the
/// unsigned variant of their underlying type, enums go through their
/// underlying integer type, and signed builtins map to the matching unsigned
/// builtin. Already-unsigned types are returned unchanged.
QualType ASTContext::getCorrespondingUnsignedType(QualType T) const {
  assert((T->hasIntegerRepresentation() || T->isEnumeralType() ||
          T->isFixedPointType()) &&
         "Unexpected type");

  // Turn <4 x signed int> -> <4 x unsigned int>
  if (const auto *VTy = T->getAs<VectorType>())
    return getVectorType(vecType: getCorrespondingUnsignedType(T: VTy->getElementType()),
                         NumElts: VTy->getNumElements(), VecKind: VTy->getVectorKind());

  // For _BitInt, return an unsigned _BitInt with same width.
  if (const auto *EITy = T->getAs<BitIntType>())
    return getBitIntType(/*Unsigned=*/IsUnsigned: true, NumBits: EITy->getNumBits());

  // For the overflow behavior types, construct a new unsigned variant
  if (const auto *OBT = T->getAs<OverflowBehaviorType>())
    return getOverflowBehaviorType(
        Kind: OBT->getBehaviorKind(),
        Underlying: getCorrespondingUnsignedType(T: OBT->getUnderlyingType()));

  // For enums, get the underlying integer type of the enum, and let the general
  // integer type signchanging code handle it.
  if (const auto *ED = T->getAsEnumDecl())
    T = ED->getIntegerType();

  switch (T->castAs<BuiltinType>()->getKind()) {
  case BuiltinType::Char_U:
    // Plain `char` is mapped to `unsigned char` even if it's already unsigned
  case BuiltinType::Char_S:
  case BuiltinType::SChar:
  case BuiltinType::Char8:
    return UnsignedCharTy;
  case BuiltinType::Short:
    return UnsignedShortTy;
  case BuiltinType::Int:
    return UnsignedIntTy;
  case BuiltinType::Long:
    return UnsignedLongTy;
  case BuiltinType::LongLong:
    return UnsignedLongLongTy;
  case BuiltinType::Int128:
    return UnsignedInt128Ty;
  // wchar_t is special. It is either signed or not, but when it's signed,
  // there's no matching "unsigned wchar_t". Therefore we return the unsigned
  // version of its underlying type instead.
  case BuiltinType::WChar_S:
    return getUnsignedWCharType();

  // Fixed-point _Accum/_Fract types (and their saturating variants) have
  // direct unsigned counterparts.
  case BuiltinType::ShortAccum:
    return UnsignedShortAccumTy;
  case BuiltinType::Accum:
    return UnsignedAccumTy;
  case BuiltinType::LongAccum:
    return UnsignedLongAccumTy;
  case BuiltinType::SatShortAccum:
    return SatUnsignedShortAccumTy;
  case BuiltinType::SatAccum:
    return SatUnsignedAccumTy;
  case BuiltinType::SatLongAccum:
    return SatUnsignedLongAccumTy;
  case BuiltinType::ShortFract:
    return UnsignedShortFractTy;
  case BuiltinType::Fract:
    return UnsignedFractTy;
  case BuiltinType::LongFract:
    return UnsignedLongFractTy;
  case BuiltinType::SatShortFract:
    return SatUnsignedShortFractTy;
  case BuiltinType::SatFract:
    return SatUnsignedFractTy;
  case BuiltinType::SatLongFract:
    return SatUnsignedLongFractTy;
  default:
    // Anything else must already be unsigned; return it as-is.
    assert((T->hasUnsignedIntegerRepresentation() ||
            T->isUnsignedFixedPointType()) &&
           "Unexpected signed integer or fixed point type");
    return T;
  }
}
12423
12424QualType ASTContext::getCorrespondingSignedType(QualType T) const {
12425 assert((T->hasIntegerRepresentation() || T->isEnumeralType() ||
12426 T->isFixedPointType()) &&
12427 "Unexpected type");
12428
12429 // Turn <4 x unsigned int> -> <4 x signed int>
12430 if (const auto *VTy = T->getAs<VectorType>())
12431 return getVectorType(vecType: getCorrespondingSignedType(T: VTy->getElementType()),
12432 NumElts: VTy->getNumElements(), VecKind: VTy->getVectorKind());
12433
12434 // For _BitInt, return a signed _BitInt with same width.
12435 if (const auto *EITy = T->getAs<BitIntType>())
12436 return getBitIntType(/*Unsigned=*/IsUnsigned: false, NumBits: EITy->getNumBits());
12437
12438 // For enums, get the underlying integer type of the enum, and let the general
12439 // integer type signchanging code handle it.
12440 if (const auto *ED = T->getAsEnumDecl())
12441 T = ED->getIntegerType();
12442
12443 switch (T->castAs<BuiltinType>()->getKind()) {
12444 case BuiltinType::Char_S:
12445 // Plain `char` is mapped to `signed char` even if it's already signed
12446 case BuiltinType::Char_U:
12447 case BuiltinType::UChar:
12448 case BuiltinType::Char8:
12449 return SignedCharTy;
12450 case BuiltinType::UShort:
12451 return ShortTy;
12452 case BuiltinType::UInt:
12453 return IntTy;
12454 case BuiltinType::ULong:
12455 return LongTy;
12456 case BuiltinType::ULongLong:
12457 return LongLongTy;
12458 case BuiltinType::UInt128:
12459 return Int128Ty;
12460 // wchar_t is special. It is either unsigned or not, but when it's unsigned,
12461 // there's no matching "signed wchar_t". Therefore we return the signed
12462 // version of its underlying type instead.
12463 case BuiltinType::WChar_U:
12464 return getSignedWCharType();
12465
12466 case BuiltinType::UShortAccum:
12467 return ShortAccumTy;
12468 case BuiltinType::UAccum:
12469 return AccumTy;
12470 case BuiltinType::ULongAccum:
12471 return LongAccumTy;
12472 case BuiltinType::SatUShortAccum:
12473 return SatShortAccumTy;
12474 case BuiltinType::SatUAccum:
12475 return SatAccumTy;
12476 case BuiltinType::SatULongAccum:
12477 return SatLongAccumTy;
12478 case BuiltinType::UShortFract:
12479 return ShortFractTy;
12480 case BuiltinType::UFract:
12481 return FractTy;
12482 case BuiltinType::ULongFract:
12483 return LongFractTy;
12484 case BuiltinType::SatUShortFract:
12485 return SatShortFractTy;
12486 case BuiltinType::SatUFract:
12487 return SatFractTy;
12488 case BuiltinType::SatULongFract:
12489 return SatLongFractTy;
12490 default:
12491 assert(
12492 (T->hasSignedIntegerRepresentation() || T->isSignedFixedPointType()) &&
12493 "Unexpected signed integer or fixed point type");
12494 return T;
12495 }
12496}
12497
// Defaulted out-of-line so the destructor is emitted in this TU.
ASTMutationListener::~ASTMutationListener() = default;

// Default no-op; listener subclasses override this to be notified when a
// function's deduced return type is resolved.
void ASTMutationListener::DeducedReturnType(const FunctionDecl *FD,
                                            QualType ReturnType) {}
12502
12503//===----------------------------------------------------------------------===//
12504// Builtin Type Computation
12505//===----------------------------------------------------------------------===//
12506
12507/// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the
12508/// pointer over the consumed characters. This returns the resultant type. If
12509/// AllowTypeModifiers is false then modifier like * are not parsed, just basic
12510/// types. This allows "v2i*" to be parsed as a pointer to a v2i instead of
12511/// a vector of "i*".
12512///
12513/// RequiresICE is filled in on return to indicate whether the value is required
12514/// to be an Integer Constant Expression.
static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
                                  ASTContext::GetBuiltinTypeError &Error,
                                  bool &RequiresICE,
                                  bool AllowTypeModifiers) {
  // Modifiers.
  int HowLong = 0; // Number of 'long's: 0=int, 1=long, 2=long long, 3=int128.
  bool Signed = false, Unsigned = false;
  RequiresICE = false;

  // Read the prefixed modifiers first.
  bool Done = false;
#ifndef NDEBUG
  // Asserts-only bookkeeping: at most one of the width-fixing modifiers
  // 'N', 'W', 'Z', 'O' may appear, and none may be combined with 'L'.
  bool IsSpecial = false;
#endif
  while (!Done) {
    switch (*Str++) {
    default: Done = true; --Str; break;
    case 'I':
      // The value must be an integer constant expression.
      RequiresICE = true;
      break;
    case 'S':
      assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!");
      assert(!Signed && "Can't use 'S' modifier multiple times!");
      Signed = true;
      break;
    case 'U':
      assert(!Signed && "Can't use both 'S' and 'U' modifiers!");
      assert(!Unsigned && "Can't use 'U' modifier multiple times!");
      Unsigned = true;
      break;
    case 'L':
      assert(!IsSpecial && "Can't use 'L' with 'W', 'N', 'Z' or 'O' modifiers");
      assert(HowLong <= 2 && "Can't have LLLL modifier");
      ++HowLong;
      break;
    case 'N':
      // 'N' behaves like 'L' for all non LP64 targets and 'int' otherwise.
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'N' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      if (Context.getTargetInfo().getLongWidth() == 32)
        ++HowLong;
      break;
    case 'W':
      // This modifier represents int64 type.
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'W' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      // Pick however many 'long's the target spells int64_t with.
      switch (Context.getTargetInfo().getInt64Type()) {
      default:
        llvm_unreachable("Unexpected integer type");
      case TargetInfo::SignedLong:
        HowLong = 1;
        break;
      case TargetInfo::SignedLongLong:
        HowLong = 2;
        break;
      }
      break;
    case 'Z':
      // This modifier represents int32 type.
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'Z' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      // Pick however many 'long's the target spells a signed 32-bit int with.
      switch (Context.getTargetInfo().getIntTypeByWidth(BitWidth: 32, IsSigned: true)) {
      default:
        llvm_unreachable("Unexpected integer type");
      case TargetInfo::SignedInt:
        HowLong = 0;
        break;
      case TargetInfo::SignedLong:
        HowLong = 1;
        break;
      case TargetInfo::SignedLongLong:
        HowLong = 2;
        break;
      }
      break;
    case 'O':
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'O' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      // 'O' is 'long' in OpenCL and 'long long' otherwise.
      if (Context.getLangOpts().OpenCL)
        HowLong = 1;
      else
        HowLong = 2;
      break;
    }
  }

  QualType Type;

  // Read the base type.
  switch (*Str++) {
  default:
    llvm_unreachable("Unknown builtin type letter!");
  case 'x': // _Float16
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'x'!");
    Type = Context.Float16Ty;
    break;
  case 'y': // __bf16
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'y'!");
    Type = Context.BFloat16Ty;
    break;
  case 'v': // void
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'v'!");
    Type = Context.VoidTy;
    break;
  case 'h': // half
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'h'!");
    Type = Context.HalfTy;
    break;
  case 'f': // float
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'f'!");
    Type = Context.FloatTy;
    break;
  case 'd': // double / long double / __float128, depending on 'L's
    assert(HowLong < 3 && !Signed && !Unsigned &&
           "Bad modifiers used with 'd'!");
    if (HowLong == 1)
      Type = Context.LongDoubleTy;
    else if (HowLong == 2)
      Type = Context.Float128Ty;
    else
      Type = Context.DoubleTy;
    break;
  case 's': // short
    assert(HowLong == 0 && "Bad modifiers used with 's'!");
    if (Unsigned)
      Type = Context.UnsignedShortTy;
    else
      Type = Context.ShortTy;
    break;
  case 'i': // int, widened by the 'L'/'N'/'W'/'Z'/'O' modifiers above
    if (HowLong == 3)
      Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty;
    else if (HowLong == 2)
      Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy;
    else if (HowLong == 1)
      Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy;
    else
      Type = Unsigned ? Context.UnsignedIntTy : Context.IntTy;
    break;
  case 'c': // char (plain, signed, or unsigned per 'S'/'U')
    assert(HowLong == 0 && "Bad modifiers used with 'c'!");
    if (Signed)
      Type = Context.SignedCharTy;
    else if (Unsigned)
      Type = Context.UnsignedCharTy;
    else
      Type = Context.CharTy;
    break;
  case 'b': // boolean
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!");
    Type = Context.BoolTy;
    break;
  case 'z': // size_t.
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!");
    Type = Context.getSizeType();
    break;
  case 'w': // wchar_t.
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'w'!");
    Type = Context.getWideCharType();
    break;
  case 'F': // CFString constant
    Type = Context.getCFConstantStringType();
    break;
  case 'G': // Objective-C 'id'
    Type = Context.getObjCIdType();
    break;
  case 'H': // Objective-C 'SEL'
    Type = Context.getObjCSelType();
    break;
  case 'M': // Objective-C super type
    Type = Context.getObjCSuperType();
    break;
  case 'a': // __builtin_va_list
    Type = Context.getBuiltinVaListType();
    assert(!Type.isNull() && "builtin va list type not initialized!");
    break;
  case 'A':
    // This is a "reference" to a va_list; however, what exactly
    // this means depends on how va_list is defined. There are two
    // different kinds of va_list: ones passed by value, and ones
    // passed by reference. An example of a by-value va_list is
    // x86, where va_list is a char*. An example of by-ref va_list
    // is x86-64, where va_list is a __va_list_tag[1]. For x86,
    // we want this argument to be a char*&; for x86-64, we want
    // it to be a __va_list_tag*.
    Type = Context.getBuiltinVaListType();
    assert(!Type.isNull() && "builtin va list type not initialized!");
    if (Type->isArrayType())
      Type = Context.getArrayDecayedType(Ty: Type);
    else
      Type = Context.getLValueReferenceType(T: Type);
    break;
  case 'q': {
    // Scalable vector: 'q' is followed by the element count, then the
    // element type descriptor (parsed without modifiers).
    char *End;
    unsigned NumElements = strtoul(nptr: Str, endptr: &End, base: 10);
    assert(End != Str && "Missing vector size");
    Str = End;

    QualType ElementType = DecodeTypeFromStr(Str, Context, Error,
                                             RequiresICE, AllowTypeModifiers: false);
    assert(!RequiresICE && "Can't require vector ICE");

    Type = Context.getScalableVectorType(EltTy: ElementType, NumElts: NumElements);
    break;
  }
  case 'Q': {
    // Target-specific builtin types, selected by a second letter.
    switch (*Str++) {
    case 'a': {
      Type = Context.SveCountTy;
      break;
    }
    case 'b': {
      Type = Context.AMDGPUBufferRsrcTy;
      break;
    }
    case 'c': {
      Type = Context.AMDGPUFeaturePredicateTy;
      break;
    }
    case 't': {
      Type = Context.AMDGPUTextureTy;
      break;
    }
    case 'r': {
      Type = Context.HLSLResourceTy;
      break;
    }
    default:
      llvm_unreachable("Unexpected target builtin type");
    }
    break;
  }
  case 'V': {
    // Fixed-width vector: 'V' is followed by the element count, then the
    // element type descriptor (parsed without modifiers).
    char *End;
    unsigned NumElements = strtoul(nptr: Str, endptr: &End, base: 10);
    assert(End != Str && "Missing vector size");
    Str = End;

    QualType ElementType = DecodeTypeFromStr(Str, Context, Error,
                                             RequiresICE, AllowTypeModifiers: false);
    assert(!RequiresICE && "Can't require vector ICE");

    // TODO: No way to make AltiVec vectors in builtins yet.
    Type = Context.getVectorType(vecType: ElementType, NumElts: NumElements, VecKind: VectorKind::Generic);
    break;
  }
  case 'E': {
    // ext_vector_type, same count-then-element encoding as 'V'.
    char *End;

    unsigned NumElements = strtoul(nptr: Str, endptr: &End, base: 10);
    assert(End != Str && "Missing vector size");

    Str = End;

    QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE,
                                             AllowTypeModifiers: false);
    Type = Context.getExtVectorType(vecType: ElementType, NumElts: NumElements);
    break;
  }
  case 'X': {
    // _Complex of the following element type.
    QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE,
                                             AllowTypeModifiers: false);
    assert(!RequiresICE && "Can't require complex ICE");
    Type = Context.getComplexType(T: ElementType);
    break;
  }
  case 'Y':
    // ptrdiff_t.
    Type = Context.getPointerDiffType();
    break;
  case 'P':
    // FILE*; only available if <stdio.h> (or an equivalent declaration of
    // FILE) has been seen.
    Type = Context.getFILEType();
    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_stdio;
      return {};
    }
    break;
  case 'J':
    // jmp_buf ('J') or sigjmp_buf ('SJ'); requires <setjmp.h> declarations.
    if (Signed)
      Type = Context.getsigjmp_bufType();
    else
      Type = Context.getjmp_bufType();

    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_setjmp;
      return {};
    }
    break;
  case 'K':
    // ucontext_t; requires <ucontext.h> declarations.
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'K'!");
    Type = Context.getucontext_tType();

    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_ucontext;
      return {};
    }
    break;
  case 'p':
    // pid_t.
    Type = Context.getProcessIDType();
    break;
  case 'm':
    Type = Context.MFloat8Ty;
    break;
  }

  // If there are modifiers and if we're allowed to parse them, go for it.
  Done = !AllowTypeModifiers;
  while (!Done) {
    switch (char c = *Str++) {
    default: Done = true; --Str; break;
    case '*':
    case '&': {
      // Both pointers and references can have their pointee types
      // qualified with an address space.
      char *End;
      unsigned AddrSpace = strtoul(nptr: Str, endptr: &End, base: 10);
      if (End != Str) {
        // Note AddrSpace == 0 is not the same as an unspecified address space.
        Type = Context.getAddrSpaceQualType(
            T: Type,
            AddressSpace: Context.getLangASForBuiltinAddressSpace(AS: AddrSpace));
        Str = End;
      }
      if (c == '*')
        Type = Context.getPointerType(T: Type);
      else
        Type = Context.getLValueReferenceType(T: Type);
      break;
    }
    // FIXME: There's no way to have a built-in with an rvalue ref arg.
    case 'C': // const
      Type = Type.withConst();
      break;
    case 'D': // volatile
      Type = Context.getVolatileType(T: Type);
      break;
    case 'R': // restrict
      Type = Type.withRestrict();
      break;
    }
  }

  assert((!RequiresICE || Type->isIntegralOrEnumerationType()) &&
         "Integer constant 'I' type must be an integer");

  return Type;
}
12878
12879// On some targets such as PowerPC, some of the builtins are defined with custom
12880// type descriptors for target-dependent types. These descriptors are decoded in
12881// other functions, but it may be useful to be able to fall back to default
12882// descriptor decoding to define builtins mixing target-dependent and target-
12883// independent types. This function allows decoding one type descriptor with
12884// default decoding.
QualType ASTContext::DecodeTypeStr(const char *&Str, const ASTContext &Context,
                                   GetBuiltinTypeError &Error, bool &RequireICE,
                                   bool AllowTypeModifiers) const {
  // Thin wrapper exposing the file-local decoder. Str is advanced past the
  // consumed descriptor; RequireICE reports whether the decoded argument must
  // be an integer constant expression.
  return DecodeTypeFromStr(Str, Context, Error, RequiresICE&: RequireICE, AllowTypeModifiers);
}
12890
/// GetBuiltinType - Return the type for the specified builtin.
///
/// The builtin's type string is decoded descriptor-by-descriptor: the first
/// descriptor is the return type, the rest are parameters, and a trailing '.'
/// marks a variadic function. If IntegerConstantArgs is non-null, it receives
/// a bitmask with bit N set for each parameter that must be an ICE.
QualType ASTContext::GetBuiltinType(unsigned Id,
                                    GetBuiltinTypeError &Error,
                                    unsigned *IntegerConstantArgs) const {
  const char *TypeStr = BuiltinInfo.getTypeString(ID: Id);
  // An empty type string means no prototype is available for this builtin.
  if (TypeStr[0] == '\0') {
    Error = GE_Missing_type;
    return {};
  }

  SmallVector<QualType, 8> ArgTypes;

  bool RequiresICE = false;
  Error = GE_None;
  // First descriptor: the return type.
  QualType ResType = DecodeTypeFromStr(Str&: TypeStr, Context: *this, Error,
                                       RequiresICE, AllowTypeModifiers: true);
  if (Error != GE_None)
    return {};

  assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE");

  // Remaining descriptors: the parameter types, up to a '.' or end of string.
  while (TypeStr[0] && TypeStr[0] != '.') {
    QualType Ty = DecodeTypeFromStr(Str&: TypeStr, Context: *this, Error, RequiresICE, AllowTypeModifiers: true);
    if (Error != GE_None)
      return {};

    // If this argument is required to be an IntegerConstantExpression and the
    // caller cares, fill in the bitmask we return.
    if (RequiresICE && IntegerConstantArgs)
      *IntegerConstantArgs |= 1 << ArgTypes.size();

    // Do array -> pointer decay. The builtin should use the decayed type.
    if (Ty->isArrayType())
      Ty = getArrayDecayedType(Ty);

    ArgTypes.push_back(Elt: Ty);
  }

  // NOTE(review): __GetExceptionInfo gets no prototype here; presumably it is
  // type-checked specially by its callers.
  if (Id == Builtin::BI__GetExceptionInfo)
    return {};

  assert((TypeStr[0] != '.' || TypeStr[1] == 0) &&
         "'.' should only occur at end of builtin type list!");

  bool Variadic = (TypeStr[0] == '.');

  FunctionType::ExtInfo EI(Target->getDefaultCallingConv());
  if (BuiltinInfo.isNoReturn(ID: Id))
    EI = EI.withNoReturn(noReturn: true);

  // We really shouldn't be making a no-proto type here.
  if (ArgTypes.empty() && Variadic && !getLangOpts().requiresStrictPrototypes())
    return getFunctionNoProtoType(ResultTy: ResType, Info: EI);

  FunctionProtoType::ExtProtoInfo EPI;
  EPI.ExtInfo = EI;
  EPI.Variadic = Variadic;
  // In C++, give nothrow builtins an exception specification (noexcept in
  // C++11 and later, throw() before that).
  if (getLangOpts().CPlusPlus && BuiltinInfo.isNoThrow(ID: Id))
    EPI.ExceptionSpec.Type =
        getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone;

  return getFunctionType(ResultTy: ResType, Args: ArgTypes, EPI);
}
12954
/// Compute the base GVA linkage for a function from its visibility, template
/// specialization kind, and inline semantics — before the attribute-based
/// (dllimport/dllexport/CUDA) and external-AST-source adjustments are applied.
static GVALinkage basicGVALinkageForFunction(const ASTContext &Context,
                                             const FunctionDecl *FD) {
  if (!FD->isExternallyVisible())
    return GVA_Internal;

  // Non-user-provided functions get emitted as weak definitions with every
  // use, no matter whether they've been explicitly instantiated etc.
  if (!FD->isUserProvided())
    return GVA_DiscardableODR;

  GVALinkage External;
  switch (FD->getTemplateSpecializationKind()) {
  case TSK_Undeclared:
  case TSK_ExplicitSpecialization:
    External = GVA_StrongExternal;
    break;

  case TSK_ExplicitInstantiationDefinition:
    return GVA_StrongODR;

  // C++11 [temp.explicit]p10:
  //   [ Note: The intent is that an inline function that is the subject of
  //   an explicit instantiation declaration will still be implicitly
  //   instantiated when used so that the body can be considered for
  //   inlining, but that no out-of-line copy of the inline function would be
  //   generated in the translation unit. -- end note ]
  case TSK_ExplicitInstantiationDeclaration:
    return GVA_AvailableExternally;

  case TSK_ImplicitInstantiation:
    External = GVA_DiscardableODR;
    break;
  }

  // Non-inline functions keep the linkage chosen by specialization kind.
  if (!FD->isInlined())
    return External;

  if ((!Context.getLangOpts().CPlusPlus &&
       !Context.getTargetInfo().getCXXABI().isMicrosoft() &&
       !FD->hasAttr<DLLExportAttr>()) ||
      FD->hasAttr<GNUInlineAttr>()) {
    // FIXME: This doesn't match gcc's behavior for dllexport inline functions.

    // GNU or C99 inline semantics. Determine whether this symbol should be
    // externally visible.
    if (FD->isInlineDefinitionExternallyVisible())
      return External;

    // C99 inline semantics, where the symbol is not externally visible.
    return GVA_AvailableExternally;
  }

  // Functions specified with extern and inline in -fms-compatibility mode
  // forcibly get emitted. While the body of the function cannot be later
  // replaced, the function definition cannot be discarded.
  if (FD->isMSExternInline())
    return GVA_StrongODR;

  if (Context.getTargetInfo().getCXXABI().isMicrosoft() &&
      isa<CXXConstructorDecl>(Val: FD) &&
      cast<CXXConstructorDecl>(Val: FD)->isInheritingConstructor() &&
      !FD->hasAttr<DLLExportAttr>()) {
    // Both Clang and MSVC implement inherited constructors as forwarding
    // thunks that delegate to the base constructor. Keep non-dllexport
    // inheriting constructor thunks internal since they are not needed
    // outside the translation unit.
    //
    // dllexport inherited constructors are exempted so they are externally
    // visible, matching MSVC's export behavior. Inherited constructors
    // whose parameters prevent ABI-compatible forwarding (e.g. callee-
    // cleanup types) are excluded from export in Sema to avoid silent
    // runtime mismatches.
    return GVA_Internal;
  }

  // C++ inline (non-MS-extern-inline) functions are discardable ODR.
  return GVA_DiscardableODR;
}
13032
13033static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context,
13034 const Decl *D, GVALinkage L) {
13035 // See http://msdn.microsoft.com/en-us/library/xa0d9ste.aspx
13036 // dllexport/dllimport on inline functions.
13037 if (D->hasAttr<DLLImportAttr>()) {
13038 if (L == GVA_DiscardableODR || L == GVA_StrongODR)
13039 return GVA_AvailableExternally;
13040 } else if (D->hasAttr<DLLExportAttr>()) {
13041 if (L == GVA_DiscardableODR)
13042 return GVA_StrongODR;
13043 } else if (Context.getLangOpts().CUDA && Context.getLangOpts().CUDAIsDevice) {
13044 // Device-side functions with __global__ attribute must always be
13045 // visible externally so they can be launched from host.
13046 if (D->hasAttr<CUDAGlobalAttr>() &&
13047 (L == GVA_DiscardableODR || L == GVA_Internal))
13048 return GVA_StrongODR;
13049 // Single source offloading languages like CUDA/HIP need to be able to
13050 // access static device variables from host code of the same compilation
13051 // unit. This is done by externalizing the static variable with a shared
13052 // name between the host and device compilation which is the same for the
13053 // same compilation unit whereas different among different compilation
13054 // units.
13055 if (Context.shouldExternalize(D))
13056 return GVA_StrongExternal;
13057 }
13058 return L;
13059}
13060
13061/// Adjust the GVALinkage for a declaration based on what an external AST source
13062/// knows about whether there can be other definitions of this declaration.
13063static GVALinkage
13064adjustGVALinkageForExternalDefinitionKind(const ASTContext &Ctx, const Decl *D,
13065 GVALinkage L) {
13066 ExternalASTSource *Source = Ctx.getExternalSource();
13067 if (!Source)
13068 return L;
13069
13070 switch (Source->hasExternalDefinitions(D)) {
13071 case ExternalASTSource::EK_Never:
13072 // Other translation units rely on us to provide the definition.
13073 if (L == GVA_DiscardableODR)
13074 return GVA_StrongODR;
13075 break;
13076
13077 case ExternalASTSource::EK_Always:
13078 return GVA_AvailableExternally;
13079
13080 case ExternalASTSource::EK_ReplyHazy:
13081 break;
13082 }
13083 return L;
13084}
13085
13086GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const {
13087 return adjustGVALinkageForExternalDefinitionKind(Ctx: *this, D: FD,
13088 L: adjustGVALinkageForAttributes(Context: *this, D: FD,
13089 L: basicGVALinkageForFunction(Context: *this, FD)));
13090}
13091
/// Compute the base GVA linkage for a variable from its visibility, storage,
/// inline-variable kind, and template specialization kind — before attribute
/// and external-AST-source adjustments.
static GVALinkage basicGVALinkageForVariable(const ASTContext &Context,
                                             const VarDecl *VD) {
  // As an extension for interactive REPLs, make sure constant variables are
  // only emitted once instead of LinkageComputer::getLVForNamespaceScopeDecl
  // marking them as internal.
  if (Context.getLangOpts().CPlusPlus &&
      Context.getLangOpts().IncrementalExtensions &&
      VD->getType().isConstQualified() &&
      !VD->getType().isVolatileQualified() && !VD->isInline() &&
      !isa<VarTemplateSpecializationDecl>(Val: VD) && !VD->getDescribedVarTemplate())
    return GVA_DiscardableODR;

  if (!VD->isExternallyVisible())
    return GVA_Internal;

  if (VD->isStaticLocal()) {
    // Walk out through lambdas/blocks to the enclosing FunctionDecl, if any.
    const DeclContext *LexicalContext = VD->getParentFunctionOrMethod();
    while (LexicalContext && !isa<FunctionDecl>(Val: LexicalContext))
      LexicalContext = LexicalContext->getLexicalParent();

    // ObjC Blocks can create local variables that don't have a FunctionDecl
    // LexicalContext.
    if (!LexicalContext)
      return GVA_DiscardableODR;

    // Otherwise, let the static local variable inherit its linkage from the
    // nearest enclosing function.
    auto StaticLocalLinkage =
        Context.GetGVALinkageForFunction(FD: cast<FunctionDecl>(Val: LexicalContext));

    // Itanium ABI 5.2.2: "Each COMDAT group [for a static local variable] must
    // be emitted in any object with references to the symbol for the object it
    // contains, whether inline or out-of-line."
    // Similar behavior is observed with MSVC. An alternative ABI could use
    // StrongODR/AvailableExternally to match the function, but none are
    // known/supported currently.
    if (StaticLocalLinkage == GVA_StrongODR ||
        StaticLocalLinkage == GVA_AvailableExternally)
      return GVA_DiscardableODR;
    return StaticLocalLinkage;
  }

  // MSVC treats in-class initialized static data members as definitions.
  // By giving them non-strong linkage, out-of-line definitions won't
  // cause link errors.
  if (Context.isMSStaticDataMemberInlineDefinition(VD))
    return GVA_DiscardableODR;

  // Most non-template variables have strong linkage; inline variables are
  // linkonce_odr or (occasionally, for compatibility) weak_odr.
  GVALinkage StrongLinkage;
  switch (Context.getInlineVariableDefinitionKind(VD)) {
  case ASTContext::InlineVariableDefinitionKind::None:
    StrongLinkage = GVA_StrongExternal;
    break;
  case ASTContext::InlineVariableDefinitionKind::Weak:
  case ASTContext::InlineVariableDefinitionKind::WeakUnknown:
    StrongLinkage = GVA_DiscardableODR;
    break;
  case ASTContext::InlineVariableDefinitionKind::Strong:
    StrongLinkage = GVA_StrongODR;
    break;
  }

  // Finally, let the template specialization kind refine the choice.
  switch (VD->getTemplateSpecializationKind()) {
  case TSK_Undeclared:
    return StrongLinkage;

  case TSK_ExplicitSpecialization:
    // MSVC gives explicitly specialized static data members ODR linkage.
    return Context.getTargetInfo().getCXXABI().isMicrosoft() &&
                   VD->isStaticDataMember()
               ? GVA_StrongODR
               : StrongLinkage;

  case TSK_ExplicitInstantiationDefinition:
    return GVA_StrongODR;

  case TSK_ExplicitInstantiationDeclaration:
    return GVA_AvailableExternally;

  case TSK_ImplicitInstantiation:
    return GVA_DiscardableODR;
  }

  llvm_unreachable("Invalid Linkage!");
}
13178
13179GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) const {
13180 return adjustGVALinkageForExternalDefinitionKind(Ctx: *this, D: VD,
13181 L: adjustGVALinkageForAttributes(Context: *this, D: VD,
13182 L: basicGVALinkageForVariable(Context: *this, VD)));
13183}
13184
/// Determine whether \p D must be emitted in this translation unit even if
/// nothing appears to reference it (e.g. it has non-discardable linkage, a
/// constructor/destructor attribute, or initialization/destruction with
/// side effects).
bool ASTContext::DeclMustBeEmitted(const Decl *D) {
  // First filter by declaration kind: only certain variables, functions, and
  // pragma/OpenMP/import declarations can require emission at all.
  if (const auto *VD = dyn_cast<VarDecl>(Val: D)) {
    if (!VD->isFileVarDecl())
      return false;
    // Global named register variables (GNU extension) are never emitted.
    if (VD->getStorageClass() == SC_Register)
      return false;
    // Variable templates and partial specializations are patterns, not
    // definitions to emit.
    if (VD->getDescribedVarTemplate() ||
        isa<VarTemplatePartialSpecializationDecl>(Val: VD))
      return false;
  } else if (const auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
    // We never need to emit an uninstantiated function template.
    if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
      return false;
  } else if (isa<PragmaCommentDecl>(Val: D))
    return true;
  else if (isa<PragmaDetectMismatchDecl>(Val: D))
    return true;
  else if (isa<OMPRequiresDecl>(Val: D))
    return true;
  else if (isa<OMPThreadPrivateDecl>(Val: D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<OMPAllocateDecl>(Val: D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<OMPDeclareReductionDecl>(Val: D) || isa<OMPDeclareMapperDecl>(Val: D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<ImportDecl>(Val: D))
    return true;
  else
    return false;

  // If this is a member of a class template, we do not need to emit it.
  if (D->getDeclContext()->isDependentContext())
    return false;

  // Weak references don't produce any output by themselves.
  if (D->hasAttr<WeakRefAttr>())
    return false;

  // SYCL device compilation requires that functions defined with the
  // sycl_kernel_entry_point or sycl_external attributes be emitted. All
  // other entities are emitted only if they are used by a function
  // defined with one of those attributes.
  if (LangOpts.SYCLIsDevice)
    return isa<FunctionDecl>(Val: D) && (D->hasAttr<SYCLKernelEntryPointAttr>() ||
                                     D->hasAttr<SYCLExternalAttr>());

  // Aliases and used decls are required.
  if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>())
    return true;

  if (const auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
    // Forward declarations aren't required.
    if (!FD->doesThisDeclarationHaveABody())
      return FD->doesDeclarationForceExternallyVisibleDefinition();

    // Constructors and destructors are required.
    if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>())
      return true;

    // The key function for a class is required. This rule only comes
    // into play when inline functions can be key functions, though.
    if (getTargetInfo().getCXXABI().canKeyFunctionBeInline()) {
      if (const auto *MD = dyn_cast<CXXMethodDecl>(Val: FD)) {
        const CXXRecordDecl *RD = MD->getParent();
        if (MD->isOutOfLine() && RD->isDynamicClass()) {
          const CXXMethodDecl *KeyFunc = getCurrentKeyFunction(RD);
          if (KeyFunc && KeyFunc->getCanonicalDecl() == MD->getCanonicalDecl())
            return true;
        }
      }
    }

    GVALinkage Linkage = GetGVALinkageForFunction(FD);

    // static, static inline, always_inline, and extern inline functions can
    // always be deferred. Normal inline functions can be deferred in C99/C++.
    // Implicit template instantiations can also be deferred in C++.
    return !isDiscardableGVALinkage(L: Linkage);
  }

  // From here on, D is known to be a file-scope variable.
  const auto *VD = cast<VarDecl>(Val: D);
  assert(VD->isFileVarDecl() && "Expected file scoped var");

  // If the decl is marked as `declare target to`, it should be emitted for the
  // host and for the device.
  if (LangOpts.OpenMP &&
      OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
    return true;

  // A pure declaration need not be emitted (except for the MSVC in-class
  // static data member initializer case).
  if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly &&
      !isMSStaticDataMemberInlineDefinition(VD))
    return false;

  if (VD->shouldEmitInExternalSource())
    return false;

  // Variables that can be needed in other TUs are required.
  auto Linkage = GetGVALinkageForVariable(VD);
  if (!isDiscardableGVALinkage(L: Linkage))
    return true;

  // We never need to emit a variable that is available in another TU.
  if (Linkage == GVA_AvailableExternally)
    return false;

  // Variables that have destruction with side-effects are required.
  if (VD->needsDestruction(Ctx: *this))
    return true;

  // Variables that have initialization with side-effects are required.
  if (VD->hasInitWithSideEffects())
    return true;

  // Likewise, variables with tuple-like bindings are required if their
  // bindings have side-effects.
  if (const auto *DD = dyn_cast<DecompositionDecl>(Val: VD)) {
    for (const auto *BD : DD->flat_bindings())
      if (const auto *BindingVD = BD->getHoldingVar())
        if (DeclMustBeEmitted(D: BindingVD))
          return true;
  }

  return false;
}
13310
13311void ASTContext::forEachMultiversionedFunctionVersion(
13312 const FunctionDecl *FD,
13313 llvm::function_ref<void(FunctionDecl *)> Pred) const {
13314 assert(FD->isMultiVersion() && "Only valid for multiversioned functions");
13315 llvm::SmallDenseSet<const FunctionDecl*, 4> SeenDecls;
13316 FD = FD->getMostRecentDecl();
13317 // FIXME: The order of traversal here matters and depends on the order of
13318 // lookup results, which happens to be (mostly) oldest-to-newest, but we
13319 // shouldn't rely on that.
13320 for (auto *CurDecl :
13321 FD->getDeclContext()->getRedeclContext()->lookup(Name: FD->getDeclName())) {
13322 FunctionDecl *CurFD = CurDecl->getAsFunction()->getMostRecentDecl();
13323 if (CurFD && hasSameType(T1: CurFD->getType(), T2: FD->getType()) &&
13324 SeenDecls.insert(V: CurFD).second) {
13325 Pred(CurFD);
13326 }
13327 }
13328}
13329
/// Compute the default calling convention for a function, honoring the
/// default-calling-convention language option. Several conventions cannot be
/// applied to variadic functions; in those cases (and for DCC_None) we fall
/// through to the target's default convention.
CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic,
                                                    bool IsCXXMethod) const {
  // Pass through to the C++ ABI object
  if (IsCXXMethod)
    return ABI->getDefaultMethodCallConv(isVariadic: IsVariadic);

  switch (LangOpts.getDefaultCallingConv()) {
  case LangOptions::DCC_None:
    break;
  case LangOptions::DCC_CDecl:
    return CC_C;
  case LangOptions::DCC_FastCall:
    // __fastcall additionally requires SSE2 here.
    if (getTargetInfo().hasFeature(Feature: "sse2") && !IsVariadic)
      return CC_X86FastCall;
    break;
  case LangOptions::DCC_StdCall:
    if (!IsVariadic)
      return CC_X86StdCall;
    break;
  case LangOptions::DCC_VectorCall:
    // __vectorcall cannot be applied to variadic functions.
    if (!IsVariadic)
      return CC_X86VectorCall;
    break;
  case LangOptions::DCC_RegCall:
    // __regcall cannot be applied to variadic functions.
    if (!IsVariadic)
      return CC_X86RegCall;
    break;
  case LangOptions::DCC_RtdCall:
    if (!IsVariadic)
      return CC_M68kRTD;
    break;
  }
  // Either no default convention was requested, or it does not apply here.
  return Target->getDefaultCallingConv();
}
13366
/// Whether \p RD is a "nearly empty" class; the definition of this property
/// is owned by the C++ ABI object.
bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const {
  // Pass through to the C++ ABI object
  return ABI->isNearlyEmpty(RD);
}
13371
13372VTableContextBase *ASTContext::getVTableContext() {
13373 if (!VTContext) {
13374 auto ABI = Target->getCXXABI();
13375 if (ABI.isMicrosoft())
13376 VTContext.reset(p: new MicrosoftVTableContext(*this));
13377 else {
13378 VTContext.reset(p: new ItaniumVTableContext(*this));
13379 }
13380 }
13381 return VTContext.get();
13382}
13383
/// Create a mangling context for the given target, defaulting to this
/// ASTContext's own target when \p T is null. All Itanium-family ABIs share
/// the Itanium mangler; Microsoft has its own. The switch is kept exhaustive
/// so new ABI kinds are flagged at compile time.
MangleContext *ASTContext::createMangleContext(const TargetInfo *T) {
  if (!T)
    T = Target;
  switch (T->getCXXABI().getKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::XL:
    return ItaniumMangleContext::create(Context&: *this, Diags&: getDiagnostics());
  case TargetCXXABI::Microsoft:
    return MicrosoftMangleContext::create(Context&: *this, Diags&: getDiagnostics());
  }
  llvm_unreachable("Unsupported ABI");
}
13404
/// Create an auxiliary ("device") mangling context for target \p T. Lambdas
/// are discriminated using their device-side mangling number. Note the
/// assertion forbids Microsoft mangling, so only the Itanium cases should be
/// reachable in practice.
MangleContext *ASTContext::createDeviceMangleContext(const TargetInfo &T) {
  assert(T.getCXXABI().getKind() != TargetCXXABI::Microsoft &&
         "Device mangle context does not support Microsoft mangling.");
  switch (T.getCXXABI().getKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::XL:
    return ItaniumMangleContext::create(
        Context&: *this, Diags&: getDiagnostics(),
        // Discriminator: use the device-side lambda mangling number.
        Discriminator: [](ASTContext &, const NamedDecl *ND) -> UnsignedOrNone {
          if (const auto *RD = dyn_cast<CXXRecordDecl>(Val: ND))
            return RD->getDeviceLambdaManglingNumber();
          return std::nullopt;
        },
        /*IsAux=*/true);
  case TargetCXXABI::Microsoft:
    return MicrosoftMangleContext::create(Context&: *this, Diags&: getDiagnostics(),
                                          /*IsAux=*/true);
  }
  llvm_unreachable("Unsupported ABI");
}
13433
13434MangleContext *ASTContext::cudaNVInitDeviceMC() {
13435 // If the host and device have different C++ ABIs, mark it as the device
13436 // mangle context so that the mangling needs to retrieve the additional
13437 // device lambda mangling number instead of the regular host one.
13438 if (getAuxTargetInfo() && getTargetInfo().getCXXABI().isMicrosoft() &&
13439 getAuxTargetInfo()->getCXXABI().isItaniumFamily()) {
13440 return createDeviceMangleContext(T: *getAuxTargetInfo());
13441 }
13442
13443 return createMangleContext(T: getAuxTargetInfo());
13444}
13445
// Out-of-line definition of the CXXABI destructor (declared in CXXABI.h).
CXXABI::~CXXABI() = default;
13447
13448size_t ASTContext::getSideTableAllocatedMemory() const {
13449 return ASTRecordLayouts.getMemorySize() +
13450 llvm::capacity_in_bytes(X: ObjCLayouts) +
13451 llvm::capacity_in_bytes(X: KeyFunctions) +
13452 llvm::capacity_in_bytes(X: ObjCImpls) +
13453 llvm::capacity_in_bytes(X: BlockVarCopyInits) +
13454 llvm::capacity_in_bytes(X: DeclAttrs) +
13455 llvm::capacity_in_bytes(X: TemplateOrInstantiation) +
13456 llvm::capacity_in_bytes(X: InstantiatedFromUsingDecl) +
13457 llvm::capacity_in_bytes(X: InstantiatedFromUsingShadowDecl) +
13458 llvm::capacity_in_bytes(X: InstantiatedFromUnnamedFieldDecl) +
13459 llvm::capacity_in_bytes(X: OverriddenMethods) +
13460 llvm::capacity_in_bytes(X: Types) +
13461 llvm::capacity_in_bytes(x: VariableArrayTypes);
13462}
13463
13464/// getIntTypeForBitwidth -
13465/// sets integer QualTy according to specified details:
13466/// bitwidth, signed/unsigned.
13467/// Returns empty type if there is no appropriate target types.
13468QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth,
13469 unsigned Signed) const {
13470 TargetInfo::IntType Ty = getTargetInfo().getIntTypeByWidth(BitWidth: DestWidth, IsSigned: Signed);
13471 CanQualType QualTy = getFromTargetType(Type: Ty);
13472 if (!QualTy && DestWidth == 128)
13473 return Signed ? Int128Ty : UnsignedInt128Ty;
13474 return QualTy;
13475}
13476
/// getRealTypeForBitwidth -
/// Returns the floating-point QualType matching the specified bit width
/// (and, optionally, an explicitly requested float mode).
/// Returns an empty type if the target has no appropriate type.
QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth,
                                            FloatModeKind ExplicitType) const {
  // Let the target pick the float mode, then map it onto our builtin types.
  FloatModeKind Ty =
      getTargetInfo().getRealTypeByWidth(BitWidth: DestWidth, ExplicitType);
  switch (Ty) {
  case FloatModeKind::Half:
    return HalfTy;
  case FloatModeKind::Float:
    return FloatTy;
  case FloatModeKind::Double:
    return DoubleTy;
  case FloatModeKind::LongDouble:
    return LongDoubleTy;
  case FloatModeKind::Float128:
    return Float128Ty;
  case FloatModeKind::Ibm128:
    return Ibm128Ty;
  case FloatModeKind::NoFloat:
    return {};
  }

  llvm_unreachable("Unhandled TargetInfo::RealType value");
}
13503
13504void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) {
13505 if (Number <= 1)
13506 return;
13507
13508 MangleNumbers[ND] = Number;
13509
13510 if (Listener)
13511 Listener->AddedManglingNumber(D: ND, Number);
13512}
13513
/// Retrieve the mangling number recorded for \p ND, defaulting to 1. In
/// CUDA/HIP host compilation the stored value packs the host and device
/// numbers into one 32-bit integer; \p ForAuxTarget selects which half.
unsigned ASTContext::getManglingNumber(const NamedDecl *ND,
                                       bool ForAuxTarget) const {
  auto I = MangleNumbers.find(Key: ND);
  unsigned Res = I != MangleNumbers.end() ? I->second : 1;
  // CUDA/HIP host compilation encodes host and device mangling numbers
  // as lower and upper half of 32 bit integer.
  if (LangOpts.CUDA && !LangOpts.CUDAIsDevice) {
    Res = ForAuxTarget ? Res >> 16 : Res & 0xFFFF;
  } else {
    assert(!ForAuxTarget && "Only CUDA/HIP host compilation supports mangling "
                            "number for aux target");
  }
  // Normalize: an extracted half of 0 still means the default number 1.
  return Res > 1 ? Res : 1;
}
13528
13529void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) {
13530 if (Number <= 1)
13531 return;
13532
13533 StaticLocalNumbers[VD] = Number;
13534
13535 if (Listener)
13536 Listener->AddedStaticLocalNumbers(D: VD, Number);
13537}
13538
13539unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const {
13540 auto I = StaticLocalNumbers.find(Key: VD);
13541 return I != StaticLocalNumbers.end() ? I->second : 1;
13542}
13543
13544void ASTContext::setIsDestroyingOperatorDelete(const FunctionDecl *FD,
13545 bool IsDestroying) {
13546 if (!IsDestroying) {
13547 assert(!DestroyingOperatorDeletes.contains(FD->getCanonicalDecl()));
13548 return;
13549 }
13550 DestroyingOperatorDeletes.insert(V: FD->getCanonicalDecl());
13551}
13552
13553bool ASTContext::isDestroyingOperatorDelete(const FunctionDecl *FD) const {
13554 return DestroyingOperatorDeletes.contains(V: FD->getCanonicalDecl());
13555}
13556
13557void ASTContext::setIsTypeAwareOperatorNewOrDelete(const FunctionDecl *FD,
13558 bool IsTypeAware) {
13559 if (!IsTypeAware) {
13560 assert(!TypeAwareOperatorNewAndDeletes.contains(FD->getCanonicalDecl()));
13561 return;
13562 }
13563 TypeAwareOperatorNewAndDeletes.insert(V: FD->getCanonicalDecl());
13564}
13565
13566bool ASTContext::isTypeAwareOperatorNewOrDelete(const FunctionDecl *FD) const {
13567 return TypeAwareOperatorNewAndDeletes.contains(V: FD->getCanonicalDecl());
13568}
13569
13570void ASTContext::addOperatorDeleteForVDtor(const CXXDestructorDecl *Dtor,
13571 FunctionDecl *OperatorDelete,
13572 OperatorDeleteKind K) const {
13573 switch (K) {
13574 case OperatorDeleteKind::Regular:
13575 OperatorDeletesForVirtualDtor[Dtor->getCanonicalDecl()] = OperatorDelete;
13576 break;
13577 case OperatorDeleteKind::GlobalRegular:
13578 GlobalOperatorDeletesForVirtualDtor[Dtor->getCanonicalDecl()] =
13579 OperatorDelete;
13580 break;
13581 case OperatorDeleteKind::Array:
13582 ArrayOperatorDeletesForVirtualDtor[Dtor->getCanonicalDecl()] =
13583 OperatorDelete;
13584 break;
13585 case OperatorDeleteKind::ArrayGlobal:
13586 GlobalArrayOperatorDeletesForVirtualDtor[Dtor->getCanonicalDecl()] =
13587 OperatorDelete;
13588 break;
13589 }
13590}
13591
13592bool ASTContext::dtorHasOperatorDelete(const CXXDestructorDecl *Dtor,
13593 OperatorDeleteKind K) const {
13594 switch (K) {
13595 case OperatorDeleteKind::Regular:
13596 return OperatorDeletesForVirtualDtor.contains(Val: Dtor->getCanonicalDecl());
13597 case OperatorDeleteKind::GlobalRegular:
13598 return GlobalOperatorDeletesForVirtualDtor.contains(
13599 Val: Dtor->getCanonicalDecl());
13600 case OperatorDeleteKind::Array:
13601 return ArrayOperatorDeletesForVirtualDtor.contains(
13602 Val: Dtor->getCanonicalDecl());
13603 case OperatorDeleteKind::ArrayGlobal:
13604 return GlobalArrayOperatorDeletesForVirtualDtor.contains(
13605 Val: Dtor->getCanonicalDecl());
13606 }
13607 return false;
13608}
13609
13610FunctionDecl *
13611ASTContext::getOperatorDeleteForVDtor(const CXXDestructorDecl *Dtor,
13612 OperatorDeleteKind K) const {
13613 const CXXDestructorDecl *Canon = Dtor->getCanonicalDecl();
13614 switch (K) {
13615 case OperatorDeleteKind::Regular:
13616 if (OperatorDeletesForVirtualDtor.contains(Val: Canon))
13617 return OperatorDeletesForVirtualDtor[Canon];
13618 return nullptr;
13619 case OperatorDeleteKind::GlobalRegular:
13620 if (GlobalOperatorDeletesForVirtualDtor.contains(Val: Canon))
13621 return GlobalOperatorDeletesForVirtualDtor[Canon];
13622 return nullptr;
13623 case OperatorDeleteKind::Array:
13624 if (ArrayOperatorDeletesForVirtualDtor.contains(Val: Canon))
13625 return ArrayOperatorDeletesForVirtualDtor[Canon];
13626 return nullptr;
13627 case OperatorDeleteKind::ArrayGlobal:
13628 if (GlobalArrayOperatorDeletesForVirtualDtor.contains(Val: Canon))
13629 return GlobalArrayOperatorDeletesForVirtualDtor[Canon];
13630 return nullptr;
13631 }
13632 return nullptr;
13633}
13634
13635bool ASTContext::classMaybeNeedsVectorDeletingDestructor(
13636 const CXXRecordDecl *RD) {
13637 if (!getTargetInfo().emitVectorDeletingDtors(getLangOpts()))
13638 return false;
13639
13640 return MaybeRequireVectorDeletingDtor.count(V: RD);
13641}
13642
13643void ASTContext::setClassMaybeNeedsVectorDeletingDestructor(
13644 const CXXRecordDecl *RD) {
13645 if (!getTargetInfo().emitVectorDeletingDtors(getLangOpts()))
13646 return;
13647
13648 MaybeRequireVectorDeletingDtor.insert(V: RD);
13649}
13650
13651MangleNumberingContext &
13652ASTContext::getManglingNumberContext(const DeclContext *DC) {
13653 assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
13654 std::unique_ptr<MangleNumberingContext> &MCtx = MangleNumberingContexts[DC];
13655 if (!MCtx)
13656 MCtx = createMangleNumberingContext();
13657 return *MCtx;
13658}
13659
13660MangleNumberingContext &
13661ASTContext::getManglingNumberContext(NeedExtraManglingDecl_t, const Decl *D) {
13662 assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
13663 std::unique_ptr<MangleNumberingContext> &MCtx =
13664 ExtraMangleNumberingContexts[D];
13665 if (!MCtx)
13666 MCtx = createMangleNumberingContext();
13667 return *MCtx;
13668}
13669
13670std::unique_ptr<MangleNumberingContext>
13671ASTContext::createMangleNumberingContext() const {
13672 return ABI->createMangleNumberingContext();
13673}
13674
13675const CXXConstructorDecl *
13676ASTContext::getCopyConstructorForExceptionObject(CXXRecordDecl *RD) {
13677 return ABI->getCopyConstructorForExceptionObject(
13678 cast<CXXRecordDecl>(Val: RD->getFirstDecl()));
13679}
13680
13681void ASTContext::addCopyConstructorForExceptionObject(CXXRecordDecl *RD,
13682 CXXConstructorDecl *CD) {
13683 return ABI->addCopyConstructorForExceptionObject(
13684 cast<CXXRecordDecl>(Val: RD->getFirstDecl()),
13685 cast<CXXConstructorDecl>(Val: CD->getFirstDecl()));
13686}
13687
/// Record \p DD as the typedef name associated with the unnamed tag
/// declaration \p TD. The state is owned by the C++ ABI object.
void ASTContext::addTypedefNameForUnnamedTagDecl(TagDecl *TD,
                                                 TypedefNameDecl *DD) {
  return ABI->addTypedefNameForUnnamedTagDecl(TD, DD);
}

/// Retrieve the typedef name previously recorded for \p TD, if any.
TypedefNameDecl *
ASTContext::getTypedefNameForUnnamedTagDecl(const TagDecl *TD) {
  return ABI->getTypedefNameForUnnamedTagDecl(TD);
}

/// Record \p DD as the declarator associated with the unnamed tag
/// declaration \p TD. The state is owned by the C++ ABI object.
void ASTContext::addDeclaratorForUnnamedTagDecl(TagDecl *TD,
                                                DeclaratorDecl *DD) {
  return ABI->addDeclaratorForUnnamedTagDecl(TD, DD);
}

/// Retrieve the declarator previously recorded for \p TD, if any.
DeclaratorDecl *ASTContext::getDeclaratorForUnnamedTagDecl(const TagDecl *TD) {
  return ABI->getDeclaratorForUnnamedTagDecl(TD);
}
13706
13707void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) {
13708 ParamIndices[D] = index;
13709}
13710
13711unsigned ASTContext::getParameterIndex(const ParmVarDecl *D) const {
13712 ParameterIndexTable::const_iterator I = ParamIndices.find(Val: D);
13713 assert(I != ParamIndices.end() &&
13714 "ParmIndices lacks entry set by ParmVarDecl");
13715 return I->second;
13716}
13717
13718QualType ASTContext::getStringLiteralArrayType(QualType EltTy,
13719 unsigned Length) const {
13720 // A C++ string literal has a const-qualified element type (C++ 2.13.4p1).
13721 if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings)
13722 EltTy = EltTy.withConst();
13723
13724 EltTy = adjustStringLiteralBaseType(Ty: EltTy);
13725
13726 // Get an array type for the string, according to C99 6.4.5. This includes
13727 // the null terminator character.
13728 return getConstantArrayType(EltTy, ArySizeIn: llvm::APInt(32, Length + 1), SizeExpr: nullptr,
13729 ASM: ArraySizeModifier::Normal, /*IndexTypeQuals*/ 0);
13730}
13731
13732StringLiteral *
13733ASTContext::getPredefinedStringLiteralFromCache(StringRef Key) const {
13734 StringLiteral *&Result = StringLiteralCache[Key];
13735 if (!Result)
13736 Result = StringLiteral::Create(
13737 Ctx: *this, Str: Key, Kind: StringLiteralKind::Ordinary,
13738 /*Pascal*/ false, Ty: getStringLiteralArrayType(EltTy: CharTy, Length: Key.size()),
13739 Locs: SourceLocation());
13740 return Result;
13741}
13742
13743MSGuidDecl *
13744ASTContext::getMSGuidDecl(MSGuidDecl::Parts Parts) const {
13745 assert(MSGuidTagDecl && "building MS GUID without MS extensions?");
13746
13747 llvm::FoldingSetNodeID ID;
13748 MSGuidDecl::Profile(ID, P: Parts);
13749
13750 void *InsertPos;
13751 if (MSGuidDecl *Existing = MSGuidDecls.FindNodeOrInsertPos(ID, InsertPos))
13752 return Existing;
13753
13754 QualType GUIDType = getMSGuidType().withConst();
13755 MSGuidDecl *New = MSGuidDecl::Create(C: *this, T: GUIDType, P: Parts);
13756 MSGuidDecls.InsertNode(N: New, InsertPos);
13757 return New;
13758}
13759
13760UnnamedGlobalConstantDecl *
13761ASTContext::getUnnamedGlobalConstantDecl(QualType Ty,
13762 const APValue &APVal) const {
13763 llvm::FoldingSetNodeID ID;
13764 UnnamedGlobalConstantDecl::Profile(ID, Ty, APVal);
13765
13766 void *InsertPos;
13767 if (UnnamedGlobalConstantDecl *Existing =
13768 UnnamedGlobalConstantDecls.FindNodeOrInsertPos(ID, InsertPos))
13769 return Existing;
13770
13771 UnnamedGlobalConstantDecl *New =
13772 UnnamedGlobalConstantDecl::Create(C: *this, T: Ty, APVal);
13773 UnnamedGlobalConstantDecls.InsertNode(N: New, InsertPos);
13774 return New;
13775}
13776
/// Return the unique template parameter object declaration for the record
/// type \p T and value \p V, creating it on first request.
TemplateParamObjectDecl *
ASTContext::getTemplateParamObjectDecl(QualType T, const APValue &V) const {
  assert(T->isRecordType() && "template param object of unexpected type");

  // C++ [temp.param]p8:
  //   [...] a static storage duration object of type 'const T' [...]
  T.addConst();

  // These declarations are uniqued by (type, value) via the folding set.
  llvm::FoldingSetNodeID ID;
  TemplateParamObjectDecl::Profile(ID, T, V);

  void *InsertPos;
  if (TemplateParamObjectDecl *Existing =
          TemplateParamObjectDecls.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;

  TemplateParamObjectDecl *New = TemplateParamObjectDecl::Create(C: *this, T, V);
  TemplateParamObjectDecls.InsertNode(N: New, InsertPos);
  return New;
}
13797
/// Return true if the given atomic expression would lower to an atomic
/// libcall that is unavailable on the targeted (old) Darwin OS version.
bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const {
  const llvm::Triple &T = getTargetInfo().getTriple();
  // Only Darwin targets are affected.
  if (!T.isOSDarwin())
    return false;

  // iOS 7 / macOS 10.9 and later provide the libcalls.
  if (!(T.isiOS() && T.isOSVersionLT(Major: 7)) &&
      !(T.isMacOSX() && T.isOSVersionLT(Major: 10, Minor: 9)))
    return false;

  // Otherwise a libcall is needed (and unsupported) unless the atomic can be
  // inlined: size equal to alignment and no wider than the target's maximum
  // inline atomic width.
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  CharUnits sizeChars = getTypeSizeInChars(T: AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getTypeAlignInChars(T: AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits = getTargetInfo().getMaxAtomicInlineWidth();
  return (Size != Align || toBits(CharSize: sizeChars) > MaxInlineWidthInBits);
}
13815
/// Check whether an ObjC method implementation matches its declaration:
/// same ObjC qualifiers, return and parameter types, and variadic-ness.
/// Unavailable/deprecated declarations never match.
bool
ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl,
                                const ObjCMethodDecl *MethodImpl) {
  // No point trying to match an unavailable/deprecated method.
  if (MethodDecl->hasAttr<UnavailableAttr>()
      || MethodDecl->hasAttr<DeprecatedAttr>())
    return false;
  if (MethodDecl->getObjCDeclQualifier() !=
      MethodImpl->getObjCDeclQualifier())
    return false;
  if (!hasSameType(T1: MethodDecl->getReturnType(), T2: MethodImpl->getReturnType()))
    return false;

  if (MethodDecl->param_size() != MethodImpl->param_size())
    return false;

  // Compare the parameter lists pairwise: qualifiers and types must agree.
  for (ObjCMethodDecl::param_const_iterator IM = MethodImpl->param_begin(),
       IF = MethodDecl->param_begin(), EM = MethodImpl->param_end(),
       EF = MethodDecl->param_end();
       IM != EM && IF != EF; ++IM, ++IF) {
    const ParmVarDecl *DeclVar = (*IF);
    const ParmVarDecl *ImplVar = (*IM);
    if (ImplVar->getObjCDeclQualifier() != DeclVar->getObjCDeclQualifier())
      return false;
    if (!hasSameType(T1: DeclVar->getType(), T2: ImplVar->getType()))
      return false;
  }

  return (MethodDecl->isVariadic() == MethodImpl->isVariadic());
}
13846
13847uint64_t ASTContext::getTargetNullPointerValue(QualType QT) const {
13848 LangAS AS;
13849 if (QT->getUnqualifiedDesugaredType()->isNullPtrType())
13850 AS = LangAS::Default;
13851 else
13852 AS = QT->getPointeeType().getAddressSpace();
13853
13854 return getTargetInfo().getNullPointerValue(AddrSpace: AS);
13855}
13856
/// Map a language address space onto the target's numeric address space.
unsigned ASTContext::getTargetAddressSpace(LangAS AS) const {
  return getTargetInfo().getTargetAddressSpace(AS);
}
13860
13861bool ASTContext::hasSameExpr(const Expr *X, const Expr *Y) const {
13862 if (X == Y)
13863 return true;
13864 if (!X || !Y)
13865 return false;
13866 llvm::FoldingSetNodeID IDX, IDY;
13867 X->Profile(ID&: IDX, Context: *this, /*Canonical=*/true);
13868 Y->Profile(ID&: IDY, Context: *this, /*Canonical=*/true);
13869 return IDX == IDY;
13870}
13871
// The getCommon* helpers take two 'same' X and Y entities as inputs and
// return another entity which is also the 'same' as the inputs, but which
// is closer to the canonical form of the inputs, each according to a given
// criterion.
// The getCommon*Checked variants are 'null inputs not allowed' equivalents
// of the regular ones.
13878
/// If \p X and \p Y declare the same entity, return the older of the two
/// declarations; otherwise return nullptr.
static Decl *getCommonDecl(Decl *X, Decl *Y) {
  if (!declaresSameEntity(D1: X, D2: Y))
    return nullptr;
  // Walk X's redeclaration chain looking for either Y or the first decl.
  for (const Decl *DX : X->redecls()) {
    // If we reach Y before reaching the first decl, that means X is older.
    if (DX == Y)
      return X;
    // If we reach the first decl, then Y is older.
    if (DX->isFirstDecl())
      return Y;
  }
  llvm_unreachable("Corrupt redecls chain");
}
13892
/// Type-safe overload of getCommonDecl for Decl subclasses; null inputs are
/// allowed and propagate to a null result.
template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true>
static T *getCommonDecl(T *X, T *Y) {
  return cast_or_null<T>(
      getCommonDecl(X: const_cast<Decl *>(cast_or_null<Decl>(X)),
                    Y: const_cast<Decl *>(cast_or_null<Decl>(Y))));
}

/// Like getCommonDecl, but the inputs must be non-null and declare the same
/// entity, so the result is non-null.
template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true>
static T *getCommonDeclChecked(T *X, T *Y) {
  return cast<T>(getCommonDecl(X: const_cast<Decl *>(cast<Decl>(X)),
                               Y: const_cast<Decl *>(cast<Decl>(Y))));
}
13905
/// Return a template name that is the 'same' as both \p X and \p Y,
/// preferring a sugared form; returns a null TemplateName when they do not
/// name the same template.
static TemplateName getCommonTemplateName(const ASTContext &Ctx, TemplateName X,
                                          TemplateName Y,
                                          bool IgnoreDeduced = false) {
  // Identical representations are trivially common.
  if (X.getAsVoidPointer() == Y.getAsVoidPointer())
    return X;
  // FIXME: There are cases here where we could find a common template name
  // with more sugar. For example one could be a SubstTemplateTemplate*
  // replacing the other.
  TemplateName CX = Ctx.getCanonicalTemplateName(Name: X, IgnoreDeduced);
  if (CX.getAsVoidPointer() !=
      Ctx.getCanonicalTemplateName(Name: Y).getAsVoidPointer())
    return TemplateName();
  return CX;
}
13920
13921static TemplateName getCommonTemplateNameChecked(const ASTContext &Ctx,
13922 TemplateName X, TemplateName Y,
13923 bool IgnoreDeduced) {
13924 TemplateName R = getCommonTemplateName(Ctx, X, Y, IgnoreDeduced);
13925 assert(R.getAsVoidPointer() != nullptr);
13926 return R;
13927}
13928
13929static auto getCommonTypes(const ASTContext &Ctx, ArrayRef<QualType> Xs,
13930 ArrayRef<QualType> Ys, bool Unqualified = false) {
13931 assert(Xs.size() == Ys.size());
13932 SmallVector<QualType, 8> Rs(Xs.size());
13933 for (size_t I = 0; I < Rs.size(); ++I)
13934 Rs[I] = Ctx.getCommonSugaredType(X: Xs[I], Y: Ys[I], Unqualified);
13935 return Rs;
13936}
13937
13938template <class T>
13939static SourceLocation getCommonAttrLoc(const T *X, const T *Y) {
13940 return X->getAttributeLoc() == Y->getAttributeLoc() ? X->getAttributeLoc()
13941 : SourceLocation();
13942}
13943
13944static TemplateArgument getCommonTemplateArgument(const ASTContext &Ctx,
13945 const TemplateArgument &X,
13946 const TemplateArgument &Y) {
13947 if (X.getKind() != Y.getKind())
13948 return TemplateArgument();
13949
13950 switch (X.getKind()) {
13951 case TemplateArgument::ArgKind::Type:
13952 if (!Ctx.hasSameType(T1: X.getAsType(), T2: Y.getAsType()))
13953 return TemplateArgument();
13954 return TemplateArgument(
13955 Ctx.getCommonSugaredType(X: X.getAsType(), Y: Y.getAsType()));
13956 case TemplateArgument::ArgKind::NullPtr:
13957 if (!Ctx.hasSameType(T1: X.getNullPtrType(), T2: Y.getNullPtrType()))
13958 return TemplateArgument();
13959 return TemplateArgument(
13960 Ctx.getCommonSugaredType(X: X.getNullPtrType(), Y: Y.getNullPtrType()),
13961 /*Unqualified=*/true);
13962 case TemplateArgument::ArgKind::Expression:
13963 if (!Ctx.hasSameType(T1: X.getAsExpr()->getType(), T2: Y.getAsExpr()->getType()))
13964 return TemplateArgument();
13965 // FIXME: Try to keep the common sugar.
13966 return X;
13967 case TemplateArgument::ArgKind::Template: {
13968 TemplateName TX = X.getAsTemplate(), TY = Y.getAsTemplate();
13969 TemplateName CTN = ::getCommonTemplateName(Ctx, X: TX, Y: TY);
13970 if (!CTN.getAsVoidPointer())
13971 return TemplateArgument();
13972 return TemplateArgument(CTN);
13973 }
13974 case TemplateArgument::ArgKind::TemplateExpansion: {
13975 TemplateName TX = X.getAsTemplateOrTemplatePattern(),
13976 TY = Y.getAsTemplateOrTemplatePattern();
13977 TemplateName CTN = ::getCommonTemplateName(Ctx, X: TX, Y: TY);
13978 if (!CTN.getAsVoidPointer())
13979 return TemplateName();
13980 auto NExpX = X.getNumTemplateExpansions();
13981 assert(NExpX == Y.getNumTemplateExpansions());
13982 return TemplateArgument(CTN, NExpX);
13983 }
13984 default:
13985 // FIXME: Handle the other argument kinds.
13986 return X;
13987 }
13988}
13989
13990static bool getCommonTemplateArguments(const ASTContext &Ctx,
13991 SmallVectorImpl<TemplateArgument> &R,
13992 ArrayRef<TemplateArgument> Xs,
13993 ArrayRef<TemplateArgument> Ys) {
13994 if (Xs.size() != Ys.size())
13995 return true;
13996 R.resize(N: Xs.size());
13997 for (size_t I = 0; I < R.size(); ++I) {
13998 R[I] = getCommonTemplateArgument(Ctx, X: Xs[I], Y: Ys[I]);
13999 if (R[I].isNull())
14000 return true;
14001 }
14002 return false;
14003}
14004
14005static auto getCommonTemplateArguments(const ASTContext &Ctx,
14006 ArrayRef<TemplateArgument> Xs,
14007 ArrayRef<TemplateArgument> Ys) {
14008 SmallVector<TemplateArgument, 8> R;
14009 bool Different = getCommonTemplateArguments(Ctx, R, Xs, Ys);
14010 assert(!Different);
14011 (void)Different;
14012 return R;
14013}
14014
14015template <class T>
14016static ElaboratedTypeKeyword getCommonTypeKeyword(const T *X, const T *Y,
14017 bool IsSame) {
14018 ElaboratedTypeKeyword KX = X->getKeyword(), KY = Y->getKeyword();
14019 if (KX == KY)
14020 return KX;
14021 KX = getCanonicalElaboratedTypeKeyword(Keyword: KX);
14022 assert(!IsSame || KX == getCanonicalElaboratedTypeKeyword(KY));
14023 return KX;
14024}
14025
/// Returns a NestedNameSpecifier which has only the common sugar
/// present in both NNS1 and NNS2.
/// \param IsSame when true, the caller asserts the two qualifiers are
///        canonically identical; used to strengthen internal assertions.
/// \returns std::nullopt when the qualifiers do not share a canonical form.
static NestedNameSpecifier getCommonNNS(const ASTContext &Ctx,
                                        NestedNameSpecifier NNS1,
                                        NestedNameSpecifier NNS2, bool IsSame) {
  // If they are identical, all sugar is common.
  if (NNS1 == NNS2)
    return NNS1;

  // IsSame implies both Qualifiers are equivalent.
  NestedNameSpecifier Canon = NNS1.getCanonical();
  if (Canon != NNS2.getCanonical()) {
    assert(!IsSame && "Should be the same NestedNameSpecifier");
    // If they are not the same, there is nothing to unify.
    return std::nullopt;
  }

  NestedNameSpecifier R = std::nullopt;
  // Both sides canonicalize to the same qualifier, so their kinds must match.
  NestedNameSpecifier::Kind Kind = NNS1.getKind();
  assert(Kind == NNS2.getKind());
  switch (Kind) {
  case NestedNameSpecifier::Kind::Namespace: {
    auto [Namespace1, Prefix1] = NNS1.getAsNamespaceAndPrefix();
    auto [Namespace2, Prefix2] = NNS2.getAsNamespaceAndPrefix();
    auto Kind = Namespace1->getKind();
    // If one side is spelled via a different alias (or one is an alias and
    // the other is not), drop to the underlying namespace with no prefix.
    if (Kind != Namespace2->getKind() ||
        (Kind == Decl::NamespaceAlias &&
         !declaresSameEntity(D1: Namespace1, D2: Namespace2))) {
      R = NestedNameSpecifier(
          Ctx,
          ::getCommonDeclChecked(X: Namespace1->getNamespace(),
                                 Y: Namespace2->getNamespace()),
          /*Prefix=*/std::nullopt);
      break;
    }
    // The prefixes for namespaces are not significant, its declaration
    // identifies it uniquely.
    NestedNameSpecifier Prefix = ::getCommonNNS(Ctx, NNS1: Prefix1, NNS2: Prefix2,
                                                /*IsSame=*/false);
    R = NestedNameSpecifier(Ctx, ::getCommonDeclChecked(X: Namespace1, Y: Namespace2),
                            Prefix);
    break;
  }
  case NestedNameSpecifier::Kind::Type: {
    // Unify the sugar of the two type qualifiers, ignoring qualifiers.
    const Type *T1 = NNS1.getAsType(), *T2 = NNS2.getAsType();
    const Type *T = Ctx.getCommonSugaredType(X: QualType(T1, 0), Y: QualType(T2, 0),
                                             /*Unqualified=*/true)
                        .getTypePtr();
    R = NestedNameSpecifier(T);
    break;
  }
  case NestedNameSpecifier::Kind::MicrosoftSuper: {
    // FIXME: Can __super even be used with data members?
    // If it's only usable in functions, we will never see it here,
    // unless we save the qualifiers used in function types.
    // In that case, it might be possible NNS2 is a type,
    // in which case we should degrade the result to
    // a CXXRecordType.
    R = NestedNameSpecifier(getCommonDeclChecked(X: NNS1.getAsMicrosoftSuper(),
                                                 Y: NNS2.getAsMicrosoftSuper()));
    break;
  }
  case NestedNameSpecifier::Kind::Null:
  case NestedNameSpecifier::Kind::Global:
    // These are singletons.
    llvm_unreachable("singletons did not compare equal");
  }
  // Whatever sugar was preserved, the canonical qualifier must be unchanged.
  assert(R.getCanonical() == Canon);
  return R;
}
14096
14097template <class T>
14098static NestedNameSpecifier getCommonQualifier(const ASTContext &Ctx, const T *X,
14099 const T *Y, bool IsSame) {
14100 return ::getCommonNNS(Ctx, NNS1: X->getQualifier(), NNS2: Y->getQualifier(), IsSame);
14101}
14102
14103template <class T>
14104static QualType getCommonElementType(const ASTContext &Ctx, const T *X,
14105 const T *Y) {
14106 return Ctx.getCommonSugaredType(X: X->getElementType(), Y: Y->getElementType());
14107}
14108
14109static QualType getCommonTypeWithQualifierLifting(const ASTContext &Ctx,
14110 QualType X, QualType Y,
14111 Qualifiers &QX,
14112 Qualifiers &QY) {
14113 QualType R = Ctx.getCommonSugaredType(X, Y,
14114 /*Unqualified=*/true);
14115 // Qualifiers common to both element types.
14116 Qualifiers RQ = R.getQualifiers();
14117 // For each side, move to the top level any qualifiers which are not common to
14118 // both element types. The caller must assume top level qualifiers might
14119 // be different, even if they are the same type, and can be treated as sugar.
14120 QX += X.getQualifiers() - RQ;
14121 QY += Y.getQualifiers() - RQ;
14122 return R;
14123}
14124
14125template <class T>
14126static QualType getCommonArrayElementType(const ASTContext &Ctx, const T *X,
14127 Qualifiers &QX, const T *Y,
14128 Qualifiers &QY) {
14129 return getCommonTypeWithQualifierLifting(Ctx, X->getElementType(),
14130 Y->getElementType(), QX, QY);
14131}
14132
14133template <class T>
14134static QualType getCommonPointeeType(const ASTContext &Ctx, const T *X,
14135 const T *Y) {
14136 return Ctx.getCommonSugaredType(X: X->getPointeeType(), Y: Y->getPointeeType());
14137}
14138
14139template <class T>
14140static auto *getCommonSizeExpr(const ASTContext &Ctx, T *X, T *Y) {
14141 assert(Ctx.hasSameExpr(X->getSizeExpr(), Y->getSizeExpr()));
14142 return X->getSizeExpr();
14143}
14144
14145static auto getCommonSizeModifier(const ArrayType *X, const ArrayType *Y) {
14146 assert(X->getSizeModifier() == Y->getSizeModifier());
14147 return X->getSizeModifier();
14148}
14149
14150static auto getCommonIndexTypeCVRQualifiers(const ArrayType *X,
14151 const ArrayType *Y) {
14152 assert(X->getIndexTypeCVRQualifiers() == Y->getIndexTypeCVRQualifiers());
14153 return X->getIndexTypeCVRQualifiers();
14154}
14155
// Merges two type lists such that the resulting vector will contain
// each type (in a canonical sense) only once, in the order they appear
// from X to Y. If they occur in both X and Y, the result will contain
// the common sugared type between them.
static void mergeTypeLists(const ASTContext &Ctx,
                           SmallVectorImpl<QualType> &Out, ArrayRef<QualType> X,
                           ArrayRef<QualType> Y) {
  // Maps each canonical type to the index in Out where it first appeared.
  llvm::DenseMap<QualType, unsigned> Found;
  for (auto Ts : {X, Y}) {
    for (QualType T : Ts) {
      auto Res = Found.try_emplace(Key: Ctx.getCanonicalType(T), Args: Out.size());
      if (!Res.second) {
        // Seen before (possibly with different sugar): keep the common sugar
        // between the stored entry and this occurrence.
        QualType &U = Out[Res.first->second];
        U = Ctx.getCommonSugaredType(X: U, Y: T);
      } else {
        // First occurrence: append, preserving encounter order.
        Out.emplace_back(Args&: T);
      }
    }
  }
}
14176
/// Merge two exception specifications into the weakest one that allows
/// everything either of them allows. Dynamic specs are unioned into
/// \p ExceptionTypeStorage, which must outlive the returned value.
/// \param AcceptDependent whether value-dependent noexcept specs are
///        tolerated (pre-C++17 semantics); asserted otherwise.
FunctionProtoType::ExceptionSpecInfo
ASTContext::mergeExceptionSpecs(FunctionProtoType::ExceptionSpecInfo ESI1,
                                FunctionProtoType::ExceptionSpecInfo ESI2,
                                SmallVectorImpl<QualType> &ExceptionTypeStorage,
                                bool AcceptDependent) const {
  ExceptionSpecificationType EST1 = ESI1.Type, EST2 = ESI2.Type;

  // If either of them can throw anything, that is the result.
  for (auto I : {EST_None, EST_MSAny, EST_NoexceptFalse}) {
    if (EST1 == I)
      return ESI1;
    if (EST2 == I)
      return ESI2;
  }

  // If either of them is non-throwing, the result is the other.
  for (auto I :
       {EST_NoThrow, EST_DynamicNone, EST_BasicNoexcept, EST_NoexceptTrue}) {
    if (EST1 == I)
      return ESI2;
    if (EST2 == I)
      return ESI1;
  }

  // If we're left with value-dependent computed noexcept expressions, we're
  // stuck. Before C++17, we can just drop the exception specification entirely,
  // since it's not actually part of the canonical type. And this should never
  // happen in C++17, because it would mean we were computing the composite
  // pointer type of dependent types, which should never happen.
  if (EST1 == EST_DependentNoexcept || EST2 == EST_DependentNoexcept) {
    assert(AcceptDependent &&
           "computing composite pointer type of dependent types");
    return FunctionProtoType::ExceptionSpecInfo();
  }

  // Switch over the possibilities so that people adding new values know to
  // update this function.
  switch (EST1) {
  case EST_None:
  case EST_DynamicNone:
  case EST_MSAny:
  case EST_BasicNoexcept:
  case EST_DependentNoexcept:
  case EST_NoexceptFalse:
  case EST_NoexceptTrue:
  case EST_NoThrow:
    llvm_unreachable("These ESTs should be handled above");

  case EST_Dynamic: {
    // This is the fun case: both exception specifications are dynamic. Form
    // the union of the two lists.
    assert(EST2 == EST_Dynamic && "other cases should already be handled");
    mergeTypeLists(Ctx: *this, Out&: ExceptionTypeStorage, X: ESI1.Exceptions,
                   Y: ESI2.Exceptions);
    FunctionProtoType::ExceptionSpecInfo Result(EST_Dynamic);
    Result.Exceptions = ExceptionTypeStorage;
    return Result;
  }

  case EST_Unevaluated:
  case EST_Uninstantiated:
  case EST_Unparsed:
    llvm_unreachable("shouldn't see unresolved exception specifications here");
  }

  llvm_unreachable("invalid ExceptionSpecificationType");
}
14244
/// Unify two type nodes of the same, non-sugar type class, producing a node
/// whose components carry the sugar common to both sides. \p QX and \p QY
/// accumulate qualifiers lifted out of array element types. The callers must
/// have established that X and Y are the same type up to sugar; the per-case
/// asserts re-check the structural invariants that implies.
static QualType getCommonNonSugarTypeNode(const ASTContext &Ctx, const Type *X,
                                          Qualifiers &QX, const Type *Y,
                                          Qualifiers &QY) {
  Type::TypeClass TC = X->getTypeClass();
  assert(TC == Y->getTypeClass());
  switch (TC) {
#define UNEXPECTED_TYPE(Class, Kind)                                           \
  case Type::Class:                                                            \
    llvm_unreachable("Unexpected " Kind ": " #Class);

#define NON_CANONICAL_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "non-canonical")
#define TYPE(Class, Base)
#include "clang/AST/TypeNodes.inc"

#define SUGAR_FREE_TYPE(Class) UNEXPECTED_TYPE(Class, "sugar-free")
    SUGAR_FREE_TYPE(Builtin)
    SUGAR_FREE_TYPE(DeducedTemplateSpecialization)
    SUGAR_FREE_TYPE(DependentBitInt)
    SUGAR_FREE_TYPE(BitInt)
    SUGAR_FREE_TYPE(ObjCInterface)
    SUGAR_FREE_TYPE(SubstTemplateTypeParmPack)
    SUGAR_FREE_TYPE(SubstBuiltinTemplatePack)
    SUGAR_FREE_TYPE(UnresolvedUsing)
    SUGAR_FREE_TYPE(HLSLAttributedResource)
    SUGAR_FREE_TYPE(HLSLInlineSpirv)
#undef SUGAR_FREE_TYPE
#define NON_UNIQUE_TYPE(Class) UNEXPECTED_TYPE(Class, "non-unique")
    NON_UNIQUE_TYPE(TypeOfExpr)
    NON_UNIQUE_TYPE(VariableArray)
#undef NON_UNIQUE_TYPE

    UNEXPECTED_TYPE(TypeOf, "sugar")

#undef UNEXPECTED_TYPE

  case Type::Auto: {
    const auto *AX = cast<AutoType>(Val: X), *AY = cast<AutoType>(Val: Y);
    assert(AX->getDeducedKind() == AY->getDeducedKind());
    assert(AX->getDeducedKind() != DeducedKind::Deduced);
    assert(AX->getKeyword() == AY->getKeyword());
    TemplateDecl *CD = ::getCommonDecl(X: AX->getTypeConstraintConcept(),
                                       Y: AY->getTypeConstraintConcept());
    SmallVector<TemplateArgument, 8> As;
    if (CD &&
        getCommonTemplateArguments(Ctx, R&: As, Xs: AX->getTypeConstraintArguments(),
                                   Ys: AY->getTypeConstraintArguments())) {
      CD = nullptr; // The arguments differ, so make it unconstrained.
      As.clear();
    }
    return Ctx.getAutoType(DK: AX->getDeducedKind(), DeducedAsType: QualType(), Keyword: AX->getKeyword(),
                           TypeConstraintConcept: CD, TypeConstraintArgs: As);
  }
  case Type::IncompleteArray: {
    const auto *AX = cast<IncompleteArrayType>(Val: X),
               *AY = cast<IncompleteArrayType>(Val: Y);
    return Ctx.getIncompleteArrayType(
        elementType: getCommonArrayElementType(Ctx, X: AX, QX, Y: AY, QY),
        ASM: getCommonSizeModifier(X: AX, Y: AY), elementTypeQuals: getCommonIndexTypeCVRQualifiers(X: AX, Y: AY));
  }
  case Type::DependentSizedArray: {
    const auto *AX = cast<DependentSizedArrayType>(Val: X),
               *AY = cast<DependentSizedArrayType>(Val: Y);
    return Ctx.getDependentSizedArrayType(
        elementType: getCommonArrayElementType(Ctx, X: AX, QX, Y: AY, QY),
        numElements: getCommonSizeExpr(Ctx, X: AX, Y: AY), ASM: getCommonSizeModifier(X: AX, Y: AY),
        elementTypeQuals: getCommonIndexTypeCVRQualifiers(X: AX, Y: AY));
  }
  case Type::ConstantArray: {
    const auto *AX = cast<ConstantArrayType>(Val: X),
               *AY = cast<ConstantArrayType>(Val: Y);
    assert(AX->getSize() == AY->getSize());
    // Keep the size expression only when both sides spell it identically.
    const Expr *SizeExpr = Ctx.hasSameExpr(X: AX->getSizeExpr(), Y: AY->getSizeExpr())
                               ? AX->getSizeExpr()
                               : nullptr;
    return Ctx.getConstantArrayType(
        EltTy: getCommonArrayElementType(Ctx, X: AX, QX, Y: AY, QY), ArySizeIn: AX->getSize(), SizeExpr,
        ASM: getCommonSizeModifier(X: AX, Y: AY), IndexTypeQuals: getCommonIndexTypeCVRQualifiers(X: AX, Y: AY));
  }
  case Type::ArrayParameter: {
    const auto *AX = cast<ArrayParameterType>(Val: X),
               *AY = cast<ArrayParameterType>(Val: Y);
    assert(AX->getSize() == AY->getSize());
    const Expr *SizeExpr = Ctx.hasSameExpr(X: AX->getSizeExpr(), Y: AY->getSizeExpr())
                               ? AX->getSizeExpr()
                               : nullptr;
    auto ArrayTy = Ctx.getConstantArrayType(
        EltTy: getCommonArrayElementType(Ctx, X: AX, QX, Y: AY, QY), ArySizeIn: AX->getSize(), SizeExpr,
        ASM: getCommonSizeModifier(X: AX, Y: AY), IndexTypeQuals: getCommonIndexTypeCVRQualifiers(X: AX, Y: AY));
    return Ctx.getArrayParameterType(Ty: ArrayTy);
  }
  case Type::Atomic: {
    const auto *AX = cast<AtomicType>(Val: X), *AY = cast<AtomicType>(Val: Y);
    return Ctx.getAtomicType(
        T: Ctx.getCommonSugaredType(X: AX->getValueType(), Y: AY->getValueType()));
  }
  case Type::Complex: {
    const auto *CX = cast<ComplexType>(Val: X), *CY = cast<ComplexType>(Val: Y);
    return Ctx.getComplexType(T: getCommonArrayElementType(Ctx, X: CX, QX, Y: CY, QY));
  }
  case Type::Pointer: {
    const auto *PX = cast<PointerType>(Val: X), *PY = cast<PointerType>(Val: Y);
    return Ctx.getPointerType(T: getCommonPointeeType(Ctx, X: PX, Y: PY));
  }
  case Type::BlockPointer: {
    const auto *PX = cast<BlockPointerType>(Val: X), *PY = cast<BlockPointerType>(Val: Y);
    return Ctx.getBlockPointerType(T: getCommonPointeeType(Ctx, X: PX, Y: PY));
  }
  case Type::ObjCObjectPointer: {
    const auto *PX = cast<ObjCObjectPointerType>(Val: X),
               *PY = cast<ObjCObjectPointerType>(Val: Y);
    return Ctx.getObjCObjectPointerType(ObjectT: getCommonPointeeType(Ctx, X: PX, Y: PY));
  }
  case Type::MemberPointer: {
    const auto *PX = cast<MemberPointerType>(Val: X),
               *PY = cast<MemberPointerType>(Val: Y);
    assert(declaresSameEntity(PX->getMostRecentCXXRecordDecl(),
                              PY->getMostRecentCXXRecordDecl()));
    return Ctx.getMemberPointerType(
        T: getCommonPointeeType(Ctx, X: PX, Y: PY),
        Qualifier: getCommonQualifier(Ctx, X: PX, Y: PY, /*IsSame=*/true),
        Cls: PX->getMostRecentCXXRecordDecl());
  }
  case Type::LValueReference: {
    const auto *PX = cast<LValueReferenceType>(Val: X),
               *PY = cast<LValueReferenceType>(Val: Y);
    // FIXME: Preserve PointeeTypeAsWritten.
    return Ctx.getLValueReferenceType(T: getCommonPointeeType(Ctx, X: PX, Y: PY),
                                      SpelledAsLValue: PX->isSpelledAsLValue() ||
                                          PY->isSpelledAsLValue());
  }
  case Type::RValueReference: {
    const auto *PX = cast<RValueReferenceType>(Val: X),
               *PY = cast<RValueReferenceType>(Val: Y);
    // FIXME: Preserve PointeeTypeAsWritten.
    return Ctx.getRValueReferenceType(T: getCommonPointeeType(Ctx, X: PX, Y: PY));
  }
  case Type::DependentAddressSpace: {
    const auto *PX = cast<DependentAddressSpaceType>(Val: X),
               *PY = cast<DependentAddressSpaceType>(Val: Y);
    assert(Ctx.hasSameExpr(PX->getAddrSpaceExpr(), PY->getAddrSpaceExpr()));
    return Ctx.getDependentAddressSpaceType(PointeeType: getCommonPointeeType(Ctx, X: PX, Y: PY),
                                            AddrSpaceExpr: PX->getAddrSpaceExpr(),
                                            AttrLoc: getCommonAttrLoc(X: PX, Y: PY));
  }
  case Type::FunctionNoProto: {
    const auto *FX = cast<FunctionNoProtoType>(Val: X),
               *FY = cast<FunctionNoProtoType>(Val: Y);
    assert(FX->getExtInfo() == FY->getExtInfo());
    return Ctx.getFunctionNoProtoType(
        ResultTy: Ctx.getCommonSugaredType(X: FX->getReturnType(), Y: FY->getReturnType()),
        Info: FX->getExtInfo());
  }
  case Type::FunctionProto: {
    const auto *FX = cast<FunctionProtoType>(Val: X),
               *FY = cast<FunctionProtoType>(Val: Y);
    FunctionProtoType::ExtProtoInfo EPIX = FX->getExtProtoInfo(),
                                    EPIY = FY->getExtProtoInfo();
    assert(EPIX.ExtInfo == EPIY.ExtInfo);
    assert(!EPIX.ExtParameterInfos == !EPIY.ExtParameterInfos);
    assert(!EPIX.ExtParameterInfos ||
           llvm::equal(
               llvm::ArrayRef(EPIX.ExtParameterInfos, FX->getNumParams()),
               llvm::ArrayRef(EPIY.ExtParameterInfos, FY->getNumParams())));
    assert(EPIX.RefQualifier == EPIY.RefQualifier);
    assert(EPIX.TypeQuals == EPIY.TypeQuals);
    assert(EPIX.Variadic == EPIY.Variadic);

    // FIXME: Can we handle an empty EllipsisLoc?
    // Use empty EllipsisLoc if X and Y differ.

    EPIX.HasTrailingReturn = EPIX.HasTrailingReturn && EPIY.HasTrailingReturn;

    QualType R =
        Ctx.getCommonSugaredType(X: FX->getReturnType(), Y: FY->getReturnType());
    auto P = getCommonTypes(Ctx, Xs: FX->param_types(), Ys: FY->param_types(),
                            /*Unqualified=*/true);

    SmallVector<QualType, 8> Exceptions;
    EPIX.ExceptionSpec = Ctx.mergeExceptionSpecs(
        ESI1: EPIX.ExceptionSpec, ESI2: EPIY.ExceptionSpec, ExceptionTypeStorage&: Exceptions, AcceptDependent: true);
    return Ctx.getFunctionType(ResultTy: R, Args: P, EPI: EPIX);
  }
  case Type::ObjCObject: {
    const auto *OX = cast<ObjCObjectType>(Val: X), *OY = cast<ObjCObjectType>(Val: Y);
    assert(
        std::equal(OX->getProtocols().begin(), OX->getProtocols().end(),
                   OY->getProtocols().begin(), OY->getProtocols().end(),
                   [](const ObjCProtocolDecl *P0, const ObjCProtocolDecl *P1) {
                     return P0->getCanonicalDecl() == P1->getCanonicalDecl();
                   }) &&
        "protocol lists must be the same");
    auto TAs = getCommonTypes(Ctx, Xs: OX->getTypeArgsAsWritten(),
                              Ys: OY->getTypeArgsAsWritten());
    return Ctx.getObjCObjectType(
        baseType: Ctx.getCommonSugaredType(X: OX->getBaseType(), Y: OY->getBaseType()), typeArgs: TAs,
        protocols: OX->getProtocols(),
        isKindOf: OX->isKindOfTypeAsWritten() && OY->isKindOfTypeAsWritten());
  }
  case Type::ConstantMatrix: {
    const auto *MX = cast<ConstantMatrixType>(Val: X),
               *MY = cast<ConstantMatrixType>(Val: Y);
    assert(MX->getNumRows() == MY->getNumRows());
    assert(MX->getNumColumns() == MY->getNumColumns());
    return Ctx.getConstantMatrixType(ElementTy: getCommonElementType(Ctx, X: MX, Y: MY),
                                     NumRows: MX->getNumRows(), NumColumns: MX->getNumColumns());
  }
  case Type::DependentSizedMatrix: {
    const auto *MX = cast<DependentSizedMatrixType>(Val: X),
               *MY = cast<DependentSizedMatrixType>(Val: Y);
    assert(Ctx.hasSameExpr(MX->getRowExpr(), MY->getRowExpr()));
    assert(Ctx.hasSameExpr(MX->getColumnExpr(), MY->getColumnExpr()));
    return Ctx.getDependentSizedMatrixType(
        ElementTy: getCommonElementType(Ctx, X: MX, Y: MY), RowExpr: MX->getRowExpr(),
        ColumnExpr: MX->getColumnExpr(), AttrLoc: getCommonAttrLoc(X: MX, Y: MY));
  }
  case Type::Vector: {
    const auto *VX = cast<VectorType>(Val: X), *VY = cast<VectorType>(Val: Y);
    assert(VX->getNumElements() == VY->getNumElements());
    assert(VX->getVectorKind() == VY->getVectorKind());
    return Ctx.getVectorType(vecType: getCommonElementType(Ctx, X: VX, Y: VY),
                             NumElts: VX->getNumElements(), VecKind: VX->getVectorKind());
  }
  case Type::ExtVector: {
    const auto *VX = cast<ExtVectorType>(Val: X), *VY = cast<ExtVectorType>(Val: Y);
    assert(VX->getNumElements() == VY->getNumElements());
    return Ctx.getExtVectorType(vecType: getCommonElementType(Ctx, X: VX, Y: VY),
                                NumElts: VX->getNumElements());
  }
  case Type::DependentSizedExtVector: {
    const auto *VX = cast<DependentSizedExtVectorType>(Val: X),
               *VY = cast<DependentSizedExtVectorType>(Val: Y);
    return Ctx.getDependentSizedExtVectorType(vecType: getCommonElementType(Ctx, X: VX, Y: VY),
                                              SizeExpr: getCommonSizeExpr(Ctx, X: VX, Y: VY),
                                              AttrLoc: getCommonAttrLoc(X: VX, Y: VY));
  }
  case Type::DependentVector: {
    const auto *VX = cast<DependentVectorType>(Val: X),
               *VY = cast<DependentVectorType>(Val: Y);
    assert(VX->getVectorKind() == VY->getVectorKind());
    return Ctx.getDependentVectorType(
        VecType: getCommonElementType(Ctx, X: VX, Y: VY), SizeExpr: getCommonSizeExpr(Ctx, X: VX, Y: VY),
        AttrLoc: getCommonAttrLoc(X: VX, Y: VY), VecKind: VX->getVectorKind());
  }
  case Type::Enum:
  case Type::Record:
  case Type::InjectedClassName: {
    const auto *TX = cast<TagType>(Val: X), *TY = cast<TagType>(Val: Y);
    return Ctx.getTagType(Keyword: ::getCommonTypeKeyword(X: TX, Y: TY, /*IsSame=*/false),
                          Qualifier: ::getCommonQualifier(Ctx, X: TX, Y: TY, /*IsSame=*/false),
                          TD: ::getCommonDeclChecked(X: TX->getDecl(), Y: TY->getDecl()),
                          /*OwnedTag=*/OwnsTag: false);
  }
  case Type::TemplateSpecialization: {
    const auto *TX = cast<TemplateSpecializationType>(Val: X),
               *TY = cast<TemplateSpecializationType>(Val: Y);
    auto As = getCommonTemplateArguments(Ctx, Xs: TX->template_arguments(),
                                         Ys: TY->template_arguments());
    return Ctx.getTemplateSpecializationType(
        Keyword: getCommonTypeKeyword(X: TX, Y: TY, /*IsSame=*/false),
        Template: ::getCommonTemplateNameChecked(Ctx, X: TX->getTemplateName(),
                                        Y: TY->getTemplateName(),
                                        /*IgnoreDeduced=*/true),
        SpecifiedArgs: As, /*CanonicalArgs=*/{}, Underlying: X->getCanonicalTypeInternal());
  }
  case Type::Decltype: {
    const auto *DX = cast<DecltypeType>(Val: X);
    [[maybe_unused]] const auto *DY = cast<DecltypeType>(Val: Y);
    assert(DX->isDependentType());
    assert(DY->isDependentType());
    assert(Ctx.hasSameExpr(DX->getUnderlyingExpr(), DY->getUnderlyingExpr()));
    // As Decltype is not uniqued, building a common type would be wasteful.
    return QualType(DX, 0);
  }
  case Type::PackIndexing: {
    const auto *DX = cast<PackIndexingType>(Val: X);
    [[maybe_unused]] const auto *DY = cast<PackIndexingType>(Val: Y);
    assert(DX->isDependentType());
    assert(DY->isDependentType());
    assert(Ctx.hasSameExpr(DX->getIndexExpr(), DY->getIndexExpr()));
    // Like Decltype above, PackIndexing is not uniqued; reuse X's node.
    return QualType(DX, 0);
  }
  case Type::DependentName: {
    const auto *NX = cast<DependentNameType>(Val: X),
               *NY = cast<DependentNameType>(Val: Y);
    assert(NX->getIdentifier() == NY->getIdentifier());
    return Ctx.getDependentNameType(
        Keyword: getCommonTypeKeyword(X: NX, Y: NY, /*IsSame=*/true),
        NNS: getCommonQualifier(Ctx, X: NX, Y: NY, /*IsSame=*/true), Name: NX->getIdentifier());
  }
  case Type::OverflowBehavior: {
    const auto *NX = cast<OverflowBehaviorType>(Val: X),
               *NY = cast<OverflowBehaviorType>(Val: Y);
    assert(NX->getBehaviorKind() == NY->getBehaviorKind());
    return Ctx.getOverflowBehaviorType(
        Kind: NX->getBehaviorKind(),
        Underlying: getCommonTypeWithQualifierLifting(Ctx, X: NX->getUnderlyingType(),
                                          Y: NY->getUnderlyingType(), QX, QY));
  }
  case Type::UnaryTransform: {
    const auto *TX = cast<UnaryTransformType>(Val: X),
               *TY = cast<UnaryTransformType>(Val: Y);
    assert(TX->getUTTKind() == TY->getUTTKind());
    return Ctx.getUnaryTransformType(
        BaseType: Ctx.getCommonSugaredType(X: TX->getBaseType(), Y: TY->getBaseType()),
        UnderlyingType: Ctx.getCommonSugaredType(X: TX->getUnderlyingType(),
                                 Y: TY->getUnderlyingType()),
        Kind: TX->getUTTKind());
  }
  case Type::PackExpansion: {
    const auto *PX = cast<PackExpansionType>(Val: X),
               *PY = cast<PackExpansionType>(Val: Y);
    assert(PX->getNumExpansions() == PY->getNumExpansions());
    return Ctx.getPackExpansionType(
        Pattern: Ctx.getCommonSugaredType(X: PX->getPattern(), Y: PY->getPattern()),
        NumExpansions: PX->getNumExpansions(), ExpectPackInType: false);
  }
  case Type::Pipe: {
    const auto *PX = cast<PipeType>(Val: X), *PY = cast<PipeType>(Val: Y);
    assert(PX->isReadOnly() == PY->isReadOnly());
    // Select the read- or write-pipe factory via a member-function pointer.
    auto MP = PX->isReadOnly() ? &ASTContext::getReadPipeType
                               : &ASTContext::getWritePipeType;
    return (Ctx.*MP)(getCommonElementType(Ctx, X: PX, Y: PY));
  }
  case Type::TemplateTypeParm: {
    const auto *TX = cast<TemplateTypeParmType>(Val: X),
               *TY = cast<TemplateTypeParmType>(Val: Y);
    assert(TX->getDepth() == TY->getDepth());
    assert(TX->getIndex() == TY->getIndex());
    assert(TX->isParameterPack() == TY->isParameterPack());
    return Ctx.getTemplateTypeParmType(
        Depth: TX->getDepth(), Index: TX->getIndex(), ParameterPack: TX->isParameterPack(),
        TTPDecl: getCommonDecl(X: TX->getDecl(), Y: TY->getDecl()));
  }
  }
  llvm_unreachable("Unknown Type Class");
}
14581
14582static QualType getCommonSugarTypeNode(const ASTContext &Ctx, const Type *X,
14583 const Type *Y,
14584 SplitQualType Underlying) {
14585 Type::TypeClass TC = X->getTypeClass();
14586 if (TC != Y->getTypeClass())
14587 return QualType();
14588 switch (TC) {
14589#define UNEXPECTED_TYPE(Class, Kind) \
14590 case Type::Class: \
14591 llvm_unreachable("Unexpected " Kind ": " #Class);
14592#define TYPE(Class, Base)
14593#define DEPENDENT_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "dependent")
14594#include "clang/AST/TypeNodes.inc"
14595
14596#define CANONICAL_TYPE(Class) UNEXPECTED_TYPE(Class, "canonical")
14597 CANONICAL_TYPE(Atomic)
14598 CANONICAL_TYPE(BitInt)
14599 CANONICAL_TYPE(BlockPointer)
14600 CANONICAL_TYPE(Builtin)
14601 CANONICAL_TYPE(Complex)
14602 CANONICAL_TYPE(ConstantArray)
14603 CANONICAL_TYPE(ArrayParameter)
14604 CANONICAL_TYPE(ConstantMatrix)
14605 CANONICAL_TYPE(Enum)
14606 CANONICAL_TYPE(ExtVector)
14607 CANONICAL_TYPE(FunctionNoProto)
14608 CANONICAL_TYPE(FunctionProto)
14609 CANONICAL_TYPE(IncompleteArray)
14610 CANONICAL_TYPE(HLSLAttributedResource)
14611 CANONICAL_TYPE(HLSLInlineSpirv)
14612 CANONICAL_TYPE(LValueReference)
14613 CANONICAL_TYPE(ObjCInterface)
14614 CANONICAL_TYPE(ObjCObject)
14615 CANONICAL_TYPE(ObjCObjectPointer)
14616 CANONICAL_TYPE(OverflowBehavior)
14617 CANONICAL_TYPE(Pipe)
14618 CANONICAL_TYPE(Pointer)
14619 CANONICAL_TYPE(Record)
14620 CANONICAL_TYPE(RValueReference)
14621 CANONICAL_TYPE(VariableArray)
14622 CANONICAL_TYPE(Vector)
14623#undef CANONICAL_TYPE
14624
14625#undef UNEXPECTED_TYPE
14626
14627 case Type::Adjusted: {
14628 const auto *AX = cast<AdjustedType>(Val: X), *AY = cast<AdjustedType>(Val: Y);
14629 QualType OX = AX->getOriginalType(), OY = AY->getOriginalType();
14630 if (!Ctx.hasSameType(T1: OX, T2: OY))
14631 return QualType();
14632 // FIXME: It's inefficient to have to unify the original types.
14633 return Ctx.getAdjustedType(Orig: Ctx.getCommonSugaredType(X: OX, Y: OY),
14634 New: Ctx.getQualifiedType(split: Underlying));
14635 }
14636 case Type::Decayed: {
14637 const auto *DX = cast<DecayedType>(Val: X), *DY = cast<DecayedType>(Val: Y);
14638 QualType OX = DX->getOriginalType(), OY = DY->getOriginalType();
14639 if (!Ctx.hasSameType(T1: OX, T2: OY))
14640 return QualType();
14641 // FIXME: It's inefficient to have to unify the original types.
14642 return Ctx.getDecayedType(Orig: Ctx.getCommonSugaredType(X: OX, Y: OY),
14643 Decayed: Ctx.getQualifiedType(split: Underlying));
14644 }
14645 case Type::Attributed: {
14646 const auto *AX = cast<AttributedType>(Val: X), *AY = cast<AttributedType>(Val: Y);
14647 AttributedType::Kind Kind = AX->getAttrKind();
14648 if (Kind != AY->getAttrKind())
14649 return QualType();
14650 QualType MX = AX->getModifiedType(), MY = AY->getModifiedType();
14651 if (!Ctx.hasSameType(T1: MX, T2: MY))
14652 return QualType();
14653 // FIXME: It's inefficient to have to unify the modified types.
14654 return Ctx.getAttributedType(attrKind: Kind, modifiedType: Ctx.getCommonSugaredType(X: MX, Y: MY),
14655 equivalentType: Ctx.getQualifiedType(split: Underlying),
14656 attr: AX->getAttr());
14657 }
14658 case Type::BTFTagAttributed: {
14659 const auto *BX = cast<BTFTagAttributedType>(Val: X);
14660 const BTFTypeTagAttr *AX = BX->getAttr();
14661 // The attribute is not uniqued, so just compare the tag.
14662 if (AX->getBTFTypeTag() !=
14663 cast<BTFTagAttributedType>(Val: Y)->getAttr()->getBTFTypeTag())
14664 return QualType();
14665 return Ctx.getBTFTagAttributedType(BTFAttr: AX, Wrapped: Ctx.getQualifiedType(split: Underlying));
14666 }
14667 case Type::Auto: {
14668 const auto *AX = cast<AutoType>(Val: X), *AY = cast<AutoType>(Val: Y);
14669 assert(AX->getDeducedKind() == DeducedKind::Deduced);
14670 assert(AY->getDeducedKind() == DeducedKind::Deduced);
14671
14672 AutoTypeKeyword KW = AX->getKeyword();
14673 if (KW != AY->getKeyword())
14674 return QualType();
14675
14676 TemplateDecl *CD = ::getCommonDecl(X: AX->getTypeConstraintConcept(),
14677 Y: AY->getTypeConstraintConcept());
14678 SmallVector<TemplateArgument, 8> As;
14679 if (CD &&
14680 getCommonTemplateArguments(Ctx, R&: As, Xs: AX->getTypeConstraintArguments(),
14681 Ys: AY->getTypeConstraintArguments())) {
14682 CD = nullptr; // The arguments differ, so make it unconstrained.
14683 As.clear();
14684 }
14685
14686 // Both auto types can't be dependent, otherwise they wouldn't have been
14687 // sugar. This implies they can't contain unexpanded packs either.
14688 return Ctx.getAutoType(DK: DeducedKind::Deduced,
14689 DeducedAsType: Ctx.getQualifiedType(split: Underlying), Keyword: AX->getKeyword(),
14690 TypeConstraintConcept: CD, TypeConstraintArgs: As);
14691 }
14692 case Type::PackIndexing:
14693 case Type::Decltype:
14694 return QualType();
14695 case Type::DeducedTemplateSpecialization:
14696 // FIXME: Try to merge these.
14697 return QualType();
14698 case Type::MacroQualified: {
14699 const auto *MX = cast<MacroQualifiedType>(Val: X),
14700 *MY = cast<MacroQualifiedType>(Val: Y);
14701 const IdentifierInfo *IX = MX->getMacroIdentifier();
14702 if (IX != MY->getMacroIdentifier())
14703 return QualType();
14704 return Ctx.getMacroQualifiedType(UnderlyingTy: Ctx.getQualifiedType(split: Underlying), MacroII: IX);
14705 }
14706 case Type::SubstTemplateTypeParm: {
14707 const auto *SX = cast<SubstTemplateTypeParmType>(Val: X),
14708 *SY = cast<SubstTemplateTypeParmType>(Val: Y);
14709 Decl *CD =
14710 ::getCommonDecl(X: SX->getAssociatedDecl(), Y: SY->getAssociatedDecl());
14711 if (!CD)
14712 return QualType();
14713 unsigned Index = SX->getIndex();
14714 if (Index != SY->getIndex())
14715 return QualType();
14716 auto PackIndex = SX->getPackIndex();
14717 if (PackIndex != SY->getPackIndex())
14718 return QualType();
14719 return Ctx.getSubstTemplateTypeParmType(Replacement: Ctx.getQualifiedType(split: Underlying),
14720 AssociatedDecl: CD, Index, PackIndex,
14721 Final: SX->getFinal() && SY->getFinal());
14722 }
14723 case Type::ObjCTypeParam:
14724 // FIXME: Try to merge these.
14725 return QualType();
14726 case Type::Paren:
14727 return Ctx.getParenType(InnerType: Ctx.getQualifiedType(split: Underlying));
14728
14729 case Type::TemplateSpecialization: {
14730 const auto *TX = cast<TemplateSpecializationType>(Val: X),
14731 *TY = cast<TemplateSpecializationType>(Val: Y);
14732 TemplateName CTN =
14733 ::getCommonTemplateName(Ctx, X: TX->getTemplateName(),
14734 Y: TY->getTemplateName(), /*IgnoreDeduced=*/true);
14735 if (!CTN.getAsVoidPointer())
14736 return QualType();
14737 SmallVector<TemplateArgument, 8> As;
14738 if (getCommonTemplateArguments(Ctx, R&: As, Xs: TX->template_arguments(),
14739 Ys: TY->template_arguments()))
14740 return QualType();
14741 return Ctx.getTemplateSpecializationType(
14742 Keyword: getCommonTypeKeyword(X: TX, Y: TY, /*IsSame=*/false), Template: CTN, SpecifiedArgs: As,
14743 /*CanonicalArgs=*/{}, Underlying: Ctx.getQualifiedType(split: Underlying));
14744 }
14745 case Type::Typedef: {
14746 const auto *TX = cast<TypedefType>(Val: X), *TY = cast<TypedefType>(Val: Y);
14747 const TypedefNameDecl *CD = ::getCommonDecl(X: TX->getDecl(), Y: TY->getDecl());
14748 if (!CD)
14749 return QualType();
14750 return Ctx.getTypedefType(
14751 Keyword: ::getCommonTypeKeyword(X: TX, Y: TY, /*IsSame=*/false),
14752 Qualifier: ::getCommonQualifier(Ctx, X: TX, Y: TY, /*IsSame=*/false), Decl: CD,
14753 UnderlyingType: Ctx.getQualifiedType(split: Underlying));
14754 }
14755 case Type::TypeOf: {
14756 // The common sugar between two typeof expressions, where one is
14757 // potentially a typeof_unqual and the other is not, we unify to the
14758 // qualified type as that retains the most information along with the type.
14759 // We only return a typeof_unqual type when both types are unqual types.
14760 TypeOfKind Kind = TypeOfKind::Qualified;
14761 if (cast<TypeOfType>(Val: X)->getKind() == cast<TypeOfType>(Val: Y)->getKind() &&
14762 cast<TypeOfType>(Val: X)->getKind() == TypeOfKind::Unqualified)
14763 Kind = TypeOfKind::Unqualified;
14764 return Ctx.getTypeOfType(tofType: Ctx.getQualifiedType(split: Underlying), Kind);
14765 }
14766 case Type::TypeOfExpr:
14767 return QualType();
14768
14769 case Type::UnaryTransform: {
14770 const auto *UX = cast<UnaryTransformType>(Val: X),
14771 *UY = cast<UnaryTransformType>(Val: Y);
14772 UnaryTransformType::UTTKind KX = UX->getUTTKind();
14773 if (KX != UY->getUTTKind())
14774 return QualType();
14775 QualType BX = UX->getBaseType(), BY = UY->getBaseType();
14776 if (!Ctx.hasSameType(T1: BX, T2: BY))
14777 return QualType();
14778 // FIXME: It's inefficient to have to unify the base types.
14779 return Ctx.getUnaryTransformType(BaseType: Ctx.getCommonSugaredType(X: BX, Y: BY),
14780 UnderlyingType: Ctx.getQualifiedType(split: Underlying), Kind: KX);
14781 }
14782 case Type::Using: {
14783 const auto *UX = cast<UsingType>(Val: X), *UY = cast<UsingType>(Val: Y);
14784 const UsingShadowDecl *CD = ::getCommonDecl(X: UX->getDecl(), Y: UY->getDecl());
14785 if (!CD)
14786 return QualType();
14787 return Ctx.getUsingType(Keyword: ::getCommonTypeKeyword(X: UX, Y: UY, /*IsSame=*/false),
14788 Qualifier: ::getCommonQualifier(Ctx, X: UX, Y: UY, /*IsSame=*/false),
14789 D: CD, UnderlyingType: Ctx.getQualifiedType(split: Underlying));
14790 }
14791 case Type::MemberPointer: {
14792 const auto *PX = cast<MemberPointerType>(Val: X),
14793 *PY = cast<MemberPointerType>(Val: Y);
14794 CXXRecordDecl *Cls = PX->getMostRecentCXXRecordDecl();
14795 assert(Cls == PY->getMostRecentCXXRecordDecl());
14796 return Ctx.getMemberPointerType(
14797 T: ::getCommonPointeeType(Ctx, X: PX, Y: PY),
14798 Qualifier: ::getCommonQualifier(Ctx, X: PX, Y: PY, /*IsSame=*/false), Cls);
14799 }
14800 case Type::CountAttributed: {
14801 const auto *DX = cast<CountAttributedType>(Val: X),
14802 *DY = cast<CountAttributedType>(Val: Y);
14803 if (DX->isCountInBytes() != DY->isCountInBytes())
14804 return QualType();
14805 if (DX->isOrNull() != DY->isOrNull())
14806 return QualType();
14807 Expr *CEX = DX->getCountExpr();
14808 Expr *CEY = DY->getCountExpr();
14809 ArrayRef<clang::TypeCoupledDeclRefInfo> CDX = DX->getCoupledDecls();
14810 if (Ctx.hasSameExpr(X: CEX, Y: CEY))
14811 return Ctx.getCountAttributedType(WrappedTy: Ctx.getQualifiedType(split: Underlying), CountExpr: CEX,
14812 CountInBytes: DX->isCountInBytes(), OrNull: DX->isOrNull(),
14813 DependentDecls: CDX);
14814 if (!CEX->isIntegerConstantExpr(Ctx) || !CEY->isIntegerConstantExpr(Ctx))
14815 return QualType();
14816 // Two declarations with the same integer constant may still differ in their
14817 // expression pointers, so we need to evaluate them.
14818 llvm::APSInt VX = *CEX->getIntegerConstantExpr(Ctx);
14819 llvm::APSInt VY = *CEY->getIntegerConstantExpr(Ctx);
14820 if (VX != VY)
14821 return QualType();
14822 return Ctx.getCountAttributedType(WrappedTy: Ctx.getQualifiedType(split: Underlying), CountExpr: CEX,
14823 CountInBytes: DX->isCountInBytes(), OrNull: DX->isOrNull(),
14824 DependentDecls: CDX);
14825 }
14826 case Type::PredefinedSugar:
14827 assert(cast<PredefinedSugarType>(X)->getKind() !=
14828 cast<PredefinedSugarType>(Y)->getKind());
14829 return QualType();
14830 }
14831 llvm_unreachable("Unhandled Type Class");
14832}
14833
14834static auto unwrapSugar(SplitQualType &T, Qualifiers &QTotal) {
14835 SmallVector<SplitQualType, 8> R;
14836 while (true) {
14837 QTotal.addConsistentQualifiers(qs: T.Quals);
14838 QualType NT = T.Ty->getLocallyUnqualifiedSingleStepDesugaredType();
14839 if (NT == QualType(T.Ty, 0))
14840 break;
14841 R.push_back(Elt: T);
14842 T = NT.split();
14843 }
14844 return R;
14845}
14846
/// Return the best common sugared form of \p X and \p Y, which must denote
/// the same type up to qualifiers (the same unqualified type when
/// \p Unqualified is set). Sugar (typedefs, attributed nodes, ...) present in
/// both spellings is preserved; where the spellings diverge, the result falls
/// back to a less sugared form.
QualType ASTContext::getCommonSugaredType(QualType X, QualType Y,
                                          bool Unqualified) const {
  assert(Unqualified ? hasSameUnqualifiedType(X, Y) : hasSameType(X, Y));
  if (X == Y)
    return X;
  // A canonical type carries no sugar at all, so it is trivially the most
  // sugared form common to both.
  if (!Unqualified) {
    if (X.isCanonical())
      return X;
    if (Y.isCanonical())
      return Y;
  }

  SplitQualType SX = X.split(), SY = Y.split();
  Qualifiers QX, QY;
  // Desugar SX and SY, setting the sugar and qualifiers aside into Xs and Ys,
  // until we reach their underlying "canonical nodes". Note these are not
  // necessarily canonical types, as they may still have sugared properties.
  // QX and QY will store the sum of all qualifiers in Xs and Ys respectively.
  auto Xs = ::unwrapSugar(T&: SX, QTotal&: QX), Ys = ::unwrapSugar(T&: SY, QTotal&: QY);

  // If this is an ArrayType, the element qualifiers are interchangeable with
  // the top level qualifiers.
  // * In case the canonical nodes are the same, the elements types are already
  //   the same.
  // * Otherwise, the element types will be made the same, and any different
  //   element qualifiers will be moved up to the top level qualifiers, per
  //   'getCommonArrayElementType'.
  // In both cases, this means there may be top level qualifiers which differ
  // between X and Y. If so, these differing qualifiers are redundant with the
  // element qualifiers, and can be removed without changing the canonical type.
  // The desired behaviour is the same as for the 'Unqualified' case here:
  // treat the redundant qualifiers as sugar, remove the ones which are not
  // common to both sides.
  bool KeepCommonQualifiers =
      Unqualified || isa<ArrayType, OverflowBehaviorType>(Val: SX.Ty);

  if (SX.Ty != SY.Ty) {
    // The canonical nodes differ. Build a common canonical node out of the two,
    // unifying their sugar. This may recurse back here.
    SX.Ty =
        ::getCommonNonSugarTypeNode(Ctx: *this, X: SX.Ty, QX, Y: SY.Ty, QY).getTypePtr();
  } else {
    // The canonical nodes were identical: We may have desugared too much.
    // Add any common sugar back in.
    while (!Xs.empty() && !Ys.empty() && Xs.back().Ty == Ys.back().Ty) {
      QX -= SX.Quals;
      QY -= SY.Quals;
      SX = Xs.pop_back_val();
      SY = Ys.pop_back_val();
    }
  }
  if (KeepCommonQualifiers)
    QX = Qualifiers::removeCommonQualifiers(L&: QX, R&: QY);
  else
    assert(QX == QY);

  // Even though the remaining sugar nodes in Xs and Ys differ, some may be
  // related. Walk up these nodes, unifying them and adding the result.
  while (!Xs.empty() && !Ys.empty()) {
    auto Underlying = SplitQualType(
        SX.Ty, Qualifiers::removeCommonQualifiers(L&: SX.Quals, R&: SY.Quals));
    SX = Xs.pop_back_val();
    SY = Ys.pop_back_val();
    SX.Ty = ::getCommonSugarTypeNode(Ctx: *this, X: SX.Ty, Y: SY.Ty, Underlying)
                .getTypePtrOrNull();
    // Stop at the first pair which is unrelated.
    if (!SX.Ty) {
      SX.Ty = Underlying.Ty;
      break;
    }
    QX -= Underlying.Quals;
  };

  // Add back the missing accumulated qualifiers, which were stripped off
  // with the sugar nodes we could not unify.
  QualType R = getQualifiedType(T: SX.Ty, Qs: QX);
  assert(Unqualified ? hasSameUnqualifiedType(R, X) : hasSameType(R, X));
  return R;
}
14926
/// Map a saturated fixed point type to its unsaturated counterpart.
/// \p Ty must be a fixed point type; non-saturated types are returned as-is.
QualType ASTContext::getCorrespondingUnsaturatedType(QualType Ty) const {
  assert(Ty->isFixedPointType());

  // Already unsaturated: nothing to strip.
  if (Ty->isUnsaturatedFixedPointType())
    return Ty;

  switch (Ty->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a saturated fixed point type!");
  case BuiltinType::SatShortAccum:
    return ShortAccumTy;
  case BuiltinType::SatAccum:
    return AccumTy;
  case BuiltinType::SatLongAccum:
    return LongAccumTy;
  case BuiltinType::SatUShortAccum:
    return UnsignedShortAccumTy;
  case BuiltinType::SatUAccum:
    return UnsignedAccumTy;
  case BuiltinType::SatULongAccum:
    return UnsignedLongAccumTy;
  case BuiltinType::SatShortFract:
    return ShortFractTy;
  case BuiltinType::SatFract:
    return FractTy;
  case BuiltinType::SatLongFract:
    return LongFractTy;
  case BuiltinType::SatUShortFract:
    return UnsignedShortFractTy;
  case BuiltinType::SatUFract:
    return UnsignedFractTy;
  case BuiltinType::SatULongFract:
    return UnsignedLongFractTy;
  }
}
14962
14963QualType ASTContext::getCorrespondingSaturatedType(QualType Ty) const {
14964 assert(Ty->isFixedPointType());
14965
14966 if (Ty->isSaturatedFixedPointType()) return Ty;
14967
14968 switch (Ty->castAs<BuiltinType>()->getKind()) {
14969 default:
14970 llvm_unreachable("Not a fixed point type!");
14971 case BuiltinType::ShortAccum:
14972 return SatShortAccumTy;
14973 case BuiltinType::Accum:
14974 return SatAccumTy;
14975 case BuiltinType::LongAccum:
14976 return SatLongAccumTy;
14977 case BuiltinType::UShortAccum:
14978 return SatUnsignedShortAccumTy;
14979 case BuiltinType::UAccum:
14980 return SatUnsignedAccumTy;
14981 case BuiltinType::ULongAccum:
14982 return SatUnsignedLongAccumTy;
14983 case BuiltinType::ShortFract:
14984 return SatShortFractTy;
14985 case BuiltinType::Fract:
14986 return SatFractTy;
14987 case BuiltinType::LongFract:
14988 return SatLongFractTy;
14989 case BuiltinType::UShortFract:
14990 return SatUnsignedShortFractTy;
14991 case BuiltinType::UFract:
14992 return SatUnsignedFractTy;
14993 case BuiltinType::ULongFract:
14994 return SatUnsignedLongFractTy;
14995 }
14996}
14997
14998LangAS ASTContext::getLangASForBuiltinAddressSpace(unsigned AS) const {
14999 if (LangOpts.OpenCL)
15000 return getTargetInfo().getOpenCLBuiltinAddressSpace(AS);
15001
15002 if (LangOpts.CUDA)
15003 return getTargetInfo().getCUDABuiltinAddressSpace(AS);
15004
15005 return getLangASFromTargetAS(TargetAS: AS);
15006}
15007
// Explicit instantiation of LazyGenerationalUpdatePtr::makeValue, so the
// definition is emitted in this TU in case a Redeclarable<T> is used from a
// TU that doesn't include ASTContext.h.
template
clang::LazyGenerationalUpdatePtr<
    const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::ValueType
clang::LazyGenerationalUpdatePtr<
    const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::makeValue(
    const clang::ASTContext &Ctx, Decl *Value);
15016
/// Return the scale (number of fractional bits) of the given fixed point
/// type, as reported by the target. Saturated and unsaturated variants of
/// the same type share a scale.
unsigned char ASTContext::getFixedPointScale(QualType Ty) const {
  assert(Ty->isFixedPointType());

  const TargetInfo &Target = getTargetInfo();
  switch (Ty->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a fixed point type!");
  case BuiltinType::ShortAccum:
  case BuiltinType::SatShortAccum:
    return Target.getShortAccumScale();
  case BuiltinType::Accum:
  case BuiltinType::SatAccum:
    return Target.getAccumScale();
  case BuiltinType::LongAccum:
  case BuiltinType::SatLongAccum:
    return Target.getLongAccumScale();
  case BuiltinType::UShortAccum:
  case BuiltinType::SatUShortAccum:
    return Target.getUnsignedShortAccumScale();
  case BuiltinType::UAccum:
  case BuiltinType::SatUAccum:
    return Target.getUnsignedAccumScale();
  case BuiltinType::ULongAccum:
  case BuiltinType::SatULongAccum:
    return Target.getUnsignedLongAccumScale();
  case BuiltinType::ShortFract:
  case BuiltinType::SatShortFract:
    return Target.getShortFractScale();
  case BuiltinType::Fract:
  case BuiltinType::SatFract:
    return Target.getFractScale();
  case BuiltinType::LongFract:
  case BuiltinType::SatLongFract:
    return Target.getLongFractScale();
  case BuiltinType::UShortFract:
  case BuiltinType::SatUShortFract:
    return Target.getUnsignedShortFractScale();
  case BuiltinType::UFract:
  case BuiltinType::SatUFract:
    return Target.getUnsignedFractScale();
  case BuiltinType::ULongFract:
  case BuiltinType::SatULongFract:
    return Target.getUnsignedLongFractScale();
  }
}
15062
/// Return the number of integral bits of the given fixed point type, as
/// reported by the target. Fract types have no integral bits and always
/// yield zero.
unsigned char ASTContext::getFixedPointIBits(QualType Ty) const {
  assert(Ty->isFixedPointType());

  const TargetInfo &Target = getTargetInfo();
  switch (Ty->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a fixed point type!");
  case BuiltinType::ShortAccum:
  case BuiltinType::SatShortAccum:
    return Target.getShortAccumIBits();
  case BuiltinType::Accum:
  case BuiltinType::SatAccum:
    return Target.getAccumIBits();
  case BuiltinType::LongAccum:
  case BuiltinType::SatLongAccum:
    return Target.getLongAccumIBits();
  case BuiltinType::UShortAccum:
  case BuiltinType::SatUShortAccum:
    return Target.getUnsignedShortAccumIBits();
  case BuiltinType::UAccum:
  case BuiltinType::SatUAccum:
    return Target.getUnsignedAccumIBits();
  case BuiltinType::ULongAccum:
  case BuiltinType::SatULongAccum:
    return Target.getUnsignedLongAccumIBits();
  // All Fract variants are purely fractional.
  case BuiltinType::ShortFract:
  case BuiltinType::SatShortFract:
  case BuiltinType::Fract:
  case BuiltinType::SatFract:
  case BuiltinType::LongFract:
  case BuiltinType::SatLongFract:
  case BuiltinType::UShortFract:
  case BuiltinType::SatUShortFract:
  case BuiltinType::UFract:
  case BuiltinType::SatUFract:
  case BuiltinType::ULongFract:
  case BuiltinType::SatULongFract:
    return 0;
  }
}
15103
15104llvm::FixedPointSemantics
15105ASTContext::getFixedPointSemantics(QualType Ty) const {
15106 assert((Ty->isFixedPointType() || Ty->isIntegerType()) &&
15107 "Can only get the fixed point semantics for a "
15108 "fixed point or integer type.");
15109 if (Ty->isIntegerType())
15110 return llvm::FixedPointSemantics::GetIntegerSemantics(
15111 Width: getIntWidth(T: Ty), IsSigned: Ty->isSignedIntegerType());
15112
15113 bool isSigned = Ty->isSignedFixedPointType();
15114 return llvm::FixedPointSemantics(
15115 static_cast<unsigned>(getTypeSize(T: Ty)), getFixedPointScale(Ty), isSigned,
15116 Ty->isSaturatedFixedPointType(),
15117 !isSigned && getTargetInfo().doUnsignedFixedPointTypesHavePadding());
15118}
15119
15120llvm::APFixedPoint ASTContext::getFixedPointMax(QualType Ty) const {
15121 assert(Ty->isFixedPointType());
15122 return llvm::APFixedPoint::getMax(Sema: getFixedPointSemantics(Ty));
15123}
15124
15125llvm::APFixedPoint ASTContext::getFixedPointMin(QualType Ty) const {
15126 assert(Ty->isFixedPointType());
15127 return llvm::APFixedPoint::getMin(Sema: getFixedPointSemantics(Ty));
15128}
15129
/// Map an unsigned fixed point type to the signed type of the same family,
/// preserving saturation.
QualType ASTContext::getCorrespondingSignedFixedPointType(QualType Ty) const {
  assert(Ty->isUnsignedFixedPointType() &&
         "Expected unsigned fixed point type");

  switch (Ty->castAs<BuiltinType>()->getKind()) {
  case BuiltinType::UShortAccum:
    return ShortAccumTy;
  case BuiltinType::UAccum:
    return AccumTy;
  case BuiltinType::ULongAccum:
    return LongAccumTy;
  case BuiltinType::SatUShortAccum:
    return SatShortAccumTy;
  case BuiltinType::SatUAccum:
    return SatAccumTy;
  case BuiltinType::SatULongAccum:
    return SatLongAccumTy;
  case BuiltinType::UShortFract:
    return ShortFractTy;
  case BuiltinType::UFract:
    return FractTy;
  case BuiltinType::ULongFract:
    return LongFractTy;
  case BuiltinType::SatUShortFract:
    return SatShortFractTy;
  case BuiltinType::SatUFract:
    return SatFractTy;
  case BuiltinType::SatULongFract:
    return SatLongFractTy;
  default:
    llvm_unreachable("Unexpected unsigned fixed point type");
  }
}
15163
15164// Given a list of FMV features, return a concatenated list of the
15165// corresponding backend features (which may contain duplicates).
15166static std::vector<std::string> getFMVBackendFeaturesFor(
15167 const llvm::SmallVectorImpl<StringRef> &FMVFeatStrings) {
15168 std::vector<std::string> BackendFeats;
15169 llvm::AArch64::ExtensionSet FeatureBits;
15170 for (StringRef F : FMVFeatStrings)
15171 if (auto FMVExt = llvm::AArch64::parseFMVExtension(Extension: F))
15172 if (FMVExt->ID)
15173 FeatureBits.enable(E: *FMVExt->ID);
15174 FeatureBits.toLLVMFeatureList(Features&: BackendFeats);
15175 return BackendFeats;
15176}
15177
15178ParsedTargetAttr
15179ASTContext::filterFunctionTargetAttrs(const TargetAttr *TD) const {
15180 assert(TD != nullptr);
15181 ParsedTargetAttr ParsedAttr = Target->parseTargetAttr(Str: TD->getFeaturesStr());
15182
15183 llvm::erase_if(C&: ParsedAttr.Features, P: [&](const std::string &Feat) {
15184 return !Target->isValidFeatureName(Feature: StringRef{Feat}.substr(Start: 1));
15185 });
15186 return ParsedAttr;
15187}
15188
15189void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
15190 const FunctionDecl *FD) const {
15191 if (FD)
15192 getFunctionFeatureMap(FeatureMap, GD: GlobalDecl().getWithDecl(D: FD));
15193 else
15194 Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(),
15195 CPU: Target->getTargetOpts().CPU,
15196 FeatureVec: Target->getTargetOpts().Features);
15197}
15198
// Fills in the supplied string map with the set of target features for the
// passed in function. The features come from, in order of precedence, the
// function's multiversioning/target attribute and the command line.
void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
                                       GlobalDecl GD) const {
  StringRef TargetCPU = Target->getTargetOpts().CPU;
  const FunctionDecl *FD = GD.getDecl()->getAsFunction();
  if (const auto *TD = FD->getAttr<TargetAttr>()) {
    // __attribute__((target("..."))): keep only features the target knows.
    ParsedTargetAttr ParsedAttr = filterFunctionTargetAttrs(TD);

    // Make a copy of the features as passed on the command line into the
    // beginning of the additional features from the function to override.
    // AArch64 handles command line option features in parseTargetAttr().
    if (!Target->getTriple().isAArch64())
      ParsedAttr.Features.insert(
          position: ParsedAttr.Features.begin(),
          first: Target->getTargetOpts().FeaturesAsWritten.begin(),
          last: Target->getTargetOpts().FeaturesAsWritten.end());

    // The attribute may also override the CPU ("arch="/"cpu=" handling is
    // done by parseTargetAttr).
    if (ParsedAttr.CPU != "" && Target->isValidCPUName(Name: ParsedAttr.CPU))
      TargetCPU = ParsedAttr.CPU;

    // Now populate the feature map, first with the TargetCPU which is either
    // the default or a new one from the target attribute string. Then we'll use
    // the passed in features (FeaturesAsWritten) along with the new ones from
    // the attribute.
    Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU,
                           FeatureVec: ParsedAttr.Features);
  } else if (const auto *SD = FD->getAttr<CPUSpecificAttr>()) {
    // cpu_specific multiversioning: features come from the dispatch table for
    // the CPU named by this version's index.
    llvm::SmallVector<StringRef, 32> FeaturesTmp;
    Target->getCPUSpecificCPUDispatchFeatures(
        Name: SD->getCPUName(Index: GD.getMultiVersionIndex())->getName(), Features&: FeaturesTmp);
    std::vector<std::string> Features(FeaturesTmp.begin(), FeaturesTmp.end());
    // Command-line features are prepended so the attribute's features win.
    Features.insert(position: Features.begin(),
                    first: Target->getTargetOpts().FeaturesAsWritten.begin(),
                    last: Target->getTargetOpts().FeaturesAsWritten.end());
    Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU, FeatureVec: Features);
  } else if (const auto *TC = FD->getAttr<TargetClonesAttr>()) {
    // target_clones multiversioning: each triple family encodes the clone's
    // feature string differently.
    if (Target->getTriple().isAArch64()) {
      llvm::SmallVector<StringRef, 8> Feats;
      TC->getFeatures(Out&: Feats, Index: GD.getMultiVersionIndex());
      std::vector<std::string> Features = getFMVBackendFeaturesFor(FMVFeatStrings: Feats);
      Features.insert(position: Features.begin(),
                      first: Target->getTargetOpts().FeaturesAsWritten.begin(),
                      last: Target->getTargetOpts().FeaturesAsWritten.end());
      Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU, FeatureVec: Features);
    } else if (Target->getTriple().isRISCV()) {
      // RISC-V clones name a full target attribute string, or "default".
      StringRef VersionStr = TC->getFeatureStr(Index: GD.getMultiVersionIndex());
      std::vector<std::string> Features;
      if (VersionStr != "default") {
        ParsedTargetAttr ParsedAttr = Target->parseTargetAttr(Str: VersionStr);
        Features.insert(position: Features.begin(), first: ParsedAttr.Features.begin(),
                        last: ParsedAttr.Features.end());
      }
      Features.insert(position: Features.begin(),
                      first: Target->getTargetOpts().FeaturesAsWritten.begin(),
                      last: Target->getTargetOpts().FeaturesAsWritten.end());
      Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU, FeatureVec: Features);
    } else if (Target->getTriple().isOSAIX()) {
      // AIX clones select a CPU via "cpu=<name>", or "default".
      std::vector<std::string> Features;
      StringRef VersionStr = TC->getFeatureStr(Index: GD.getMultiVersionIndex());
      if (VersionStr.starts_with(Prefix: "cpu="))
        TargetCPU = VersionStr.drop_front(N: sizeof("cpu=") - 1);
      else
        assert(VersionStr == "default");
      Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU, FeatureVec: Features);
    } else {
      // Other targets (e.g. X86): "arch=<cpu>" selects a CPU, anything else
      // is a single feature name.
      std::vector<std::string> Features;
      StringRef VersionStr = TC->getFeatureStr(Index: GD.getMultiVersionIndex());
      if (VersionStr.starts_with(Prefix: "arch="))
        TargetCPU = VersionStr.drop_front(N: sizeof("arch=") - 1);
      else if (VersionStr != "default")
        Features.push_back(x: (StringRef{"+"} + VersionStr).str());
      Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU, FeatureVec: Features);
    }
  } else if (const auto *TV = FD->getAttr<TargetVersionAttr>()) {
    // target_version multiversioning (AArch64 and RISC-V only).
    std::vector<std::string> Features;
    if (Target->getTriple().isRISCV()) {
      ParsedTargetAttr ParsedAttr = Target->parseTargetAttr(Str: TV->getName());
      Features.insert(position: Features.begin(), first: ParsedAttr.Features.begin(),
                      last: ParsedAttr.Features.end());
    } else {
      assert(Target->getTriple().isAArch64());
      llvm::SmallVector<StringRef, 8> Feats;
      TV->getFeatures(Out&: Feats);
      Features = getFMVBackendFeaturesFor(FMVFeatStrings: Feats);
    }
    Features.insert(position: Features.begin(),
                    first: Target->getTargetOpts().FeaturesAsWritten.begin(),
                    last: Target->getTargetOpts().FeaturesAsWritten.end());
    Target->initFeatureMap(Features&: FeatureMap, Diags&: getDiagnostics(), CPU: TargetCPU, FeatureVec: Features);
  } else {
    // No multiversioning/target attribute: use the translation unit default.
    FeatureMap = Target->getTargetOpts().FeatureMap;
  }
}
15293
15294static SYCLKernelInfo BuildSYCLKernelInfo(ASTContext &Context,
15295 CanQualType KernelNameType,
15296 const FunctionDecl *FD) {
15297 // Host and device compilation may use different ABIs and different ABIs
15298 // may allocate name mangling discriminators differently. A discriminator
15299 // override is used to ensure consistent discriminator allocation across
15300 // host and device compilation.
15301 auto DeviceDiscriminatorOverrider =
15302 [](ASTContext &Ctx, const NamedDecl *ND) -> UnsignedOrNone {
15303 if (const auto *RD = dyn_cast<CXXRecordDecl>(Val: ND))
15304 if (RD->isLambda())
15305 return RD->getDeviceLambdaManglingNumber();
15306 return std::nullopt;
15307 };
15308 std::unique_ptr<MangleContext> MC{ItaniumMangleContext::create(
15309 Context, Diags&: Context.getDiagnostics(), Discriminator: DeviceDiscriminatorOverrider)};
15310
15311 // Construct a mangled name for the SYCL kernel caller offload entry point.
15312 // FIXME: The Itanium typeinfo mangling (_ZTS<type>) is currently used to
15313 // name the SYCL kernel caller offload entry point function. This mangling
15314 // does not suffice to clearly identify symbols that correspond to SYCL
15315 // kernel caller functions, nor is this mangling natural for targets that
15316 // use a non-Itanium ABI.
15317 std::string Buffer;
15318 Buffer.reserve(res_arg: 128);
15319 llvm::raw_string_ostream Out(Buffer);
15320 MC->mangleCanonicalTypeName(T: KernelNameType, Out);
15321 std::string KernelName = Out.str();
15322
15323 return {KernelNameType, FD, KernelName};
15324}
15325
15326void ASTContext::registerSYCLEntryPointFunction(FunctionDecl *FD) {
15327 // If the function declaration to register is invalid or dependent, the
15328 // registration attempt is ignored.
15329 if (FD->isInvalidDecl() || FD->isTemplated())
15330 return;
15331
15332 const auto *SKEPAttr = FD->getAttr<SYCLKernelEntryPointAttr>();
15333 assert(SKEPAttr && "Missing sycl_kernel_entry_point attribute");
15334
15335 // Be tolerant of multiple registration attempts so long as each attempt
15336 // is for the same entity. Callers are obligated to detect and diagnose
15337 // conflicting kernel names prior to calling this function.
15338 CanQualType KernelNameType = getCanonicalType(T: SKEPAttr->getKernelName());
15339 auto IT = SYCLKernels.find(Val: KernelNameType);
15340 assert((IT == SYCLKernels.end() ||
15341 declaresSameEntity(FD, IT->second.getKernelEntryPointDecl())) &&
15342 "SYCL kernel name conflict");
15343 (void)IT;
15344 SYCLKernels.insert(KV: std::make_pair(
15345 x&: KernelNameType, y: BuildSYCLKernelInfo(Context&: *this, KernelNameType, FD)));
15346}
15347
15348const SYCLKernelInfo &ASTContext::getSYCLKernelInfo(QualType T) const {
15349 CanQualType KernelNameType = getCanonicalType(T);
15350 return SYCLKernels.at(Val: KernelNameType);
15351}
15352
15353const SYCLKernelInfo *ASTContext::findSYCLKernelInfo(QualType T) const {
15354 CanQualType KernelNameType = getCanonicalType(T);
15355 auto IT = SYCLKernels.find(Val: KernelNameType);
15356 if (IT != SYCLKernels.end())
15357 return &IT->second;
15358 return nullptr;
15359}
15360
15361OMPTraitInfo &ASTContext::getNewOMPTraitInfo() {
15362 OMPTraitInfoVector.emplace_back(Args: new OMPTraitInfo());
15363 return *OMPTraitInfoVector.back();
15364}
15365
15366const StreamingDiagnostic &clang::
15367operator<<(const StreamingDiagnostic &DB,
15368 const ASTContext::SectionInfo &Section) {
15369 if (Section.Decl)
15370 return DB << Section.Decl;
15371 return DB << "a prior #pragma section";
15372}
15373
/// Whether \p D is a candidate for externalization (being given an external
/// name despite internal linkage) under CUDA/HIP.
bool ASTContext::mayExternalize(const Decl *D) const {
  bool IsInternalVar =
      isa<VarDecl>(Val: D) &&
      basicGVALinkageForVariable(Context: *this, VD: cast<VarDecl>(Val: D)) == GVA_Internal;
  // Only variables with a user-written (non-implicit) __device__ or
  // __constant__ attribute qualify.
  bool IsExplicitDeviceVar = (D->hasAttr<CUDADeviceAttr>() &&
                              !D->getAttr<CUDADeviceAttr>()->isImplicit()) ||
                             (D->hasAttr<CUDAConstantAttr>() &&
                              !D->getAttr<CUDAConstantAttr>()->isImplicit());
  // CUDA/HIP: managed variables need to be externalized since it is
  // a declaration in IR, therefore cannot have internal linkage. Kernels in
  // anonymous name space needs to be externalized to avoid duplicate symbols.
  // NOTE: the cast<FunctionDecl> below is only reached when the decl carries
  // CUDAGlobalAttr (a kernel), so the short-circuit order matters.
  return (IsInternalVar &&
          (D->hasAttr<HIPManagedAttr>() || IsExplicitDeviceVar)) ||
         (D->hasAttr<CUDAGlobalAttr>() &&
          basicGVALinkageForFunction(Context: *this, FD: cast<FunctionDecl>(Val: D)) ==
              GVA_Internal);
}
15391
/// Whether \p D must actually be externalized: it may be externalized
/// (see mayExternalize) and is either managed, a kernel, or a device
/// variable ODR-used by host code.
/// NOTE: the cast<VarDecl> is only reached for decls without HIPManagedAttr
/// or CUDAGlobalAttr, so the short-circuit order matters.
bool ASTContext::shouldExternalize(const Decl *D) const {
  return mayExternalize(D) &&
         (D->hasAttr<HIPManagedAttr>() || D->hasAttr<CUDAGlobalAttr>() ||
          CUDADeviceVarODRUsedByHost.count(key: cast<VarDecl>(Val: D)));
}
15397
15398StringRef ASTContext::getCUIDHash() const {
15399 if (!CUIDHash.empty())
15400 return CUIDHash;
15401 if (LangOpts.CUID.empty())
15402 return StringRef();
15403 CUIDHash = llvm::utohexstr(X: llvm::MD5Hash(Str: LangOpts.CUID), /*LowerCase=*/true);
15404 return CUIDHash;
15405}
15406
15407const CXXRecordDecl *
15408ASTContext::baseForVTableAuthentication(const CXXRecordDecl *ThisClass) const {
15409 assert(ThisClass);
15410 assert(ThisClass->isPolymorphic());
15411 const CXXRecordDecl *PrimaryBase = ThisClass;
15412 while (1) {
15413 assert(PrimaryBase);
15414 assert(PrimaryBase->isPolymorphic());
15415 auto &Layout = getASTRecordLayout(D: PrimaryBase);
15416 auto Base = Layout.getPrimaryBase();
15417 if (!Base || Base == PrimaryBase || !Base->isPolymorphic())
15418 break;
15419 PrimaryBase = Base;
15420 }
15421 return PrimaryBase;
15422}
15423
/// Decide whether the thunk with \p MangledName for \p VirtualMethodDecl may
/// use the abbreviated (override-info-elided) mangling. The decision for all
/// of a method's thunks is computed once and cached in ThunksToBeAbbreviated.
bool ASTContext::useAbbreviatedThunkName(GlobalDecl VirtualMethodDecl,
                                         StringRef MangledName) {
  auto *Method = cast<CXXMethodDecl>(Val: VirtualMethodDecl.getDecl());
  assert(Method->isVirtual());
  bool DefaultIncludesPointerAuth =
      LangOpts.PointerAuthCalls || LangOpts.PointerAuthIntrinsics;

  // Without pointer auth in the mangling, abbreviation is always allowed.
  if (!DefaultIncludesPointerAuth)
    return true;

  // Fast path: reuse a previously computed answer for this method.
  auto Existing = ThunksToBeAbbreviated.find(Val: VirtualMethodDecl);
  if (Existing != ThunksToBeAbbreviated.end())
    return Existing->second.contains(key: MangledName.str());

  std::unique_ptr<MangleContext> Mangler(createMangleContext());
  llvm::StringMap<llvm::SmallVector<std::string, 2>> Thunks;
  auto VtableContext = getVTableContext();
  if (const auto *ThunkInfos = VtableContext->getThunkInfo(GD: VirtualMethodDecl)) {
    auto *Destructor = dyn_cast<CXXDestructorDecl>(Val: Method);
    // Mangle every thunk twice — with override info elided and in full —
    // grouping the full names under their shared elided name.
    for (const auto &Thunk : *ThunkInfos) {
      SmallString<256> ElidedName;
      llvm::raw_svector_ostream ElidedNameStream(ElidedName);
      if (Destructor)
        Mangler->mangleCXXDtorThunk(DD: Destructor, Type: VirtualMethodDecl.getDtorType(),
                                    Thunk, /* elideOverrideInfo */ ElideOverrideInfo: true,
                                    ElidedNameStream);
      else
        Mangler->mangleThunk(MD: Method, Thunk, /* elideOverrideInfo */ ElideOverrideInfo: true,
                             ElidedNameStream);
      SmallString<256> MangledName;
      llvm::raw_svector_ostream mangledNameStream(MangledName);
      if (Destructor)
        Mangler->mangleCXXDtorThunk(DD: Destructor, Type: VirtualMethodDecl.getDtorType(),
                                    Thunk, /* elideOverrideInfo */ ElideOverrideInfo: false,
                                    mangledNameStream);
      else
        Mangler->mangleThunk(MD: Method, Thunk, /* elideOverrideInfo */ ElideOverrideInfo: false,
                             mangledNameStream);

      Thunks[ElidedName].push_back(Elt: std::string(MangledName));
    }
  }
  // For each elided-name group, only the lexicographically first full name
  // gets to use the abbreviated form.
  llvm::StringSet<> SimplifiedThunkNames;
  for (auto &ThunkList : Thunks) {
    llvm::sort(C&: ThunkList.second);
    SimplifiedThunkNames.insert(key: ThunkList.second[0]);
  }
  bool Result = SimplifiedThunkNames.contains(key: MangledName);
  ThunksToBeAbbreviated[VirtualMethodDecl] = std::move(SimplifiedThunkNames);
  return Result;
}
15475
15476bool ASTContext::arePFPFieldsTriviallyCopyable(const RecordDecl *RD) const {
15477 // Check for trivially-destructible here because non-trivially-destructible
15478 // types will always cause the type and any types derived from it to be
15479 // considered non-trivially-copyable. The same cannot be said for
15480 // trivially-copyable because deleting special members of a type derived from
15481 // a non-trivially-copyable type can cause the derived type to be considered
15482 // trivially copyable.
15483 if (getLangOpts().PointerFieldProtectionTagged)
15484 return !isa<CXXRecordDecl>(Val: RD) ||
15485 cast<CXXRecordDecl>(Val: RD)->hasTrivialDestructor();
15486 return true;
15487}
15488
// Recursively collects every PFP-protected field reachable from Ty, appending
// a {byte offset, FieldDecl} entry to Fields for each one found. Offset is the
// offset of the Ty subobject within the outermost object being scanned, so the
// recorded offsets are always relative to the top-level object. IncludeVBases
// controls whether virtual bases of Ty are visited: virtual bases are laid out
// only in the most-derived object, so the recursion passes false once it has
// descended into any base subobject.
static void findPFPFields(const ASTContext &Ctx, QualType Ty, CharUnits Offset,
                          std::vector<PFPField> &Fields, bool IncludeVBases) {
  // Constant arrays of class type: scan each element at its own offset,
  // stepping by the element record's size.
  if (auto *AT = Ctx.getAsConstantArrayType(T: Ty)) {
    if (auto *ElemDecl = AT->getElementType()->getAsCXXRecordDecl()) {
      const ASTRecordLayout &ElemRL = Ctx.getASTRecordLayout(D: ElemDecl);
      for (unsigned i = 0; i != AT->getSize(); ++i)
        findPFPFields(Ctx, Ty: AT->getElementType(), Offset: Offset + i * ElemRL.getSize(),
                      Fields, IncludeVBases: true);
    }
  }
  auto *Decl = Ty->getAsCXXRecordDecl();
  // isPFPType() is inherited from bases and members (including via arrays), so
  // we can early exit if it is false. Unions are excluded per the API
  // documentation.
  if (!Decl || !Decl->isPFPType() || Decl->isUnion())
    return;
  const ASTRecordLayout &RL = Ctx.getASTRecordLayout(D: Decl);
  // Record each directly declared PFP field, then recurse into the field's
  // type to pick up PFP fields nested inside it.
  for (FieldDecl *Field : Decl->fields()) {
    CharUnits FieldOffset =
        Offset +
        Ctx.toCharUnitsFromBits(BitSize: RL.getFieldOffset(FieldNo: Field->getFieldIndex()));
    if (Ctx.isPFPField(Field))
      Fields.push_back(x: {.Offset: FieldOffset, .Field: Field});
    findPFPFields(Ctx, Ty: Field->getType(), Offset: FieldOffset, Fields,
                  /*IncludeVBases=*/true);
  }
  // Pass false for IncludeVBases below because vbases are only included in
  // layout for top-level types, i.e. not bases or vbases.
  for (CXXBaseSpecifier &Base : Decl->bases()) {
    if (Base.isVirtual())
      continue;
    CharUnits BaseOffset =
        Offset + RL.getBaseClassOffset(Base: Base.getType()->getAsCXXRecordDecl());
    findPFPFields(Ctx, Ty: Base.getType(), Offset: BaseOffset, Fields,
                  /*IncludeVBases=*/false);
  }
  // Virtual bases are visited only at the top level (see comment above), at
  // the vbase offsets recorded in the most-derived object's layout.
  if (IncludeVBases) {
    for (CXXBaseSpecifier &Base : Decl->vbases()) {
      CharUnits BaseOffset =
          Offset + RL.getVBaseClassOffset(VBase: Base.getType()->getAsCXXRecordDecl());
      findPFPFields(Ctx, Ty: Base.getType(), Offset: BaseOffset, Fields,
                    /*IncludeVBases=*/false);
    }
  }
}
15534
15535std::vector<PFPField> ASTContext::findPFPFields(QualType Ty) const {
15536 std::vector<PFPField> PFPFields;
15537 ::findPFPFields(Ctx: *this, Ty, Offset: CharUnits::Zero(), Fields&: PFPFields, IncludeVBases: true);
15538 return PFPFields;
15539}
15540
15541bool ASTContext::hasPFPFields(QualType Ty) const {
15542 return !findPFPFields(Ty).empty();
15543}
15544
15545bool ASTContext::isPFPField(const FieldDecl *FD) const {
15546 if (auto *RD = dyn_cast<CXXRecordDecl>(Val: FD->getParent()))
15547 return RD->isPFPType() && FD->getType()->isPointerType() &&
15548 !FD->hasAttr<NoFieldProtectionAttr>();
15549 return false;
15550}
15551
15552void ASTContext::recordMemberDataPointerEvaluation(const ValueDecl *VD) {
15553 auto *FD = dyn_cast<FieldDecl>(Val: VD);
15554 if (!FD)
15555 FD = cast<FieldDecl>(Val: cast<IndirectFieldDecl>(Val: VD)->chain().back());
15556 if (isPFPField(FD))
15557 PFPFieldsWithEvaluatedOffset.insert(X: FD);
15558}
15559
15560void ASTContext::recordOffsetOfEvaluation(const OffsetOfExpr *E) {
15561 if (E->getNumComponents() == 0)
15562 return;
15563 OffsetOfNode Comp = E->getComponent(Idx: E->getNumComponents() - 1);
15564 if (Comp.getKind() != OffsetOfNode::Field)
15565 return;
15566 if (FieldDecl *FD = Comp.getField(); isPFPField(FD))
15567 PFPFieldsWithEvaluatedOffset.insert(X: FD);
15568}
15569