1//===--- SemaExprCXX.cpp - Semantic Analysis for Expressions --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// Implements semantic analysis for C++ expressions.
11///
12//===----------------------------------------------------------------------===//
13
14#include "TreeTransform.h"
15#include "TypeLocBuilder.h"
16#include "clang/AST/ASTContext.h"
17#include "clang/AST/ASTLambda.h"
18#include "clang/AST/CXXInheritance.h"
19#include "clang/AST/CharUnits.h"
20#include "clang/AST/DeclCXX.h"
21#include "clang/AST/DeclObjC.h"
22#include "clang/AST/DynamicRecursiveASTVisitor.h"
23#include "clang/AST/ExprCXX.h"
24#include "clang/AST/ExprConcepts.h"
25#include "clang/AST/ExprObjC.h"
26#include "clang/AST/Type.h"
27#include "clang/AST/TypeLoc.h"
28#include "clang/Basic/AlignedAllocation.h"
29#include "clang/Basic/DiagnosticSema.h"
30#include "clang/Basic/PartialDiagnostic.h"
31#include "clang/Basic/TargetInfo.h"
32#include "clang/Basic/TokenKinds.h"
33#include "clang/Lex/Preprocessor.h"
34#include "clang/Sema/DeclSpec.h"
35#include "clang/Sema/EnterExpressionEvaluationContext.h"
36#include "clang/Sema/Initialization.h"
37#include "clang/Sema/Lookup.h"
38#include "clang/Sema/ParsedTemplate.h"
39#include "clang/Sema/Scope.h"
40#include "clang/Sema/ScopeInfo.h"
41#include "clang/Sema/SemaCUDA.h"
42#include "clang/Sema/SemaHLSL.h"
43#include "clang/Sema/SemaLambda.h"
44#include "clang/Sema/SemaObjC.h"
45#include "clang/Sema/SemaPPC.h"
46#include "clang/Sema/Template.h"
47#include "clang/Sema/TemplateDeduction.h"
48#include "llvm/ADT/APInt.h"
49#include "llvm/ADT/STLExtras.h"
50#include "llvm/ADT/StringExtras.h"
51#include "llvm/Support/ErrorHandling.h"
52#include "llvm/Support/TypeSize.h"
53#include <optional>
54using namespace clang;
55using namespace sema;
56
57ParsedType Sema::getInheritingConstructorName(CXXScopeSpec &SS,
58 SourceLocation NameLoc,
59 const IdentifierInfo &Name) {
60 NestedNameSpecifier NNS = SS.getScopeRep();
61 QualType Type(NNS.getAsType(), 0);
62 if ([[maybe_unused]] const auto *DNT = dyn_cast<DependentNameType>(Val&: Type))
63 assert(DNT->getIdentifier() == &Name && "not a constructor name");
64
65 // This reference to the type is located entirely at the location of the
66 // final identifier in the qualified-id.
67 return CreateParsedType(T: Type,
68 TInfo: Context.getTrivialTypeSourceInfo(T: Type, Loc: NameLoc));
69}
70
/// Resolve an identifier that names a constructor of the current class
/// (i.e. the identifier equals the class's own name) to the class type.
/// Returns a null ParsedType after diagnosing failure.
ParsedType Sema::getConstructorName(const IdentifierInfo &II,
                                    SourceLocation NameLoc, Scope *S,
                                    CXXScopeSpec &SS, bool EnteringContext) {
  CXXRecordDecl *CurClass = getCurrentClass(S, SS: &SS);
  assert(CurClass && &II == CurClass->getIdentifier() &&
         "not a constructor name");

  // When naming a constructor as a member of a dependent context (eg, in a
  // friend declaration or an inherited constructor declaration), form an
  // unresolved "typename" type.
  if (CurClass->isDependentContext() && !EnteringContext && SS.getScopeRep()) {
    QualType T = Context.getDependentNameType(Keyword: ElaboratedTypeKeyword::None,
                                              NNS: SS.getScopeRep(), Name: &II);
    return ParsedType::make(P: T);
  }

  // A qualified constructor name requires the qualifier's context to be
  // complete before we can look inside it.
  if (SS.isNotEmpty() && RequireCompleteDeclContext(SS, DC: CurClass))
    return ParsedType();

  // Find the injected-class-name declaration. Note that we make no attempt to
  // diagnose cases where the injected-class-name is shadowed: the only
  // declaration that can validly shadow the injected-class-name is a
  // non-static data member, and if the class contains both a non-static data
  // member and a constructor then it is ill-formed (we check that in
  // CheckCompletedCXXClass).
  CXXRecordDecl *InjectedClassName = nullptr;
  for (NamedDecl *ND : CurClass->lookup(Name: &II)) {
    auto *RD = dyn_cast<CXXRecordDecl>(Val: ND);
    if (RD && RD->isInjectedClassName()) {
      InjectedClassName = RD;
      break;
    }
  }
  if (!InjectedClassName) {
    if (!CurClass->isInvalidDecl()) {
      // FIXME: RequireCompleteDeclContext doesn't check dependent contexts
      // properly. Work around it here for now.
      Diag(Loc: SS.getLastQualifierNameLoc(),
           DiagID: diag::err_incomplete_nested_name_spec) << CurClass << SS.getRange();
    }
    return ParsedType();
  }

  // Build the class type through the injected-class-name we just found.
  QualType T = Context.getTagType(Keyword: ElaboratedTypeKeyword::None, Qualifier: SS.getScopeRep(),
                                  TD: InjectedClassName, /*OwnsTag=*/false);
  return ParsedType::make(P: T);
}
118
/// Resolve the type named in a destructor name (the 'T' of '~T'), searching
/// the scopes described in the long comment below. Conforming cases are
/// accepted silently; several near-miss cases are accepted as off-by-default
/// extensions with diagnostics. Returns null on failure (after diagnosing).
ParsedType Sema::getDestructorName(const IdentifierInfo &II,
                                   SourceLocation NameLoc, Scope *S,
                                   CXXScopeSpec &SS, ParsedType ObjectTypePtr,
                                   bool EnteringContext) {
  // Determine where to perform name lookup.

  // FIXME: This area of the standard is very messy, and the current
  // wording is rather unclear about which scopes we search for the
  // destructor name; see core issues 399 and 555. Issue 399 in
  // particular shows where the current description of destructor name
  // lookup is completely out of line with existing practice, e.g.,
  // this appears to be ill-formed:
  //
  //   namespace N {
  //     template <typename T> struct S {
  //       ~S();
  //     };
  //   }
  //
  //   void f(N::S<int>* s) {
  //     s->N::S<int>::~S();
  //   }
  //
  // See also PR6358 and PR6359.
  //
  // For now, we accept all the cases in which the name given could plausibly
  // be interpreted as a correct destructor name, issuing off-by-default
  // extension diagnostics on the cases that don't strictly conform to the
  // C++20 rules. This basically means we always consider looking in the
  // nested-name-specifier prefix, the complete nested-name-specifier, and
  // the scope, and accept if we find the expected type in any of the three
  // places.

  if (SS.isInvalid())
    return nullptr;

  // Whether we've failed with a diagnostic already.
  bool Failed = false;

  // Every distinct declaration any of the lookups found, kept so the
  // "didn't find a match" diagnostics at the end can list them.
  llvm::SmallVector<NamedDecl*, 8> FoundDecls;
  llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 8> FoundDeclSet;

  // If we have an object type, it's because we are in a
  // pseudo-destructor-expression or a member access expression, and
  // we know what type we're looking for.
  QualType SearchType =
      ObjectTypePtr ? GetTypeFromParser(Ty: ObjectTypePtr) : QualType();

  // Process one lookup's results: record unique declarations for later
  // diagnostics, try to resolve ambiguities as an extension, and return the
  // named type if a single acceptable result was found.
  auto CheckLookupResult = [&](LookupResult &Found) -> ParsedType {
    // A result is acceptable if it is a type and, when the object type is
    // known and non-dependent, names that same type (ignoring qualifiers).
    auto IsAcceptableResult = [&](NamedDecl *D) -> bool {
      auto *Type = dyn_cast<TypeDecl>(Val: D->getUnderlyingDecl());
      if (!Type)
        return false;

      if (SearchType.isNull() || SearchType->isDependentType())
        return true;

      CanQualType T = Context.getCanonicalTypeDeclType(TD: Type);
      return Context.hasSameUnqualifiedType(T1: T, T2: SearchType);
    };

    unsigned NumAcceptableResults = 0;
    for (NamedDecl *D : Found) {
      if (IsAcceptableResult(D))
        ++NumAcceptableResults;

      // Don't list a class twice in the lookup failure diagnostic if it's
      // found by both its injected-class-name and by the name in the enclosing
      // scope.
      if (auto *RD = dyn_cast<CXXRecordDecl>(Val: D))
        if (RD->isInjectedClassName())
          D = cast<NamedDecl>(Val: RD->getParent());

      if (FoundDeclSet.insert(Ptr: D).second)
        FoundDecls.push_back(Elt: D);
    }

    // As an extension, attempt to "fix" an ambiguity by erasing all non-type
    // results, and all non-matching results if we have a search type. It's not
    // clear what the right behavior is if destructor lookup hits an ambiguity,
    // but other compilers do generally accept at least some kinds of
    // ambiguity.
    if (Found.isAmbiguous() && NumAcceptableResults == 1) {
      Diag(Loc: NameLoc, DiagID: diag::ext_dtor_name_ambiguous);
      LookupResult::Filter F = Found.makeFilter();
      while (F.hasNext()) {
        NamedDecl *D = F.next();
        if (auto *TD = dyn_cast<TypeDecl>(Val: D->getUnderlyingDecl()))
          Diag(Loc: D->getLocation(), DiagID: diag::note_destructor_type_here)
              << Context.getTypeDeclType(Keyword: ElaboratedTypeKeyword::None,
                                         /*Qualifier=*/std::nullopt, Decl: TD);
        else
          Diag(Loc: D->getLocation(), DiagID: diag::note_destructor_nontype_here);

        if (!IsAcceptableResult(D))
          F.erase();
      }
      F.done();
    }

    // If the ambiguity survived the filtering above, give up (the filter
    // has already issued notes for each candidate).
    if (Found.isAmbiguous())
      Failed = true;

    if (TypeDecl *Type = Found.getAsSingle<TypeDecl>()) {
      if (IsAcceptableResult(Type)) {
        QualType T = Context.getTypeDeclType(Keyword: ElaboratedTypeKeyword::None,
                                             /*Qualifier=*/std::nullopt, Decl: Type);
        MarkAnyDeclReferenced(Loc: Type->getLocation(), D: Type, /*OdrUse=*/MightBeOdrUse: false);
        return CreateParsedType(T,
                                TInfo: Context.getTrivialTypeSourceInfo(T, Loc: NameLoc));
      }
    }

    return nullptr;
  };

  // Set when any scope we searched was dependent; then failing to find the
  // type is not an error and we form a dependent name instead.
  bool IsDependent = false;

  // Lookup in the class of the object expression (member access /
  // pseudo-destructor case).
  auto LookupInObjectType = [&]() -> ParsedType {
    if (Failed || SearchType.isNull())
      return nullptr;

    IsDependent |= SearchType->isDependentType();

    LookupResult Found(*this, &II, NameLoc, LookupDestructorName);
    DeclContext *LookupCtx = computeDeclContext(T: SearchType);
    if (!LookupCtx)
      return nullptr;
    LookupQualifiedName(R&: Found, LookupCtx);
    return CheckLookupResult(Found);
  };

  // Lookup inside the context denoted by the given nested-name-specifier.
  auto LookupInNestedNameSpec = [&](CXXScopeSpec &LookupSS) -> ParsedType {
    if (Failed)
      return nullptr;

    IsDependent |= isDependentScopeSpecifier(SS: LookupSS);
    DeclContext *LookupCtx = computeDeclContext(SS: LookupSS, EnteringContext);
    if (!LookupCtx)
      return nullptr;

    LookupResult Found(*this, &II, NameLoc, LookupDestructorName);
    if (RequireCompleteDeclContext(SS&: LookupSS, DC: LookupCtx)) {
      Failed = true;
      return nullptr;
    }
    LookupQualifiedName(R&: Found, LookupCtx);
    return CheckLookupResult(Found);
  };

  // Unqualified lookup in the lexical scope.
  auto LookupInScope = [&]() -> ParsedType {
    if (Failed || !S)
      return nullptr;

    LookupResult Found(*this, &II, NameLoc, LookupDestructorName);
    LookupName(R&: Found, S);
    return CheckLookupResult(Found);
  };

  // C++2a [basic.lookup.qual]p6:
  //   In a qualified-id of the form
  //
  //     nested-name-specifier[opt] type-name :: ~ type-name
  //
  //   the second type-name is looked up in the same scope as the first.
  //
  // We interpret this as meaning that if you do a dual-scope lookup for the
  // first name, you also do a dual-scope lookup for the second name, per
  // C++ [basic.lookup.classref]p4:
  //
  //   If the id-expression in a class member access is a qualified-id of the
  //   form
  //
  //     class-name-or-namespace-name :: ...
  //
  //   the class-name-or-namespace-name following the . or -> is first looked
  //   up in the class of the object expression and the name, if found, is used.
  //   Otherwise, it is looked up in the context of the entire
  //   postfix-expression.
  //
  // This looks in the same scopes as for an unqualified destructor name:
  //
  // C++ [basic.lookup.classref]p3:
  //   If the unqualified-id is ~ type-name, the type-name is looked up
  //   in the context of the entire postfix-expression. If the type T
  //   of the object expression is of a class type C, the type-name is
  //   also looked up in the scope of class C. At least one of the
  //   lookups shall find a name that refers to cv T.
  //
  // FIXME: The intent is unclear here. Should type-name::~type-name look in
  // the scope anyway if it finds a non-matching name declared in the class?
  // If both lookups succeed and find a dependent result, which result should
  // we retain? (Same question for p->~type-name().)

  // Everything in the nested-name-specifier before its last component
  // (the 'type-name ::' preceding the '~'), or empty if there is none.
  auto Prefix = [&]() -> NestedNameSpecifierLoc {
    NestedNameSpecifierLoc NNS = SS.getWithLocInContext(Context);
    if (!NNS)
      return NestedNameSpecifierLoc();
    if (auto TL = NNS.getAsTypeLoc())
      return TL.getPrefix();
    return NNS.getAsNamespaceAndPrefix().Prefix;
  }();

  if (Prefix) {
    // This is
    //
    //   nested-name-specifier type-name :: ~ type-name
    //
    // Look for the second type-name in the nested-name-specifier.
    CXXScopeSpec PrefixSS;
    PrefixSS.Adopt(Other: Prefix);
    if (ParsedType T = LookupInNestedNameSpec(PrefixSS))
      return T;
  } else {
    // This is one of
    //
    //   type-name :: ~ type-name
    //   ~ type-name
    //
    // Look in the scope and (if any) the object type.
    if (ParsedType T = LookupInScope())
      return T;
    if (ParsedType T = LookupInObjectType())
      return T;
  }

  if (Failed)
    return nullptr;

  if (IsDependent) {
    // We didn't find our type, but that's OK: it's dependent anyway.

    // FIXME: What if we have no nested-name-specifier?
    TypeSourceInfo *TSI = nullptr;
    QualType T =
        CheckTypenameType(Keyword: ElaboratedTypeKeyword::None, KeywordLoc: SourceLocation(),
                          QualifierLoc: SS.getWithLocInContext(Context), II, IILoc: NameLoc, TSI: &TSI,
                          /*DeducedTSTContext=*/true);
    if (T.isNull())
      return ParsedType();
    return CreateParsedType(T, TInfo: TSI);
  }

  // The remaining cases are all non-standard extensions imitating the behavior
  // of various other compilers.
  // Remember how many declarations the conforming lookups produced, so the
  // extension lookups below don't pollute the failure diagnostics.
  unsigned NumNonExtensionDecls = FoundDecls.size();

  if (SS.isSet()) {
    // For compatibility with older broken C++ rules and existing code,
    //
    //   nested-name-specifier :: ~ type-name
    //
    // also looks for type-name within the nested-name-specifier.
    if (ParsedType T = LookupInNestedNameSpec(SS)) {
      Diag(Loc: SS.getEndLoc(), DiagID: diag::ext_dtor_named_in_wrong_scope)
          << SS.getRange()
          << FixItHint::CreateInsertion(InsertionLoc: SS.getEndLoc(),
                                        Code: ("::" + II.getName()).str());
      return T;
    }

    // For compatibility with other compilers and older versions of Clang,
    //
    //   nested-name-specifier type-name :: ~ type-name
    //
    // also looks for type-name in the scope. Unfortunately, we can't
    // reasonably apply this fallback for dependent nested-name-specifiers.
    if (Prefix) {
      if (ParsedType T = LookupInScope()) {
        Diag(Loc: SS.getEndLoc(), DiagID: diag::ext_qualified_dtor_named_in_lexical_scope)
            << FixItHint::CreateRemoval(RemoveRange: SS.getRange());
        Diag(Loc: FoundDecls.back()->getLocation(), DiagID: diag::note_destructor_type_here)
            << GetTypeFromParser(Ty: T);
        return T;
      }
    }
  }

  // We didn't find anything matching; tell the user what we did find (if
  // anything).

  // Don't tell the user about declarations we shouldn't have found.
  FoundDecls.resize(N: NumNonExtensionDecls);

  // List types before non-types.
  llvm::stable_sort(Range&: FoundDecls, C: [](NamedDecl *A, NamedDecl *B) {
    return isa<TypeDecl>(Val: A->getUnderlyingDecl()) >
           isa<TypeDecl>(Val: B->getUnderlyingDecl());
  });

  // Suggest a fixit to properly name the destroyed type.
  auto MakeFixItHint = [&]{
    const CXXRecordDecl *Destroyed = nullptr;
    // FIXME: If we have a scope specifier, suggest its last component?
    if (!SearchType.isNull())
      Destroyed = SearchType->getAsCXXRecordDecl();
    else if (S)
      Destroyed = dyn_cast_or_null<CXXRecordDecl>(Val: S->getEntity());
    if (Destroyed)
      return FixItHint::CreateReplacement(RemoveRange: SourceRange(NameLoc),
                                          Code: Destroyed->getNameAsString());
    return FixItHint();
  };

  if (FoundDecls.empty()) {
    // FIXME: Attempt typo-correction?
    Diag(Loc: NameLoc, DiagID: diag::err_undeclared_destructor_name)
      << &II << MakeFixItHint();
  } else if (!SearchType.isNull() && FoundDecls.size() == 1) {
    if (auto *TD = dyn_cast<TypeDecl>(Val: FoundDecls[0]->getUnderlyingDecl())) {
      assert(!SearchType.isNull() &&
             "should only reject a type result if we have a search type");
      Diag(Loc: NameLoc, DiagID: diag::err_destructor_expr_type_mismatch)
          << Context.getTypeDeclType(Keyword: ElaboratedTypeKeyword::None,
                                     /*Qualifier=*/std::nullopt, Decl: TD)
          << SearchType << MakeFixItHint();
    } else {
      Diag(Loc: NameLoc, DiagID: diag::err_destructor_expr_nontype)
          << &II << MakeFixItHint();
    }
  } else {
    Diag(Loc: NameLoc, DiagID: SearchType.isNull() ? diag::err_destructor_name_nontype
                                   : diag::err_destructor_expr_mismatch)
        << &II << SearchType << MakeFixItHint();
  }

  // Note each declaration we did find, types first (see the sort above).
  for (NamedDecl *FoundD : FoundDecls) {
    if (auto *TD = dyn_cast<TypeDecl>(Val: FoundD->getUnderlyingDecl()))
      Diag(Loc: FoundD->getLocation(), DiagID: diag::note_destructor_type_here)
          << Context.getTypeDeclType(Keyword: ElaboratedTypeKeyword::None,
                                     /*Qualifier=*/std::nullopt, Decl: TD);
    else
      Diag(Loc: FoundD->getLocation(), DiagID: diag::note_destructor_nontype_here)
          << FoundD;
  }

  return nullptr;
}
457
458ParsedType Sema::getDestructorTypeForDecltype(const DeclSpec &DS,
459 ParsedType ObjectType) {
460 if (DS.getTypeSpecType() == DeclSpec::TST_error)
461 return nullptr;
462
463 if (DS.getTypeSpecType() == DeclSpec::TST_decltype_auto) {
464 Diag(Loc: DS.getTypeSpecTypeLoc(), DiagID: diag::err_decltype_auto_invalid);
465 return nullptr;
466 }
467
468 assert(DS.getTypeSpecType() == DeclSpec::TST_decltype &&
469 "unexpected type in getDestructorType");
470 QualType T = BuildDecltypeType(E: DS.getRepAsExpr());
471
472 // If we know the type of the object, check that the correct destructor
473 // type was named now; we can give better diagnostics this way.
474 QualType SearchType = GetTypeFromParser(Ty: ObjectType);
475 if (!SearchType.isNull() && !SearchType->isDependentType() &&
476 !Context.hasSameUnqualifiedType(T1: T, T2: SearchType)) {
477 Diag(Loc: DS.getTypeSpecTypeLoc(), DiagID: diag::err_destructor_expr_type_mismatch)
478 << T << SearchType;
479 return nullptr;
480 }
481
482 return ParsedType::make(P: T);
483}
484
/// Semantic checks on a literal-operator-id (operator "" X). Warns about
/// reserved/deprecated suffix spellings and rejects qualification by
/// anything that is not a namespace-like scope. Returns true on error.
bool Sema::checkLiteralOperatorId(const CXXScopeSpec &SS,
                                  const UnqualifiedId &Name, bool IsUDSuffix) {
  assert(Name.getKind() == UnqualifiedIdKind::IK_LiteralOperatorId);
  if (!IsUDSuffix) {
    // [over.literal] p8
    //
    //   double operator""_Bq(long double);  // OK: not a reserved identifier
    //   double operator"" _Bq(long double); // ill-formed, no diagnostic required
    const IdentifierInfo *II = Name.Identifier;
    ReservedIdentifierStatus Status = II->isReserved(LangOpts: PP.getLangOpts());
    SourceLocation Loc = Name.getEndLoc();

    // Fixit: rewrite 'operator"" _x' as 'operator""_x' (no space).
    auto Hint = FixItHint::CreateReplacement(
        RemoveRange: Name.getSourceRange(),
        Code: (StringRef("operator\"\"") + II->getName()).str());

    // Only emit this diagnostic if we start with an underscore, else the
    // diagnostic for C++11 requiring a space between the quotes and the
    // identifier conflicts with this and gets confusing. The diagnostic stating
    // this is a reserved name should force the underscore, which gets this
    // back.
    if (II->isReservedLiteralSuffixId() !=
        ReservedLiteralSuffixIdStatus::NotStartsWithUnderscore)
      Diag(Loc, DiagID: diag::warn_deprecated_literal_operator_id) << II << Hint;

    if (isReservedInAllContexts(Status))
      Diag(Loc, DiagID: diag::warn_reserved_extern_symbol)
          << II << static_cast<int>(Status) << Hint;
  }

  switch (SS.getScopeRep().getKind()) {
  case NestedNameSpecifier::Kind::Type:
    // Per C++11 [over.literal]p2, literal operators can only be declared at
    // namespace scope. Therefore, this unqualified-id cannot name anything.
    // Reject it early, because we have no AST representation for this in the
    // case where the scope is dependent.
    Diag(Loc: Name.getBeginLoc(), DiagID: diag::err_literal_operator_id_outside_namespace)
        << SS.getScopeRep();
    return true;

  // All remaining qualifier kinds are namespace-like (or absent) and fine.
  case NestedNameSpecifier::Kind::Null:
  case NestedNameSpecifier::Kind::Global:
  case NestedNameSpecifier::Kind::MicrosoftSuper:
  case NestedNameSpecifier::Kind::Namespace:
    return false;
  }

  llvm_unreachable("unknown nested name specifier kind");
}
534
535ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
536 SourceLocation TypeidLoc,
537 TypeSourceInfo *Operand,
538 SourceLocation RParenLoc) {
539 // C++ [expr.typeid]p4:
540 // The top-level cv-qualifiers of the lvalue expression or the type-id
541 // that is the operand of typeid are always ignored.
542 // If the type of the type-id is a class type or a reference to a class
543 // type, the class shall be completely-defined.
544 Qualifiers Quals;
545 QualType T
546 = Context.getUnqualifiedArrayType(T: Operand->getType().getNonReferenceType(),
547 Quals);
548 if (T->isRecordType() &&
549 RequireCompleteType(Loc: TypeidLoc, T, DiagID: diag::err_incomplete_typeid))
550 return ExprError();
551
552 if (T->isVariablyModifiedType())
553 return ExprError(Diag(Loc: TypeidLoc, DiagID: diag::err_variably_modified_typeid) << T);
554
555 if (CheckQualifiedFunctionForTypeId(T, Loc: TypeidLoc))
556 return ExprError();
557
558 return new (Context) CXXTypeidExpr(TypeInfoType.withConst(), Operand,
559 SourceRange(TypeidLoc, RParenLoc));
560}
561
/// Build a typeid expression whose operand is an expression. Handles the
/// distinction between the potentially-evaluated case (glvalue of
/// polymorphic class type) and the unevaluated case, per [expr.typeid].
ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
                                SourceLocation TypeidLoc,
                                Expr *E,
                                SourceLocation RParenLoc) {
  // Tracks whether the operand ends up being evaluated at run time (only
  // for a glvalue of polymorphic class type); also selects which
  // side-effect warning we emit below.
  bool WasEvaluated = false;
  if (E && !E->isTypeDependent()) {
    if (E->hasPlaceholderType()) {
      ExprResult result = CheckPlaceholderExpr(E);
      if (result.isInvalid()) return ExprError();
      E = result.get();
    }

    QualType T = E->getType();
    if (auto *RecordD = T->getAsCXXRecordDecl()) {
      // C++ [expr.typeid]p3:
      //   [...] If the type of the expression is a class type, the class
      //   shall be completely-defined.
      if (RequireCompleteType(Loc: TypeidLoc, T, DiagID: diag::err_incomplete_typeid))
        return ExprError();

      // C++ [expr.typeid]p3:
      //   When typeid is applied to an expression other than an glvalue of a
      //   polymorphic class type [...] [the] expression is an unevaluated
      //   operand. [...]
      if (RecordD->isPolymorphic() && E->isGLValue()) {
        if (isUnevaluatedContext()) {
          // The operand was processed in unevaluated context, switch the
          // context and recheck the subexpression.
          ExprResult Result = TransformToPotentiallyEvaluated(E);
          if (Result.isInvalid())
            return ExprError();
          E = Result.get();
        }

        // We require a vtable to query the type at run time.
        MarkVTableUsed(Loc: TypeidLoc, Class: RecordD);
        WasEvaluated = true;
      }
    }

    ExprResult Result = CheckUnevaluatedOperand(E);
    if (Result.isInvalid())
      return ExprError();
    E = Result.get();

    // C++ [expr.typeid]p4:
    //   [...] If the type of the type-id is a reference to a possibly
    //   cv-qualified type, the result of the typeid expression refers to a
    //   std::type_info object representing the cv-unqualified referenced
    //   type.
    Qualifiers Quals;
    QualType UnqualT = Context.getUnqualifiedArrayType(T, Quals);
    if (!Context.hasSameType(T1: T, T2: UnqualT)) {
      T = UnqualT;
      E = ImpCastExprToType(E, Type: UnqualT, CK: CK_NoOp, VK: E->getValueKind()).get();
    }
  }

  if (E->getType()->isVariablyModifiedType())
    return ExprError(Diag(Loc: TypeidLoc, DiagID: diag::err_variably_modified_typeid)
                     << E->getType());
  else if (!inTemplateInstantiation() &&
           E->HasSideEffects(Ctx: Context, IncludePossibleEffects: WasEvaluated)) {
    // The expression operand for typeid is in an unevaluated expression
    // context, so side effects could result in unintended consequences.
    Diag(Loc: E->getExprLoc(), DiagID: WasEvaluated
                                ? diag::warn_side_effects_typeid
                                : diag::warn_side_effects_unevaluated_context);
  }

  return new (Context) CXXTypeidExpr(TypeInfoType.withConst(), E,
                                     SourceRange(TypeidLoc, RParenLoc));
}
635
/// ActOnCXXTypeidOfType - Parse typeid( type-id ) or typeid (expression);
/// locates std::type_info (caching it in CXXTypeInfoDecl), then dispatches
/// to the type or expression form of BuildCXXTypeId.
ExprResult
Sema::ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc,
                     bool isType, void *TyOrExpr, SourceLocation RParenLoc) {
  // typeid is not supported in OpenCL.
  if (getLangOpts().OpenCLCPlusPlus) {
    return ExprError(Diag(Loc: OpLoc, DiagID: diag::err_openclcxx_not_supported)
                     << "typeid");
  }

  // Find the std::type_info type.
  if (!getStdNamespace()) {
    return ExprError(Diag(Loc: OpLoc, DiagID: diag::err_need_header_before_typeid)
                     << (getLangOpts().CPlusPlus20 ? 1 : 0));
  }

  // Look up and cache the std::type_info RecordDecl on first use.
  if (!CXXTypeInfoDecl) {
    IdentifierInfo *TypeInfoII = &PP.getIdentifierTable().get(Name: "type_info");
    LookupResult R(*this, TypeInfoII, SourceLocation(), LookupTagName);
    LookupQualifiedName(R, LookupCtx: getStdNamespace());
    CXXTypeInfoDecl = R.getAsSingle<RecordDecl>();
    // Microsoft's typeinfo doesn't have type_info in std but in the global
    // namespace if _HAS_EXCEPTIONS is defined to 0. See PR13153.
    if (!CXXTypeInfoDecl && LangOpts.MSVCCompat) {
      LookupQualifiedName(R, LookupCtx: Context.getTranslationUnitDecl());
      CXXTypeInfoDecl = R.getAsSingle<RecordDecl>();
    }
    if (!CXXTypeInfoDecl)
      return ExprError(Diag(Loc: OpLoc, DiagID: diag::err_need_header_before_typeid)
                       << (getLangOpts().CPlusPlus20 ? 1 : 0));
  }

  if (!getLangOpts().RTTI) {
    return ExprError(Diag(Loc: OpLoc, DiagID: diag::err_no_typeid_with_fno_rtti));
  }

  CanQualType TypeInfoType = Context.getCanonicalTagType(TD: CXXTypeInfoDecl);

  if (isType) {
    // The operand is a type; handle it as such.
    TypeSourceInfo *TInfo = nullptr;
    QualType T = GetTypeFromParser(Ty: ParsedType::getFromOpaquePtr(P: TyOrExpr),
                                   TInfo: &TInfo);
    if (T.isNull())
      return ExprError();

    if (!TInfo)
      TInfo = Context.getTrivialTypeSourceInfo(T, Loc: OpLoc);

    return BuildCXXTypeId(TypeInfoType, TypeidLoc: OpLoc, Operand: TInfo, RParenLoc);
  }

  // The operand is an expression.
  ExprResult Result =
      BuildCXXTypeId(TypeInfoType, TypeidLoc: OpLoc, E: (Expr *)TyOrExpr, RParenLoc);

  // With -fno-rtti-data, a potentially-evaluated typeid can't be answered at
  // run time (except for the most-derived case); warn about that.
  if (!getLangOpts().RTTIData && !Result.isInvalid())
    if (auto *CTE = dyn_cast<CXXTypeidExpr>(Val: Result.get()))
      if (CTE->isPotentiallyEvaluated() && !CTE->isMostDerived(Context))
        Diag(Loc: OpLoc, DiagID: diag::warn_no_typeid_with_rtti_disabled)
            << (getDiagnostics().getDiagnosticOptions().getFormat() ==
                DiagnosticOptions::MSVC);
  return Result;
}
700
701/// Grabs __declspec(uuid()) off a type, or returns 0 if we cannot resolve to
702/// a single GUID.
703static void
704getUuidAttrOfType(Sema &SemaRef, QualType QT,
705 llvm::SmallSetVector<const UuidAttr *, 1> &UuidAttrs) {
706 // Optionally remove one level of pointer, reference or array indirection.
707 const Type *Ty = QT.getTypePtr();
708 if (QT->isPointerOrReferenceType())
709 Ty = QT->getPointeeType().getTypePtr();
710 else if (QT->isArrayType())
711 Ty = Ty->getBaseElementTypeUnsafe();
712
713 const auto *TD = Ty->getAsTagDecl();
714 if (!TD)
715 return;
716
717 if (const auto *Uuid = TD->getMostRecentDecl()->getAttr<UuidAttr>()) {
718 UuidAttrs.insert(X: Uuid);
719 return;
720 }
721
722 // __uuidof can grab UUIDs from template arguments.
723 if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(Val: TD)) {
724 const TemplateArgumentList &TAL = CTSD->getTemplateArgs();
725 for (const TemplateArgument &TA : TAL.asArray()) {
726 const UuidAttr *UuidForTA = nullptr;
727 if (TA.getKind() == TemplateArgument::Type)
728 getUuidAttrOfType(SemaRef, QT: TA.getAsType(), UuidAttrs);
729 else if (TA.getKind() == TemplateArgument::Declaration)
730 getUuidAttrOfType(SemaRef, QT: TA.getAsDecl()->getType(), UuidAttrs);
731
732 if (UuidForTA)
733 UuidAttrs.insert(X: UuidForTA);
734 }
735 }
736}
737
738ExprResult Sema::BuildCXXUuidof(QualType Type,
739 SourceLocation TypeidLoc,
740 TypeSourceInfo *Operand,
741 SourceLocation RParenLoc) {
742 MSGuidDecl *Guid = nullptr;
743 if (!Operand->getType()->isDependentType()) {
744 llvm::SmallSetVector<const UuidAttr *, 1> UuidAttrs;
745 getUuidAttrOfType(SemaRef&: *this, QT: Operand->getType(), UuidAttrs);
746 if (UuidAttrs.empty())
747 return ExprError(Diag(Loc: TypeidLoc, DiagID: diag::err_uuidof_without_guid));
748 if (UuidAttrs.size() > 1)
749 return ExprError(Diag(Loc: TypeidLoc, DiagID: diag::err_uuidof_with_multiple_guids));
750 Guid = UuidAttrs.back()->getGuidDecl();
751 }
752
753 return new (Context)
754 CXXUuidofExpr(Type, Operand, Guid, SourceRange(TypeidLoc, RParenLoc));
755}
756
757ExprResult Sema::BuildCXXUuidof(QualType Type, SourceLocation TypeidLoc,
758 Expr *E, SourceLocation RParenLoc) {
759 MSGuidDecl *Guid = nullptr;
760 if (!E->getType()->isDependentType()) {
761 if (E->isNullPointerConstant(Ctx&: Context, NPC: Expr::NPC_ValueDependentIsNull)) {
762 // A null pointer results in {00000000-0000-0000-0000-000000000000}.
763 Guid = Context.getMSGuidDecl(Parts: MSGuidDecl::Parts{});
764 } else {
765 llvm::SmallSetVector<const UuidAttr *, 1> UuidAttrs;
766 getUuidAttrOfType(SemaRef&: *this, QT: E->getType(), UuidAttrs);
767 if (UuidAttrs.empty())
768 return ExprError(Diag(Loc: TypeidLoc, DiagID: diag::err_uuidof_without_guid));
769 if (UuidAttrs.size() > 1)
770 return ExprError(Diag(Loc: TypeidLoc, DiagID: diag::err_uuidof_with_multiple_guids));
771 Guid = UuidAttrs.back()->getGuidDecl();
772 }
773 }
774
775 return new (Context)
776 CXXUuidofExpr(Type, E, Guid, SourceRange(TypeidLoc, RParenLoc));
777}
778
779/// ActOnCXXUuidof - Parse __uuidof( type-id ) or __uuidof (expression);
780ExprResult
781Sema::ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc,
782 bool isType, void *TyOrExpr, SourceLocation RParenLoc) {
783 QualType GuidType = Context.getMSGuidType();
784 GuidType.addConst();
785
786 if (isType) {
787 // The operand is a type; handle it as such.
788 TypeSourceInfo *TInfo = nullptr;
789 QualType T = GetTypeFromParser(Ty: ParsedType::getFromOpaquePtr(P: TyOrExpr),
790 TInfo: &TInfo);
791 if (T.isNull())
792 return ExprError();
793
794 if (!TInfo)
795 TInfo = Context.getTrivialTypeSourceInfo(T, Loc: OpLoc);
796
797 return BuildCXXUuidof(Type: GuidType, TypeidLoc: OpLoc, Operand: TInfo, RParenLoc);
798 }
799
800 // The operand is an expression.
801 return BuildCXXUuidof(Type: GuidType, TypeidLoc: OpLoc, E: (Expr*)TyOrExpr, RParenLoc);
802}
803
804ExprResult
805Sema::ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind) {
806 assert((Kind == tok::kw_true || Kind == tok::kw_false) &&
807 "Unknown C++ Boolean value!");
808 return new (Context)
809 CXXBoolLiteralExpr(Kind == tok::kw_true, Context.BoolTy, OpLoc);
810}
811
812ExprResult
813Sema::ActOnCXXNullPtrLiteral(SourceLocation Loc) {
814 return new (Context) CXXNullPtrLiteralExpr(Context.NullPtrTy, Loc);
815}
816
/// Parser callback for a throw-expression. Determines whether the thrown
/// operand names an automatic variable still in scope (enabling the
/// copy-elision/NRVO-style treatment below) before delegating to
/// BuildCXXThrow.
ExprResult
Sema::ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *Ex) {
  bool IsThrownVarInScope = false;
  if (Ex) {
    // C++0x [class.copymove]p31:
    //   When certain criteria are met, an implementation is allowed to omit the
    //   copy/move construction of a class object [...]
    //
    //     - in a throw-expression, when the operand is the name of a
    //       non-volatile automatic object (other than a function or catch-
    //       clause parameter) whose scope does not extend beyond the end of the
    //       innermost enclosing try-block (if there is one), the copy/move
    //       operation from the operand to the exception object (15.1) can be
    //       omitted by constructing the automatic object directly into the
    //       exception object
    if (const auto *DRE = dyn_cast<DeclRefExpr>(Val: Ex->IgnoreParens()))
      if (const auto *Var = dyn_cast<VarDecl>(Val: DRE->getDecl());
          Var && Var->hasLocalStorage() &&
          !Var->getType().isVolatileQualified()) {
        // Walk outward from the current scope looking for the variable's
        // declaration scope, stopping at boundaries past which the
        // elision criteria no longer apply.
        for (; S; S = S->getParent()) {
          if (S->isDeclScope(D: Var)) {
            IsThrownVarInScope = true;
            break;
          }

          // FIXME: Many of the scope checks here seem incorrect.
          if (S->getFlags() &
              (Scope::FnScope | Scope::ClassScope | Scope::BlockScope |
               Scope::ObjCMethodScope | Scope::TryScope))
            break;
        }
      }
  }

  return BuildCXXThrow(OpLoc, Ex, IsThrownVarInScope);
}
853
ExprResult Sema::BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
                               bool IsThrownVarInScope) {
  // Builds a CXXThrowExpr for 'throw Ex' (Ex may be null for a rethrow).
  // Emits target/language-mode restrictions first, then initializes the
  // exception object from the operand.
  const llvm::Triple &T = Context.getTargetInfo().getTriple();
  const bool IsOpenMPGPUTarget =
      getLangOpts().OpenMPIsTargetDevice && T.isGPU();

  DiagnoseExceptionUse(Loc: OpLoc, /* IsTry= */ false);

  // In OpenMP target regions, we replace 'throw' with a trap on GPU targets.
  if (IsOpenMPGPUTarget)
    targetDiag(Loc: OpLoc, DiagID: diag::warn_throw_not_valid_on_target) << T.str();

  // Exceptions aren't allowed in CUDA device code.
  if (getLangOpts().CUDA)
    CUDA().DiagIfDeviceCode(Loc: OpLoc, DiagID: diag::err_cuda_device_exceptions)
        << "throw" << CUDA().CurrentTarget();

  // 'throw' is among the statements OpenMP simd regions forbid.
  if (getCurScope() && getCurScope()->isOpenMPSimdDirectiveScope())
    Diag(Loc: OpLoc, DiagID: diag::err_omp_simd_region_cannot_use_stmt) << "throw";

  // Exceptions that escape a compute construct are ill-formed.
  if (getLangOpts().OpenACC && getCurScope() &&
      getCurScope()->isInOpenACCComputeConstructScope(Flags: Scope::TryScope))
    Diag(Loc: OpLoc, DiagID: diag::err_acc_branch_in_out_compute_construct)
        << /*throw*/ 2 << /*out of*/ 0;

  if (Ex && !Ex->isTypeDependent()) {
    // Initialize the exception result. This implicitly weeds out
    // abstract types or types with inaccessible copy constructors.

    // C++0x [class.copymove]p31:
    //   When certain criteria are met, an implementation is allowed to omit the
    //   copy/move construction of a class object [...]
    //
    //     - in a throw-expression, when the operand is the name of a
    //       non-volatile automatic object (other than a function or
    //       catch-clause parameter) whose scope does not extend beyond the end
    //       of the innermost enclosing try-block (if there is one), the
    //       copy/move operation from the operand to the exception object (15.1)
    //       can be omitted by constructing the automatic object directly into
    //       the exception object
    NamedReturnInfo NRInfo =
        IsThrownVarInScope ? getNamedReturnInfo(E&: Ex) : NamedReturnInfo();

    // The exception object's type drops top-level cv-qualifiers etc.; verify
    // it is throwable before initializing from the operand.
    QualType ExceptionObjectTy = Context.getExceptionObjectType(T: Ex->getType());
    if (CheckCXXThrowOperand(ThrowLoc: OpLoc, ThrowTy: ExceptionObjectTy, E: Ex))
      return ExprError();

    InitializedEntity Entity =
        InitializedEntity::InitializeException(ThrowLoc: OpLoc, Type: ExceptionObjectTy);
    ExprResult Res = PerformMoveOrCopyInitialization(Entity, NRInfo, Value: Ex);
    if (Res.isInvalid())
      return ExprError();
    Ex = Res.get();
  }

  // PPC MMA non-pointer types are not allowed as throw expr types.
  if (Ex && Context.getTargetInfo().getTriple().isPPC64())
    PPC().CheckPPCMMAType(Type: Ex->getType(), TypeLoc: Ex->getBeginLoc());

  // A throw-expression itself has type void.
  return new (Context)
      CXXThrowExpr(Ex, Context.VoidTy, OpLoc, IsThrownVarInScope);
}
918
919static void
920collectPublicBases(CXXRecordDecl *RD,
921 llvm::DenseMap<CXXRecordDecl *, unsigned> &SubobjectsSeen,
922 llvm::SmallPtrSetImpl<CXXRecordDecl *> &VBases,
923 llvm::SetVector<CXXRecordDecl *> &PublicSubobjectsSeen,
924 bool ParentIsPublic) {
925 for (const CXXBaseSpecifier &BS : RD->bases()) {
926 CXXRecordDecl *BaseDecl = BS.getType()->getAsCXXRecordDecl();
927 bool NewSubobject;
928 // Virtual bases constitute the same subobject. Non-virtual bases are
929 // always distinct subobjects.
930 if (BS.isVirtual())
931 NewSubobject = VBases.insert(Ptr: BaseDecl).second;
932 else
933 NewSubobject = true;
934
935 if (NewSubobject)
936 ++SubobjectsSeen[BaseDecl];
937
938 // Only add subobjects which have public access throughout the entire chain.
939 bool PublicPath = ParentIsPublic && BS.getAccessSpecifier() == AS_public;
940 if (PublicPath)
941 PublicSubobjectsSeen.insert(X: BaseDecl);
942
943 // Recurse on to each base subobject.
944 collectPublicBases(RD: BaseDecl, SubobjectsSeen, VBases, PublicSubobjectsSeen,
945 ParentIsPublic: PublicPath);
946 }
947}
948
949static void getUnambiguousPublicSubobjects(
950 CXXRecordDecl *RD, llvm::SmallVectorImpl<CXXRecordDecl *> &Objects) {
951 llvm::DenseMap<CXXRecordDecl *, unsigned> SubobjectsSeen;
952 llvm::SmallPtrSet<CXXRecordDecl *, 2> VBases;
953 llvm::SetVector<CXXRecordDecl *> PublicSubobjectsSeen;
954 SubobjectsSeen[RD] = 1;
955 PublicSubobjectsSeen.insert(X: RD);
956 collectPublicBases(RD, SubobjectsSeen, VBases, PublicSubobjectsSeen,
957 /*ParentIsPublic=*/true);
958
959 for (CXXRecordDecl *PublicSubobject : PublicSubobjectsSeen) {
960 // Skip ambiguous objects.
961 if (SubobjectsSeen[PublicSubobject] > 1)
962 continue;
963
964 Objects.push_back(Elt: PublicSubobject);
965 }
966}
967
bool Sema::CheckCXXThrowOperand(SourceLocation ThrowLoc,
                                QualType ExceptionObjectTy, Expr *E) {
  // Validates the operand of a throw-expression: type completeness,
  // non-abstractness, destructor/copy-constructor usability, and ABI-specific
  // constraints. Returns true on error.
  //
  // If the type of the exception would be an incomplete type or a pointer
  // to an incomplete type other than (cv) void the program is ill-formed.
  QualType Ty = ExceptionObjectTy;
  bool isPointer = false;
  if (const PointerType* Ptr = Ty->getAs<PointerType>()) {
    // Subsequent checks apply to the pointee; remember we went through a
    // pointer so the pointer-specific diagnostics are selected.
    Ty = Ptr->getPointeeType();
    isPointer = true;
  }

  // Cannot throw WebAssembly reference type.
  if (Ty.isWebAssemblyReferenceType()) {
    Diag(Loc: ThrowLoc, DiagID: diag::err_wasm_reftype_tc) << 0 << E->getSourceRange();
    return true;
  }

  // Cannot throw WebAssembly table.
  // NOTE(review): this branch looks unreachable — the check above already
  // returned for any Ty satisfying the same predicate, regardless of
  // isPointer. Presumably a table-specific predicate was intended; verify
  // against the WebAssembly reference-types tests.
  if (isPointer && Ty.isWebAssemblyReferenceType()) {
    Diag(Loc: ThrowLoc, DiagID: diag::err_wasm_table_art) << 2 << E->getSourceRange();
    return true;
  }

  if (!isPointer || !Ty->isVoidType()) {
    // The thrown type (or pointee) must be complete...
    if (RequireCompleteType(Loc: ThrowLoc, T: Ty,
                            DiagID: isPointer ? diag::err_throw_incomplete_ptr
                                      : diag::err_throw_incomplete,
                            Args: E->getSourceRange()))
      return true;

    // ...sized (sizeless types such as scalable vectors cannot be thrown by
    // value)...
    if (!isPointer && Ty->isSizelessType()) {
      Diag(Loc: ThrowLoc, DiagID: diag::err_throw_sizeless) << Ty << E->getSourceRange();
      return true;
    }

    // ...and non-abstract.
    if (RequireNonAbstractType(Loc: ThrowLoc, T: ExceptionObjectTy,
                               DiagID: diag::err_throw_abstract_type, Args: E))
      return true;
  }

  // If the exception has class type, we need additional handling.
  CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  if (!RD)
    return false;

  // If we are throwing a polymorphic class type or pointer thereof,
  // exception handling will make use of the vtable.
  MarkVTableUsed(Loc: ThrowLoc, Class: RD);

  // If a pointer is thrown, the referenced object will not be destroyed.
  if (isPointer)
    return false;

  // If the class has a destructor, we must be able to call it.
  if (!RD->hasIrrelevantDestructor()) {
    if (CXXDestructorDecl *Destructor = LookupDestructor(Class: RD)) {
      // The destructor is odr-used by the throw: mark it referenced and check
      // that it is accessible and not deleted/unavailable.
      MarkFunctionReferenced(Loc: E->getExprLoc(), Func: Destructor);
      CheckDestructorAccess(Loc: E->getExprLoc(), Dtor: Destructor,
                            PDiag: PDiag(DiagID: diag::err_access_dtor_exception) << Ty);
      if (DiagnoseUseOfDecl(D: Destructor, Locs: E->getExprLoc()))
        return true;
    }
  }

  // The MSVC ABI creates a list of all types which can catch the exception
  // object. This list also references the appropriate copy constructor to call
  // if the object is caught by value and has a non-trivial copy constructor.
  if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
    // We are only interested in the public, unambiguous bases contained within
    // the exception object. Bases which are ambiguous or otherwise
    // inaccessible are not catchable types.
    llvm::SmallVector<CXXRecordDecl *, 2> UnambiguousPublicSubobjects;
    getUnambiguousPublicSubobjects(RD, Objects&: UnambiguousPublicSubobjects);

    for (CXXRecordDecl *Subobject : UnambiguousPublicSubobjects) {
      // Attempt to lookup the copy constructor. Various pieces of machinery
      // will spring into action, like template instantiation, which means this
      // cannot be a simple walk of the class's decls. Instead, we must perform
      // lookup and overload resolution.
      CXXConstructorDecl *CD = LookupCopyingConstructor(Class: Subobject, Quals: 0);
      if (!CD || CD->isDeleted())
        continue;

      // Mark the constructor referenced as it is used by this throw expression.
      MarkFunctionReferenced(Loc: E->getExprLoc(), Func: CD);

      // Skip this copy constructor if it is trivial, we don't need to record it
      // in the catchable type data.
      if (CD->isTrivial())
        continue;

      // The copy constructor is non-trivial, create a mapping from this class
      // type to this constructor.
      // N.B.  The selection of copy constructor is not sensitive to this
      // particular throw-site. Lookup will be performed at the catch-site to
      // ensure that the copy constructor is, in fact, accessible (via
      // friendship or any other means).
      Context.addCopyConstructorForExceptionObject(RD: Subobject, CD);

      // We don't keep the instantiated default argument expressions around so
      // we must rebuild them here.
      for (unsigned I = 1, E = CD->getNumParams(); I != E; ++I) {
        if (CheckCXXDefaultArgExpr(CallLoc: ThrowLoc, FD: CD, Param: CD->getParamDecl(i: I)))
          return true;
      }
    }
  }

  // Under the Itanium C++ ABI, memory for the exception object is allocated by
  // the runtime with no ability for the compiler to request additional
  // alignment. Warn if the exception type requires alignment beyond the minimum
  // guaranteed by the target C++ runtime.
  if (Context.getTargetInfo().getCXXABI().isItaniumFamily()) {
    CharUnits TypeAlign = Context.getTypeAlignInChars(T: Ty);
    CharUnits ExnObjAlign = Context.getExnObjectAlignment();
    if (ExnObjAlign < TypeAlign) {
      Diag(Loc: ThrowLoc, DiagID: diag::warn_throw_underaligned_obj);
      Diag(Loc: ThrowLoc, DiagID: diag::note_throw_underaligned_obj)
          << Ty << (unsigned)TypeAlign.getQuantity()
          << (unsigned)ExnObjAlign.getQuantity();
    }
  }
  // Under -fassume-nothrow-exception-dtor, throwing an object whose destructor
  // may itself throw is an error.
  if (!isPointer && getLangOpts().AssumeNothrowExceptionDtor) {
    if (CXXDestructorDecl *Dtor = RD->getDestructor()) {
      auto Ty = Dtor->getType();
      if (auto *FT = Ty.getTypePtr()->getAs<FunctionProtoType>()) {
        if (!isUnresolvedExceptionSpec(ESpecType: FT->getExceptionSpecType()) &&
            !FT->isNothrow())
          Diag(Loc: ThrowLoc, DiagID: diag::err_throw_object_throwing_dtor) << RD;
      }
    }
  }

  return false;
}
1103
/// Adjust the cv-qualifiers of the 'this' pointer type when the surrounding
/// lambda (or an enclosing one) captures '*this' by copy: the captured copy's
/// constness then governs the type of 'this' seen inside. Returns the
/// (possibly adjusted) pointer type; \p ThisTy is returned unchanged if no
/// by-copy capture affects it.
static QualType adjustCVQualifiersForCXXThisWithinLambda(
    ArrayRef<FunctionScopeInfo *> FunctionScopes, QualType ThisTy,
    DeclContext *CurSemaContext, ASTContext &ASTCtx) {

  QualType ClassType = ThisTy->getPointeeType();
  LambdaScopeInfo *CurLSI = nullptr;
  DeclContext *CurDC = CurSemaContext;

  // Iterate through the stack of lambdas starting from the innermost lambda to
  // the outermost lambda, checking if '*this' is ever captured by copy - since
  // that could change the cv-qualifiers of the '*this' object.
  // The object referred to by '*this' starts out with the cv-qualifiers of its
  // member function.  We then start with the innermost lambda and iterate
  // outward checking to see if any lambda performs a by-copy capture of '*this'
  // - and if so, any nested lambda must respect the 'constness' of that
  // capturing lambda's call operator.
  //

  // Since the FunctionScopeInfo stack is representative of the lexical
  // nesting of the lambda expressions during initial parsing (and is the best
  // place for querying information about captures about lambdas that are
  // partially processed) and perhaps during instantiation of function templates
  // that contain lambda expressions that need to be transformed BUT not
  // necessarily during instantiation of a nested generic lambda's function call
  // operator (which might even be instantiated at the end of the TU) - at which
  // time the DeclContext tree is mature enough to query capture information
  // reliably - we use a two pronged approach to walk through all the lexically
  // enclosing lambda expressions:
  //
  //  1) Climb down the FunctionScopeInfo stack as long as each item represents
  //  a Lambda (i.e. LambdaScopeInfo) AND each LSI's 'closure-type' is lexically
  //  enclosed by the call-operator of the LSI below it on the stack (while
  //  tracking the enclosing DC for step 2 if needed).  Note the topmost LSI on
  //  the stack represents the innermost lambda.
  //
  //  2) If we run out of enclosing LSI's, check if the enclosing DeclContext
  //  represents a lambda's call operator.  If it does, we must be instantiating
  //  a generic lambda's call operator (represented by the Current LSI, and
  //  should be the only scenario where an inconsistency between the LSI and the
  //  DeclContext should occur), so climb out the DeclContexts if they
  //  represent lambdas, while querying the corresponding closure types
  //  regarding capture information.

  // 1) Climb down the function scope info stack.
  // Note the unusual loop shape: the body runs for each enclosing
  // LambdaScopeInfo, and CurDC is advanced to the lambda-aware parent in the
  // increment clause only after the body completes without returning.
  for (int I = FunctionScopes.size();
       I-- && isa<LambdaScopeInfo>(Val: FunctionScopes[I]) &&
       (!CurLSI || !CurLSI->Lambda || CurLSI->Lambda->getDeclContext() ==
                       cast<LambdaScopeInfo>(Val: FunctionScopes[I])->CallOperator);
       CurDC = getLambdaAwareParentOfDeclContext(DC: CurDC)) {
    CurLSI = cast<LambdaScopeInfo>(Val: FunctionScopes[I]);

    if (!CurLSI->isCXXThisCaptured())
      continue;

    auto C = CurLSI->getCXXThisCapture();

    // The innermost by-copy capture decides the qualifiers: a const call
    // operator makes the captured copy const.
    if (C.isCopyCapture()) {
      if (CurLSI->lambdaCaptureShouldBeConst())
        ClassType.addConst();
      return ASTCtx.getPointerType(T: ClassType);
    }
  }

  // 2) We've run out of ScopeInfos but check 1. if CurDC is a lambda (which
  // can happen during instantiation of its nested generic lambda call
  // operator); 2. if we're in a lambda scope (lambda body).
  if (CurLSI && isLambdaCallOperator(DC: CurDC)) {
    assert(isGenericLambdaCallOperatorSpecialization(CurLSI->CallOperator) &&
           "While computing 'this' capture-type for a generic lambda, when we "
           "run out of enclosing LSI's, yet the enclosing DC is a "
           "lambda-call-operator we must be (i.e. Current LSI) in a generic "
           "lambda call oeprator");
    assert(CurDC == getLambdaAwareParentOfDeclContext(CurLSI->CallOperator));

    // Queries a closure type: returns true if it captures 'this' at all, and
    // reports via the out-params whether that capture is by copy ('*this')
    // and whether the call operator is const.
    auto IsThisCaptured =
        [](CXXRecordDecl *Closure, bool &IsByCopy, bool &IsConst) {
      IsConst = false;
      IsByCopy = false;
      for (auto &&C : Closure->captures()) {
        if (C.capturesThis()) {
          if (C.getCaptureKind() == LCK_StarThis)
            IsByCopy = true;
          if (Closure->getLambdaCallOperator()->isConst())
            IsConst = true;
          return true;
        }
      }
      return false;
    };

    bool IsByCopyCapture = false;
    bool IsConstCapture = false;
    // Walk outward through enclosing closure types via the DeclContext tree.
    CXXRecordDecl *Closure = cast<CXXRecordDecl>(Val: CurDC->getParent());
    while (Closure &&
           IsThisCaptured(Closure, IsByCopyCapture, IsConstCapture)) {
      if (IsByCopyCapture) {
        if (IsConstCapture)
          ClassType.addConst();
        return ASTCtx.getPointerType(T: ClassType);
      }
      Closure = isLambdaCallOperator(DC: Closure->getParent())
                    ? cast<CXXRecordDecl>(Val: Closure->getParent()->getParent())
                    : nullptr;
    }
  }
  // No by-copy capture of '*this' found; keep the original type.
  return ThisTy;
}
1211
QualType Sema::getCurrentThisType() {
  // Computes the type of 'this' at the current point of translation, or a
  // null QualType if 'this' is unavailable here. Starts from any active
  // CXXThisTypeOverride (see CXXThisScopeRAII).
  DeclContext *DC = getFunctionLevelDeclContext();
  QualType ThisTy = CXXThisTypeOverride;

  // Within an implicit object member function, 'this' has the method's
  // cv-qualified class pointer type.
  if (CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(Val: DC)) {
    if (method && method->isImplicitObjectMemberFunction())
      ThisTy = method->getThisType().getNonReferenceType();
  }

  if (ThisTy.isNull() && isLambdaCallWithImplicitObjectParameter(DC: CurContext) &&
      inTemplateInstantiation() && isa<CXXRecordDecl>(Val: DC)) {

    // This is a lambda call operator that is being instantiated as a default
    // initializer. DC must point to the enclosing class type, so we can recover
    // the 'this' type from it.
    CanQualType ClassTy = Context.getCanonicalTagType(TD: cast<CXXRecordDecl>(Val: DC));
    // There are no cv-qualifiers for 'this' within default initializers,
    // per [expr.prim.general]p4.
    ThisTy = Context.getPointerType(T: ClassTy);
  }

  // If we are within a lambda's call operator, the cv-qualifiers of 'this'
  // might need to be adjusted if the lambda or any of its enclosing lambda's
  // captures '*this' by copy.
  if (!ThisTy.isNull() && isLambdaCallOperator(DC: CurContext))
    return adjustCVQualifiersForCXXThisWithinLambda(FunctionScopes, ThisTy,
                                                    CurSemaContext: CurContext, ASTCtx&: Context);
  return ThisTy;
}
1241
1242Sema::CXXThisScopeRAII::CXXThisScopeRAII(Sema &S,
1243 Decl *ContextDecl,
1244 Qualifiers CXXThisTypeQuals,
1245 bool Enabled)
1246 : S(S), OldCXXThisTypeOverride(S.CXXThisTypeOverride), Enabled(false)
1247{
1248 if (!Enabled || !ContextDecl)
1249 return;
1250
1251 CXXRecordDecl *Record = nullptr;
1252 if (ClassTemplateDecl *Template = dyn_cast<ClassTemplateDecl>(Val: ContextDecl))
1253 Record = Template->getTemplatedDecl();
1254 else
1255 Record = cast<CXXRecordDecl>(Val: ContextDecl);
1256
1257 // 'this' never refers to the lambda class itself.
1258 if (Record->isLambda())
1259 return;
1260
1261 QualType T = S.Context.getCanonicalTagType(TD: Record);
1262 T = S.getASTContext().getQualifiedType(T, Qs: CXXThisTypeQuals);
1263
1264 S.CXXThisTypeOverride =
1265 S.Context.getLangOpts().HLSL ? T : S.Context.getPointerType(T);
1266
1267 this->Enabled = true;
1268}
1269
1270
1271Sema::CXXThisScopeRAII::~CXXThisScopeRAII() {
1272 if (Enabled) {
1273 S.CXXThisTypeOverride = OldCXXThisTypeOverride;
1274 }
1275}
1276
1277static void buildLambdaThisCaptureFixit(Sema &Sema, LambdaScopeInfo *LSI) {
1278 SourceLocation DiagLoc = LSI->IntroducerRange.getEnd();
1279 assert(!LSI->isCXXThisCaptured());
1280 // [=, this] {}; // until C++20: Error: this when = is the default
1281 if (LSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_LambdaByval &&
1282 !Sema.getLangOpts().CPlusPlus20)
1283 return;
1284 Sema.Diag(Loc: DiagLoc, DiagID: diag::note_lambda_this_capture_fixit)
1285 << FixItHint::CreateInsertion(
1286 InsertionLoc: DiagLoc, Code: LSI->NumExplicitCaptures > 0 ? ", this" : "this");
1287}
1288
1289bool Sema::CheckCXXThisCapture(SourceLocation Loc, const bool Explicit,
1290 bool BuildAndDiagnose, const unsigned *const FunctionScopeIndexToStopAt,
1291 const bool ByCopy) {
1292 // We don't need to capture this in an unevaluated context.
1293 if (isUnevaluatedContext() && !Explicit)
1294 return true;
1295
1296 assert((!ByCopy || Explicit) && "cannot implicitly capture *this by value");
1297
1298 const int MaxFunctionScopesIndex = FunctionScopeIndexToStopAt
1299 ? *FunctionScopeIndexToStopAt
1300 : FunctionScopes.size() - 1;
1301
1302 // Check that we can capture the *enclosing object* (referred to by '*this')
1303 // by the capturing-entity/closure (lambda/block/etc) at
1304 // MaxFunctionScopesIndex-deep on the FunctionScopes stack.
1305
1306 // Note: The *enclosing object* can only be captured by-value by a
1307 // closure that is a lambda, using the explicit notation:
1308 // [*this] { ... }.
1309 // Every other capture of the *enclosing object* results in its by-reference
1310 // capture.
1311
1312 // For a closure 'L' (at MaxFunctionScopesIndex in the FunctionScopes
1313 // stack), we can capture the *enclosing object* only if:
1314 // - 'L' has an explicit byref or byval capture of the *enclosing object*
1315 // - or, 'L' has an implicit capture.
1316 // AND
1317 // -- there is no enclosing closure
1318 // -- or, there is some enclosing closure 'E' that has already captured the
1319 // *enclosing object*, and every intervening closure (if any) between 'E'
1320 // and 'L' can implicitly capture the *enclosing object*.
1321 // -- or, every enclosing closure can implicitly capture the
1322 // *enclosing object*
1323
1324
1325 unsigned NumCapturingClosures = 0;
1326 for (int idx = MaxFunctionScopesIndex; idx >= 0; idx--) {
1327 if (CapturingScopeInfo *CSI =
1328 dyn_cast<CapturingScopeInfo>(Val: FunctionScopes[idx])) {
1329 if (CSI->CXXThisCaptureIndex != 0) {
1330 // 'this' is already being captured; there isn't anything more to do.
1331 CSI->Captures[CSI->CXXThisCaptureIndex - 1].markUsed(IsODRUse: BuildAndDiagnose);
1332 break;
1333 }
1334 LambdaScopeInfo *LSI = dyn_cast<LambdaScopeInfo>(Val: CSI);
1335 if (LSI && isGenericLambdaCallOperatorSpecialization(MD: LSI->CallOperator)) {
1336 // This context can't implicitly capture 'this'; fail out.
1337 if (BuildAndDiagnose) {
1338 LSI->CallOperator->setInvalidDecl();
1339 Diag(Loc, DiagID: diag::err_this_capture)
1340 << (Explicit && idx == MaxFunctionScopesIndex);
1341 if (!Explicit)
1342 buildLambdaThisCaptureFixit(Sema&: *this, LSI);
1343 }
1344 return true;
1345 }
1346 if (CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_LambdaByref ||
1347 CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_LambdaByval ||
1348 CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_Block ||
1349 CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_CapturedRegion ||
1350 (Explicit && idx == MaxFunctionScopesIndex)) {
1351 // Regarding (Explicit && idx == MaxFunctionScopesIndex): only the first
1352 // iteration through can be an explicit capture, all enclosing closures,
1353 // if any, must perform implicit captures.
1354
1355 // This closure can capture 'this'; continue looking upwards.
1356 NumCapturingClosures++;
1357 continue;
1358 }
1359 // This context can't implicitly capture 'this'; fail out.
1360 if (BuildAndDiagnose) {
1361 LSI->CallOperator->setInvalidDecl();
1362 Diag(Loc, DiagID: diag::err_this_capture)
1363 << (Explicit && idx == MaxFunctionScopesIndex);
1364 }
1365 if (!Explicit)
1366 buildLambdaThisCaptureFixit(Sema&: *this, LSI);
1367 return true;
1368 }
1369 break;
1370 }
1371 if (!BuildAndDiagnose) return false;
1372
1373 // If we got here, then the closure at MaxFunctionScopesIndex on the
1374 // FunctionScopes stack, can capture the *enclosing object*, so capture it
1375 // (including implicit by-reference captures in any enclosing closures).
1376
1377 // In the loop below, respect the ByCopy flag only for the closure requesting
1378 // the capture (i.e. first iteration through the loop below). Ignore it for
1379 // all enclosing closure's up to NumCapturingClosures (since they must be
1380 // implicitly capturing the *enclosing object* by reference (see loop
1381 // above)).
1382 assert((!ByCopy ||
1383 isa<LambdaScopeInfo>(FunctionScopes[MaxFunctionScopesIndex])) &&
1384 "Only a lambda can capture the enclosing object (referred to by "
1385 "*this) by copy");
1386 QualType ThisTy = getCurrentThisType();
1387 for (int idx = MaxFunctionScopesIndex; NumCapturingClosures;
1388 --idx, --NumCapturingClosures) {
1389 CapturingScopeInfo *CSI = cast<CapturingScopeInfo>(Val: FunctionScopes[idx]);
1390
1391 // The type of the corresponding data member (not a 'this' pointer if 'by
1392 // copy').
1393 QualType CaptureType = ByCopy ? ThisTy->getPointeeType() : ThisTy;
1394
1395 bool isNested = NumCapturingClosures > 1;
1396 CSI->addThisCapture(isNested, Loc, CaptureType, ByCopy);
1397 }
1398 return false;
1399}
1400
1401ExprResult Sema::ActOnCXXThis(SourceLocation Loc) {
1402 // C++20 [expr.prim.this]p1:
1403 // The keyword this names a pointer to the object for which an
1404 // implicit object member function is invoked or a non-static
1405 // data member's initializer is evaluated.
1406 QualType ThisTy = getCurrentThisType();
1407
1408 if (CheckCXXThisType(Loc, Type: ThisTy))
1409 return ExprError();
1410
1411 return BuildCXXThisExpr(Loc, Type: ThisTy, /*IsImplicit=*/false);
1412}
1413
1414bool Sema::CheckCXXThisType(SourceLocation Loc, QualType Type) {
1415 if (!Type.isNull())
1416 return false;
1417
1418 // C++20 [expr.prim.this]p3:
1419 // If a declaration declares a member function or member function template
1420 // of a class X, the expression this is a prvalue of type
1421 // "pointer to cv-qualifier-seq X" wherever X is the current class between
1422 // the optional cv-qualifier-seq and the end of the function-definition,
1423 // member-declarator, or declarator. It shall not appear within the
1424 // declaration of either a static member function or an explicit object
1425 // member function of the current class (although its type and value
1426 // category are defined within such member functions as they are within
1427 // an implicit object member function).
1428 DeclContext *DC = getFunctionLevelDeclContext();
1429 const auto *Method = dyn_cast<CXXMethodDecl>(Val: DC);
1430 if (Method && Method->isExplicitObjectMemberFunction()) {
1431 Diag(Loc, DiagID: diag::err_invalid_this_use) << 1;
1432 } else if (Method && isLambdaCallWithExplicitObjectParameter(DC: CurContext)) {
1433 Diag(Loc, DiagID: diag::err_invalid_this_use) << 1;
1434 } else {
1435 Diag(Loc, DiagID: diag::err_invalid_this_use) << 0;
1436 }
1437 return true;
1438}
1439
1440Expr *Sema::BuildCXXThisExpr(SourceLocation Loc, QualType Type,
1441 bool IsImplicit) {
1442 auto *This = CXXThisExpr::Create(Ctx: Context, L: Loc, Ty: Type, IsImplicit);
1443 MarkThisReferenced(This);
1444 return This;
1445}
1446
void Sema::MarkThisReferenced(CXXThisExpr *This) {
  // Ensure 'this' is captured by the enclosing closures, then determine
  // whether the expression must additionally be flagged as captured-by-copy
  // in a lambda with a dependent explicit object parameter.
  CheckCXXThisCapture(Loc: This->getExprLoc());
  if (This->isTypeDependent())
    return;

  // Check if 'this' is captured by value in a lambda with a dependent explicit
  // object parameter, and mark it as type-dependent as well if so.
  auto IsDependent = [&]() {
    // Walk the function scopes from innermost to outermost, stopping at the
    // first lambda that decides the question either way.
    for (auto *Scope : llvm::reverse(C&: FunctionScopes)) {
      auto *LSI = dyn_cast<sema::LambdaScopeInfo>(Val: Scope);
      if (!LSI)
        continue;

      // A fully-parsed lambda that does not lexically enclose the current
      // context cannot affect 'this' here; stop looking.
      if (LSI->Lambda && !LSI->Lambda->Encloses(DC: CurContext) &&
          LSI->AfterParameterList)
        return false;

      // If this lambda captures 'this' by value, then 'this' is dependent iff
      // this lambda has a dependent explicit object parameter. If we can't
      // determine whether it does (e.g. because the CXXMethodDecl's type is
      // null), assume it doesn't.
      if (LSI->isCXXThisCaptured()) {
        if (!LSI->getCXXThisCapture().isCopyCapture())
          continue;

        const auto *MD = LSI->CallOperator;
        if (MD->getType().isNull())
          return false;

        const auto *Ty = MD->getType()->getAs<FunctionProtoType>();
        return Ty && MD->isExplicitObjectMemberFunction() &&
               Ty->getParamType(i: 0)->isDependentType();
      }
    }
    return false;
  }();

  This->setCapturedByCopyInLambdaWithExplicitObjectParameter(IsDependent);
}
1486
1487bool Sema::isThisOutsideMemberFunctionBody(QualType BaseType) {
1488 // If we're outside the body of a member function, then we'll have a specified
1489 // type for 'this'.
1490 if (CXXThisTypeOverride.isNull())
1491 return false;
1492
1493 // Determine whether we're looking into a class that's currently being
1494 // defined.
1495 CXXRecordDecl *Class = BaseType->getAsCXXRecordDecl();
1496 return Class && Class->isBeingDefined();
1497}
1498
1499ExprResult
1500Sema::ActOnCXXTypeConstructExpr(ParsedType TypeRep,
1501 SourceLocation LParenOrBraceLoc,
1502 MultiExprArg exprs,
1503 SourceLocation RParenOrBraceLoc,
1504 bool ListInitialization) {
1505 if (!TypeRep)
1506 return ExprError();
1507
1508 TypeSourceInfo *TInfo;
1509 QualType Ty = GetTypeFromParser(Ty: TypeRep, TInfo: &TInfo);
1510 if (!TInfo)
1511 TInfo = Context.getTrivialTypeSourceInfo(T: Ty, Loc: SourceLocation());
1512
1513 auto Result = BuildCXXTypeConstructExpr(Type: TInfo, LParenLoc: LParenOrBraceLoc, Exprs: exprs,
1514 RParenLoc: RParenOrBraceLoc, ListInitialization);
1515 if (Result.isInvalid())
1516 Result = CreateRecoveryExpr(Begin: TInfo->getTypeLoc().getBeginLoc(),
1517 End: RParenOrBraceLoc, SubExprs: exprs, T: Ty);
1518 return Result;
1519}
1520
1521ExprResult
1522Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
1523 SourceLocation LParenOrBraceLoc,
1524 MultiExprArg Exprs,
1525 SourceLocation RParenOrBraceLoc,
1526 bool ListInitialization) {
1527 QualType Ty = TInfo->getType();
1528 SourceLocation TyBeginLoc = TInfo->getTypeLoc().getBeginLoc();
1529 SourceRange FullRange = SourceRange(TyBeginLoc, RParenOrBraceLoc);
1530
1531 InitializedEntity Entity =
1532 InitializedEntity::InitializeTemporary(Context, TypeInfo: TInfo);
1533 InitializationKind Kind =
1534 Exprs.size()
1535 ? ListInitialization
1536 ? InitializationKind::CreateDirectList(
1537 InitLoc: TyBeginLoc, LBraceLoc: LParenOrBraceLoc, RBraceLoc: RParenOrBraceLoc)
1538 : InitializationKind::CreateDirect(InitLoc: TyBeginLoc, LParenLoc: LParenOrBraceLoc,
1539 RParenLoc: RParenOrBraceLoc)
1540 : InitializationKind::CreateValue(InitLoc: TyBeginLoc, LParenLoc: LParenOrBraceLoc,
1541 RParenLoc: RParenOrBraceLoc);
1542
1543 // C++17 [expr.type.conv]p1:
1544 // If the type is a placeholder for a deduced class type, [...perform class
1545 // template argument deduction...]
1546 // C++23:
1547 // Otherwise, if the type contains a placeholder type, it is replaced by the
1548 // type determined by placeholder type deduction.
1549 DeducedType *Deduced = Ty->getContainedDeducedType();
1550 if (Deduced && !Deduced->isDeduced() &&
1551 isa<DeducedTemplateSpecializationType>(Val: Deduced)) {
1552 Ty = DeduceTemplateSpecializationFromInitializer(TInfo, Entity,
1553 Kind, Init: Exprs);
1554 if (Ty.isNull())
1555 return ExprError();
1556 Entity = InitializedEntity::InitializeTemporary(TypeInfo: TInfo, Type: Ty);
1557 } else if (Deduced && !Deduced->isDeduced()) {
1558 MultiExprArg Inits = Exprs;
1559 if (ListInitialization) {
1560 auto *ILE = cast<InitListExpr>(Val: Exprs[0]);
1561 Inits = MultiExprArg(ILE->getInits(), ILE->getNumInits());
1562 }
1563
1564 if (Inits.empty())
1565 return ExprError(Diag(Loc: TyBeginLoc, DiagID: diag::err_auto_expr_init_no_expression)
1566 << Ty << FullRange);
1567 if (Inits.size() > 1) {
1568 Expr *FirstBad = Inits[1];
1569 return ExprError(Diag(Loc: FirstBad->getBeginLoc(),
1570 DiagID: diag::err_auto_expr_init_multiple_expressions)
1571 << Ty << FullRange);
1572 }
1573 if (getLangOpts().CPlusPlus23) {
1574 if (Ty->getAs<AutoType>())
1575 Diag(Loc: TyBeginLoc, DiagID: diag::warn_cxx20_compat_auto_expr) << FullRange;
1576 }
1577 Expr *Deduce = Inits[0];
1578 if (isa<InitListExpr>(Val: Deduce))
1579 return ExprError(
1580 Diag(Loc: Deduce->getBeginLoc(), DiagID: diag::err_auto_expr_init_paren_braces)
1581 << ListInitialization << Ty << FullRange);
1582 QualType DeducedType;
1583 TemplateDeductionInfo Info(Deduce->getExprLoc());
1584 TemplateDeductionResult Result =
1585 DeduceAutoType(AutoTypeLoc: TInfo->getTypeLoc(), Initializer: Deduce, Result&: DeducedType, Info);
1586 if (Result != TemplateDeductionResult::Success &&
1587 Result != TemplateDeductionResult::AlreadyDiagnosed)
1588 return ExprError(Diag(Loc: TyBeginLoc, DiagID: diag::err_auto_expr_deduction_failure)
1589 << Ty << Deduce->getType() << FullRange
1590 << Deduce->getSourceRange());
1591 if (DeducedType.isNull()) {
1592 assert(Result == TemplateDeductionResult::AlreadyDiagnosed);
1593 return ExprError();
1594 }
1595
1596 Ty = DeducedType;
1597 Entity = InitializedEntity::InitializeTemporary(TypeInfo: TInfo, Type: Ty);
1598 }
1599
1600 if (Ty->isDependentType() || CallExpr::hasAnyTypeDependentArguments(Exprs))
1601 return CXXUnresolvedConstructExpr::Create(
1602 Context, T: Ty.getNonReferenceType(), TSI: TInfo, LParenLoc: LParenOrBraceLoc, Args: Exprs,
1603 RParenLoc: RParenOrBraceLoc, IsListInit: ListInitialization);
1604
1605 // C++ [expr.type.conv]p1:
1606 // If the expression list is a parenthesized single expression, the type
1607 // conversion expression is equivalent (in definedness, and if defined in
1608 // meaning) to the corresponding cast expression.
1609 if (Exprs.size() == 1 && !ListInitialization &&
1610 !isa<InitListExpr>(Val: Exprs[0])) {
1611 Expr *Arg = Exprs[0];
1612 return BuildCXXFunctionalCastExpr(TInfo, Type: Ty, LParenLoc: LParenOrBraceLoc, CastExpr: Arg,
1613 RParenLoc: RParenOrBraceLoc);
1614 }
1615
1616 // For an expression of the form T(), T shall not be an array type.
1617 QualType ElemTy = Ty;
1618 if (Ty->isArrayType()) {
1619 if (!ListInitialization)
1620 return ExprError(Diag(Loc: TyBeginLoc, DiagID: diag::err_value_init_for_array_type)
1621 << FullRange);
1622 ElemTy = Context.getBaseElementType(QT: Ty);
1623 }
1624
1625 // Only construct objects with object types.
1626 // The standard doesn't explicitly forbid function types here, but that's an
1627 // obvious oversight, as there's no way to dynamically construct a function
1628 // in general.
1629 if (Ty->isFunctionType())
1630 return ExprError(Diag(Loc: TyBeginLoc, DiagID: diag::err_init_for_function_type)
1631 << Ty << FullRange);
1632
1633 // C++17 [expr.type.conv]p2, per DR2351:
1634 // If the type is cv void and the initializer is () or {}, the expression is
1635 // a prvalue of the specified type that performs no initialization.
1636 if (Ty->isVoidType()) {
1637 if (Exprs.empty())
1638 return new (Context) CXXScalarValueInitExpr(
1639 Ty.getUnqualifiedType(), TInfo, Kind.getRange().getEnd());
1640 if (ListInitialization &&
1641 cast<InitListExpr>(Val: Exprs[0])->getNumInits() == 0) {
1642 return CXXFunctionalCastExpr::Create(
1643 Context, T: Ty.getUnqualifiedType(), VK: VK_PRValue, Written: TInfo, Kind: CK_ToVoid,
1644 Op: Exprs[0], /*Path=*/nullptr, FPO: CurFPFeatureOverrides(),
1645 LPLoc: Exprs[0]->getBeginLoc(), RPLoc: Exprs[0]->getEndLoc());
1646 }
1647 } else if (RequireCompleteType(Loc: TyBeginLoc, T: ElemTy,
1648 DiagID: diag::err_invalid_incomplete_type_use,
1649 Args: FullRange))
1650 return ExprError();
1651
1652 // Otherwise, the expression is a prvalue of the specified type whose
1653 // result object is direct-initialized (11.6) with the initializer.
1654 InitializationSequence InitSeq(*this, Entity, Kind, Exprs);
1655 ExprResult Result = InitSeq.Perform(S&: *this, Entity, Kind, Args: Exprs);
1656
1657 if (Result.isInvalid())
1658 return Result;
1659
1660 Expr *Inner = Result.get();
1661 if (CXXBindTemporaryExpr *BTE = dyn_cast_or_null<CXXBindTemporaryExpr>(Val: Inner))
1662 Inner = BTE->getSubExpr();
1663 if (auto *CE = dyn_cast<ConstantExpr>(Val: Inner);
1664 CE && CE->isImmediateInvocation())
1665 Inner = CE->getSubExpr();
1666 if (!isa<CXXTemporaryObjectExpr>(Val: Inner) &&
1667 !isa<CXXScalarValueInitExpr>(Val: Inner)) {
1668 // If we created a CXXTemporaryObjectExpr, that node also represents the
1669 // functional cast. Otherwise, create an explicit cast to represent
1670 // the syntactic form of a functional-style cast that was used here.
1671 //
1672 // FIXME: Creating a CXXFunctionalCastExpr around a CXXConstructExpr
1673 // would give a more consistent AST representation than using a
1674 // CXXTemporaryObjectExpr. It's also weird that the functional cast
1675 // is sometimes handled by initialization and sometimes not.
1676 QualType ResultType = Result.get()->getType();
1677 SourceRange Locs = ListInitialization
1678 ? SourceRange()
1679 : SourceRange(LParenOrBraceLoc, RParenOrBraceLoc);
1680 Result = CXXFunctionalCastExpr::Create(
1681 Context, T: ResultType, VK: Expr::getValueKindForType(T: Ty), Written: TInfo, Kind: CK_NoOp,
1682 Op: Result.get(), /*Path=*/nullptr, FPO: CurFPFeatureOverrides(),
1683 LPLoc: Locs.getBegin(), RPLoc: Locs.getEnd());
1684 }
1685
1686 return Result;
1687}
1688
1689bool Sema::isUsualDeallocationFunction(const CXXMethodDecl *Method) {
1690 // [CUDA] Ignore this function, if we can't call it.
1691 const FunctionDecl *Caller = getCurFunctionDecl(/*AllowLambda=*/true);
1692 if (getLangOpts().CUDA) {
1693 auto CallPreference = CUDA().IdentifyPreference(Caller, Callee: Method);
1694 // If it's not callable at all, it's not the right function.
1695 if (CallPreference < SemaCUDA::CFP_WrongSide)
1696 return false;
1697 if (CallPreference == SemaCUDA::CFP_WrongSide) {
1698 // Maybe. We have to check if there are better alternatives.
1699 DeclContext::lookup_result R =
1700 Method->getDeclContext()->lookup(Name: Method->getDeclName());
1701 for (const auto *D : R) {
1702 if (const auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
1703 if (CUDA().IdentifyPreference(Caller, Callee: FD) > SemaCUDA::CFP_WrongSide)
1704 return false;
1705 }
1706 }
1707 // We've found no better variants.
1708 }
1709 }
1710
1711 SmallVector<const FunctionDecl*, 4> PreventedBy;
1712 bool Result = Method->isUsualDeallocationFunction(PreventedBy);
1713
1714 if (Result || !getLangOpts().CUDA || PreventedBy.empty())
1715 return Result;
1716
1717 // In case of CUDA, return true if none of the 1-argument deallocator
1718 // functions are actually callable.
1719 return llvm::none_of(Range&: PreventedBy, P: [&](const FunctionDecl *FD) {
1720 assert(FD->getNumParams() == 1 &&
1721 "Only single-operand functions should be in PreventedBy");
1722 return CUDA().IdentifyPreference(Caller, Callee: FD) >= SemaCUDA::CFP_HostDevice;
1723 });
1724}
1725
1726/// Determine whether the given function is a non-placement
1727/// deallocation function.
1728static bool isNonPlacementDeallocationFunction(Sema &S, FunctionDecl *FD) {
1729 if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Val: FD))
1730 return S.isUsualDeallocationFunction(Method);
1731
1732 if (!FD->getDeclName().isAnyOperatorDelete())
1733 return false;
1734
1735 if (FD->isTypeAwareOperatorNewOrDelete())
1736 return FunctionDecl::RequiredTypeAwareDeleteParameterCount ==
1737 FD->getNumParams();
1738
1739 unsigned UsualParams = 1;
1740 if (S.getLangOpts().SizedDeallocation && UsualParams < FD->getNumParams() &&
1741 S.Context.hasSameUnqualifiedType(
1742 T1: FD->getParamDecl(i: UsualParams)->getType(),
1743 T2: S.Context.getSizeType()))
1744 ++UsualParams;
1745
1746 if (S.getLangOpts().AlignedAllocation && UsualParams < FD->getNumParams() &&
1747 S.Context.hasSameUnqualifiedType(
1748 T1: FD->getParamDecl(i: UsualParams)->getType(),
1749 T2: S.Context.getCanonicalTagType(TD: S.getStdAlignValT())))
1750 ++UsualParams;
1751
1752 return UsualParams == FD->getNumParams();
1753}
1754
1755namespace {
1756 struct UsualDeallocFnInfo {
1757 UsualDeallocFnInfo()
1758 : Found(), FD(nullptr),
1759 IDP(AlignedAllocationMode::No, SizedDeallocationMode::No) {}
1760 UsualDeallocFnInfo(Sema &S, DeclAccessPair Found, QualType AllocType,
1761 SourceLocation Loc)
1762 : Found(Found), FD(dyn_cast<FunctionDecl>(Val: Found->getUnderlyingDecl())),
1763 Destroying(false),
1764 IDP({AllocType, TypeAwareAllocationMode::No,
1765 AlignedAllocationMode::No, SizedDeallocationMode::No}),
1766 CUDAPref(SemaCUDA::CFP_Native) {
1767 // A function template declaration is only a usual deallocation function
1768 // if it is a typed delete.
1769 if (!FD) {
1770 if (AllocType.isNull())
1771 return;
1772 auto *FTD = dyn_cast<FunctionTemplateDecl>(Val: Found->getUnderlyingDecl());
1773 if (!FTD)
1774 return;
1775 FunctionDecl *InstantiatedDecl =
1776 S.BuildTypeAwareUsualDelete(FnDecl: FTD, AllocType, Loc);
1777 if (!InstantiatedDecl)
1778 return;
1779 FD = InstantiatedDecl;
1780 }
1781 unsigned NumBaseParams = 1;
1782 if (FD->isTypeAwareOperatorNewOrDelete()) {
1783 // If this is a type aware operator delete we instantiate an appropriate
1784 // specialization of std::type_identity<>. If we do not know the
1785 // type being deallocated, or if the type-identity parameter of the
1786 // deallocation function does not match the constructed type_identity
1787 // specialization we reject the declaration.
1788 if (AllocType.isNull()) {
1789 FD = nullptr;
1790 return;
1791 }
1792 QualType TypeIdentityTag = FD->getParamDecl(i: 0)->getType();
1793 QualType ExpectedTypeIdentityTag =
1794 S.tryBuildStdTypeIdentity(Type: AllocType, Loc);
1795 if (ExpectedTypeIdentityTag.isNull()) {
1796 FD = nullptr;
1797 return;
1798 }
1799 if (!S.Context.hasSameType(T1: TypeIdentityTag, T2: ExpectedTypeIdentityTag)) {
1800 FD = nullptr;
1801 return;
1802 }
1803 IDP.PassTypeIdentity = TypeAwareAllocationMode::Yes;
1804 ++NumBaseParams;
1805 }
1806
1807 if (FD->isDestroyingOperatorDelete()) {
1808 Destroying = true;
1809 ++NumBaseParams;
1810 }
1811
1812 if (NumBaseParams < FD->getNumParams() &&
1813 S.Context.hasSameUnqualifiedType(
1814 T1: FD->getParamDecl(i: NumBaseParams)->getType(),
1815 T2: S.Context.getSizeType())) {
1816 ++NumBaseParams;
1817 IDP.PassSize = SizedDeallocationMode::Yes;
1818 }
1819
1820 if (NumBaseParams < FD->getNumParams() &&
1821 FD->getParamDecl(i: NumBaseParams)->getType()->isAlignValT()) {
1822 ++NumBaseParams;
1823 IDP.PassAlignment = AlignedAllocationMode::Yes;
1824 }
1825
1826 // In CUDA, determine how much we'd like / dislike to call this.
1827 if (S.getLangOpts().CUDA)
1828 CUDAPref = S.CUDA().IdentifyPreference(
1829 Caller: S.getCurFunctionDecl(/*AllowLambda=*/true), Callee: FD);
1830 }
1831
1832 explicit operator bool() const { return FD; }
1833
1834 int Compare(Sema &S, const UsualDeallocFnInfo &Other,
1835 ImplicitDeallocationParameters TargetIDP) const {
1836 assert(!TargetIDP.Type.isNull() ||
1837 !isTypeAwareAllocation(Other.IDP.PassTypeIdentity));
1838
1839 // C++ P0722:
1840 // A destroying operator delete is preferred over a non-destroying
1841 // operator delete.
1842 if (Destroying != Other.Destroying)
1843 return Destroying ? 1 : -1;
1844
1845 const ImplicitDeallocationParameters &OtherIDP = Other.IDP;
1846 // Selection for type awareness has priority over alignment and size
1847 if (IDP.PassTypeIdentity != OtherIDP.PassTypeIdentity)
1848 return IDP.PassTypeIdentity == TargetIDP.PassTypeIdentity ? 1 : -1;
1849
1850 // C++17 [expr.delete]p10:
1851 // If the type has new-extended alignment, a function with a parameter
1852 // of type std::align_val_t is preferred; otherwise a function without
1853 // such a parameter is preferred
1854 if (IDP.PassAlignment != OtherIDP.PassAlignment)
1855 return IDP.PassAlignment == TargetIDP.PassAlignment ? 1 : -1;
1856
1857 if (IDP.PassSize != OtherIDP.PassSize)
1858 return IDP.PassSize == TargetIDP.PassSize ? 1 : -1;
1859
1860 if (isTypeAwareAllocation(Mode: IDP.PassTypeIdentity)) {
1861 // Type aware allocation involves templates so we need to choose
1862 // the best type
1863 FunctionTemplateDecl *PrimaryTemplate = FD->getPrimaryTemplate();
1864 FunctionTemplateDecl *OtherPrimaryTemplate =
1865 Other.FD->getPrimaryTemplate();
1866 if ((!PrimaryTemplate) != (!OtherPrimaryTemplate))
1867 return OtherPrimaryTemplate ? 1 : -1;
1868
1869 if (PrimaryTemplate && OtherPrimaryTemplate) {
1870 const auto *DC = dyn_cast<CXXRecordDecl>(Val: Found->getDeclContext());
1871 const auto *OtherDC =
1872 dyn_cast<CXXRecordDecl>(Val: Other.Found->getDeclContext());
1873 unsigned ImplicitArgCount = Destroying + IDP.getNumImplicitArgs();
1874 if (FunctionTemplateDecl *Best = S.getMoreSpecializedTemplate(
1875 FT1: PrimaryTemplate, FT2: OtherPrimaryTemplate, Loc: SourceLocation(),
1876 TPOC: TPOC_Call, NumCallArguments1: ImplicitArgCount,
1877 RawObj1Ty: DC ? S.Context.getCanonicalTagType(TD: DC) : QualType{},
1878 RawObj2Ty: OtherDC ? S.Context.getCanonicalTagType(TD: OtherDC) : QualType{},
1879 Reversed: false)) {
1880 return Best == PrimaryTemplate ? 1 : -1;
1881 }
1882 }
1883 }
1884
1885 // Use CUDA call preference as a tiebreaker.
1886 if (CUDAPref > Other.CUDAPref)
1887 return 1;
1888 if (CUDAPref == Other.CUDAPref)
1889 return 0;
1890 return -1;
1891 }
1892
1893 DeclAccessPair Found;
1894 FunctionDecl *FD;
1895 bool Destroying;
1896 ImplicitDeallocationParameters IDP;
1897 SemaCUDA::CUDAFunctionPreference CUDAPref;
1898 };
1899}
1900
1901/// Determine whether a type has new-extended alignment. This may be called when
1902/// the type is incomplete (for a delete-expression with an incomplete pointee
1903/// type), in which case it will conservatively return false if the alignment is
1904/// not known.
1905static bool hasNewExtendedAlignment(Sema &S, QualType AllocType) {
1906 return S.getLangOpts().AlignedAllocation &&
1907 S.getASTContext().getTypeAlignIfKnown(T: AllocType) >
1908 S.getASTContext().getTargetInfo().getNewAlign();
1909}
1910
1911static bool CheckDeleteOperator(Sema &S, SourceLocation StartLoc,
1912 SourceRange Range, bool Diagnose,
1913 CXXRecordDecl *NamingClass, DeclAccessPair Decl,
1914 FunctionDecl *Operator) {
1915 if (Operator->isTypeAwareOperatorNewOrDelete()) {
1916 QualType SelectedTypeIdentityParameter =
1917 Operator->getParamDecl(i: 0)->getType();
1918 if (S.RequireCompleteType(Loc: StartLoc, T: SelectedTypeIdentityParameter,
1919 DiagID: diag::err_incomplete_type))
1920 return true;
1921 }
1922
1923 // FIXME: DiagnoseUseOfDecl?
1924 if (Operator->isDeleted()) {
1925 if (Diagnose) {
1926 StringLiteral *Msg = Operator->getDeletedMessage();
1927 S.Diag(Loc: StartLoc, DiagID: diag::err_deleted_function_use)
1928 << (Msg != nullptr) << (Msg ? Msg->getString() : StringRef());
1929 S.NoteDeletedFunction(FD: Operator);
1930 }
1931 return true;
1932 }
1933 Sema::AccessResult Accessible =
1934 S.CheckAllocationAccess(OperatorLoc: StartLoc, PlacementRange: Range, NamingClass, FoundDecl: Decl, Diagnose);
1935 return Accessible == Sema::AR_inaccessible;
1936}
1937
1938/// Select the correct "usual" deallocation function to use from a selection of
1939/// deallocation functions (either global or class-scope).
1940static UsualDeallocFnInfo resolveDeallocationOverload(
1941 Sema &S, LookupResult &R, const ImplicitDeallocationParameters &IDP,
1942 SourceLocation Loc,
1943 llvm::SmallVectorImpl<UsualDeallocFnInfo> *BestFns = nullptr) {
1944
1945 UsualDeallocFnInfo Best;
1946 for (auto I = R.begin(), E = R.end(); I != E; ++I) {
1947 UsualDeallocFnInfo Info(S, I.getPair(), IDP.Type, Loc);
1948 if (!Info || !isNonPlacementDeallocationFunction(S, FD: Info.FD) ||
1949 Info.CUDAPref == SemaCUDA::CFP_Never)
1950 continue;
1951
1952 if (!isTypeAwareAllocation(Mode: IDP.PassTypeIdentity) &&
1953 isTypeAwareAllocation(Mode: Info.IDP.PassTypeIdentity))
1954 continue;
1955 if (!Best) {
1956 Best = Info;
1957 if (BestFns)
1958 BestFns->push_back(Elt: Info);
1959 continue;
1960 }
1961 int ComparisonResult = Best.Compare(S, Other: Info, TargetIDP: IDP);
1962 if (ComparisonResult > 0)
1963 continue;
1964
1965 // If more than one preferred function is found, all non-preferred
1966 // functions are eliminated from further consideration.
1967 if (BestFns && ComparisonResult < 0)
1968 BestFns->clear();
1969
1970 Best = Info;
1971 if (BestFns)
1972 BestFns->push_back(Elt: Info);
1973 }
1974
1975 return Best;
1976}
1977
1978/// Determine whether a given type is a class for which 'delete[]' would call
1979/// a member 'operator delete[]' with a 'size_t' parameter. This implies that
1980/// we need to store the array size (even if the type is
1981/// trivially-destructible).
1982static bool doesUsualArrayDeleteWantSize(Sema &S, SourceLocation loc,
1983 TypeAwareAllocationMode PassType,
1984 QualType allocType) {
1985 const auto *record =
1986 allocType->getBaseElementTypeUnsafe()->getAsCanonical<RecordType>();
1987 if (!record) return false;
1988
1989 // Try to find an operator delete[] in class scope.
1990
1991 DeclarationName deleteName =
1992 S.Context.DeclarationNames.getCXXOperatorName(Op: OO_Array_Delete);
1993 LookupResult ops(S, deleteName, loc, Sema::LookupOrdinaryName);
1994 S.LookupQualifiedName(R&: ops, LookupCtx: record->getDecl()->getDefinitionOrSelf());
1995
1996 // We're just doing this for information.
1997 ops.suppressDiagnostics();
1998
1999 // Very likely: there's no operator delete[].
2000 if (ops.empty()) return false;
2001
2002 // If it's ambiguous, it should be illegal to call operator delete[]
2003 // on this thing, so it doesn't matter if we allocate extra space or not.
2004 if (ops.isAmbiguous()) return false;
2005
2006 // C++17 [expr.delete]p10:
2007 // If the deallocation functions have class scope, the one without a
2008 // parameter of type std::size_t is selected.
2009 ImplicitDeallocationParameters IDP = {
2010 allocType, PassType,
2011 alignedAllocationModeFromBool(IsAligned: hasNewExtendedAlignment(S, AllocType: allocType)),
2012 SizedDeallocationMode::No};
2013 auto Best = resolveDeallocationOverload(S, R&: ops, IDP, Loc: loc);
2014 return Best && isSizedDeallocation(Mode: Best.IDP.PassSize);
2015}
2016
2017ExprResult
2018Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
2019 SourceLocation PlacementLParen, MultiExprArg PlacementArgs,
2020 SourceLocation PlacementRParen, SourceRange TypeIdParens,
2021 Declarator &D, Expr *Initializer) {
2022 std::optional<Expr *> ArraySize;
2023 // If the specified type is an array, unwrap it and save the expression.
2024 if (D.getNumTypeObjects() > 0 &&
2025 D.getTypeObject(i: 0).Kind == DeclaratorChunk::Array) {
2026 DeclaratorChunk &Chunk = D.getTypeObject(i: 0);
2027 if (D.getDeclSpec().hasAutoTypeSpec())
2028 return ExprError(Diag(Loc: Chunk.Loc, DiagID: diag::err_new_array_of_auto)
2029 << D.getSourceRange());
2030 if (Chunk.Arr.hasStatic)
2031 return ExprError(Diag(Loc: Chunk.Loc, DiagID: diag::err_static_illegal_in_new)
2032 << D.getSourceRange());
2033 if (!Chunk.Arr.NumElts && !Initializer)
2034 return ExprError(Diag(Loc: Chunk.Loc, DiagID: diag::err_array_new_needs_size)
2035 << D.getSourceRange());
2036
2037 ArraySize = Chunk.Arr.NumElts;
2038 D.DropFirstTypeObject();
2039 }
2040
2041 // Every dimension shall be of constant size.
2042 if (ArraySize) {
2043 for (unsigned I = 0, N = D.getNumTypeObjects(); I < N; ++I) {
2044 if (D.getTypeObject(i: I).Kind != DeclaratorChunk::Array)
2045 break;
2046
2047 DeclaratorChunk::ArrayTypeInfo &Array = D.getTypeObject(i: I).Arr;
2048 if (Expr *NumElts = Array.NumElts) {
2049 if (!NumElts->isTypeDependent() && !NumElts->isValueDependent()) {
2050 // FIXME: GCC permits constant folding here. We should either do so consistently
2051 // or not do so at all, rather than changing behavior in C++14 onwards.
2052 if (getLangOpts().CPlusPlus14) {
2053 // C++1y [expr.new]p6: Every constant-expression in a noptr-new-declarator
2054 // shall be a converted constant expression (5.19) of type std::size_t
2055 // and shall evaluate to a strictly positive value.
2056 llvm::APSInt Value(Context.getIntWidth(T: Context.getSizeType()));
2057 Array.NumElts =
2058 CheckConvertedConstantExpression(From: NumElts, T: Context.getSizeType(),
2059 Value, CCE: CCEKind::ArrayBound)
2060 .get();
2061 } else {
2062 Array.NumElts = VerifyIntegerConstantExpression(
2063 E: NumElts, Result: nullptr, DiagID: diag::err_new_array_nonconst,
2064 CanFold: AllowFoldKind::Allow)
2065 .get();
2066 }
2067 if (!Array.NumElts)
2068 return ExprError();
2069 }
2070 }
2071 }
2072 }
2073
2074 TypeSourceInfo *TInfo = GetTypeForDeclarator(D);
2075 QualType AllocType = TInfo->getType();
2076 if (D.isInvalidType())
2077 return ExprError();
2078
2079 SourceRange DirectInitRange;
2080 if (ParenListExpr *List = dyn_cast_or_null<ParenListExpr>(Val: Initializer))
2081 DirectInitRange = List->getSourceRange();
2082
2083 return BuildCXXNew(Range: SourceRange(StartLoc, D.getEndLoc()), UseGlobal,
2084 PlacementLParen, PlacementArgs, PlacementRParen,
2085 TypeIdParens, AllocType, AllocTypeInfo: TInfo, ArraySize, DirectInitRange,
2086 Initializer);
2087}
2088
2089static bool isLegalArrayNewInitializer(CXXNewInitializationStyle Style,
2090 Expr *Init, bool IsCPlusPlus20) {
2091 if (!Init)
2092 return true;
2093 if (ParenListExpr *PLE = dyn_cast<ParenListExpr>(Val: Init))
2094 return IsCPlusPlus20 || PLE->getNumExprs() == 0;
2095 if (isa<ImplicitValueInitExpr>(Val: Init))
2096 return true;
2097 else if (CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Val: Init))
2098 return !CCE->isListInitialization() &&
2099 CCE->getConstructor()->isDefaultConstructor();
2100 else if (Style == CXXNewInitializationStyle::Braces) {
2101 assert(isa<InitListExpr>(Init) &&
2102 "Shouldn't create list CXXConstructExprs for arrays.");
2103 return true;
2104 }
2105 return false;
2106}
2107
2108bool
2109Sema::isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const {
2110 if (!getLangOpts().AlignedAllocationUnavailable)
2111 return false;
2112 if (FD.isDefined())
2113 return false;
2114 UnsignedOrNone AlignmentParam = std::nullopt;
2115 if (FD.isReplaceableGlobalAllocationFunction(AlignmentParam: &AlignmentParam) &&
2116 AlignmentParam)
2117 return true;
2118 return false;
2119}
2120
2121// Emit a diagnostic if an aligned allocation/deallocation function that is not
2122// implemented in the standard library is selected.
2123void Sema::diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
2124 SourceLocation Loc) {
2125 if (isUnavailableAlignedAllocationFunction(FD)) {
2126 const llvm::Triple &T = getASTContext().getTargetInfo().getTriple();
2127 StringRef OSName = AvailabilityAttr::getPlatformNameSourceSpelling(
2128 Platform: getASTContext().getTargetInfo().getPlatformName());
2129 VersionTuple OSVersion = alignedAllocMinVersion(OS: T.getOS());
2130
2131 bool IsDelete = FD.getDeclName().isAnyOperatorDelete();
2132 Diag(Loc, DiagID: diag::err_aligned_allocation_unavailable)
2133 << IsDelete << FD.getType().getAsString() << OSName
2134 << OSVersion.getAsString() << OSVersion.empty();
2135 Diag(Loc, DiagID: diag::note_silence_aligned_allocation_unavailable);
2136 }
2137}
2138
2139ExprResult Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
2140 SourceLocation PlacementLParen,
2141 MultiExprArg PlacementArgs,
2142 SourceLocation PlacementRParen,
2143 SourceRange TypeIdParens, QualType AllocType,
2144 TypeSourceInfo *AllocTypeInfo,
2145 std::optional<Expr *> ArraySize,
2146 SourceRange DirectInitRange, Expr *Initializer) {
2147 SourceRange TypeRange = AllocTypeInfo->getTypeLoc().getSourceRange();
2148 SourceLocation StartLoc = Range.getBegin();
2149
2150 CXXNewInitializationStyle InitStyle;
2151 if (DirectInitRange.isValid()) {
2152 assert(Initializer && "Have parens but no initializer.");
2153 InitStyle = CXXNewInitializationStyle::Parens;
2154 } else if (isa_and_nonnull<InitListExpr>(Val: Initializer))
2155 InitStyle = CXXNewInitializationStyle::Braces;
2156 else {
2157 assert((!Initializer || isa<ImplicitValueInitExpr>(Initializer) ||
2158 isa<CXXConstructExpr>(Initializer)) &&
2159 "Initializer expression that cannot have been implicitly created.");
2160 InitStyle = CXXNewInitializationStyle::None;
2161 }
2162
2163 MultiExprArg Exprs(&Initializer, Initializer ? 1 : 0);
2164 if (ParenListExpr *List = dyn_cast_or_null<ParenListExpr>(Val: Initializer)) {
2165 assert(InitStyle == CXXNewInitializationStyle::Parens &&
2166 "paren init for non-call init");
2167 Exprs = MultiExprArg(List->getExprs(), List->getNumExprs());
2168 } else if (auto *List = dyn_cast_or_null<CXXParenListInitExpr>(Val: Initializer)) {
2169 assert(InitStyle == CXXNewInitializationStyle::Parens &&
2170 "paren init for non-call init");
2171 Exprs = List->getInitExprs();
2172 }
2173
2174 // C++11 [expr.new]p15:
2175 // A new-expression that creates an object of type T initializes that
2176 // object as follows:
2177 InitializationKind Kind = [&] {
2178 switch (InitStyle) {
2179 // - If the new-initializer is omitted, the object is default-
2180 // initialized (8.5); if no initialization is performed,
2181 // the object has indeterminate value
2182 case CXXNewInitializationStyle::None:
2183 return InitializationKind::CreateDefault(InitLoc: TypeRange.getBegin());
2184 // - Otherwise, the new-initializer is interpreted according to the
2185 // initialization rules of 8.5 for direct-initialization.
2186 case CXXNewInitializationStyle::Parens:
2187 return InitializationKind::CreateDirect(InitLoc: TypeRange.getBegin(),
2188 LParenLoc: DirectInitRange.getBegin(),
2189 RParenLoc: DirectInitRange.getEnd());
2190 case CXXNewInitializationStyle::Braces:
2191 return InitializationKind::CreateDirectList(InitLoc: TypeRange.getBegin(),
2192 LBraceLoc: Initializer->getBeginLoc(),
2193 RBraceLoc: Initializer->getEndLoc());
2194 }
2195 llvm_unreachable("Unknown initialization kind");
2196 }();
2197
2198 // C++11 [dcl.spec.auto]p6. Deduce the type which 'auto' stands in for.
2199 auto *Deduced = AllocType->getContainedDeducedType();
2200 if (Deduced && !Deduced->isDeduced() &&
2201 isa<DeducedTemplateSpecializationType>(Val: Deduced)) {
2202 if (ArraySize)
2203 return ExprError(
2204 Diag(Loc: *ArraySize ? (*ArraySize)->getExprLoc() : TypeRange.getBegin(),
2205 DiagID: diag::err_deduced_class_template_compound_type)
2206 << /*array*/ 2
2207 << (*ArraySize ? (*ArraySize)->getSourceRange() : TypeRange));
2208
2209 InitializedEntity Entity =
2210 InitializedEntity::InitializeNew(NewLoc: StartLoc, Type: AllocType,
2211 /*VariableLengthArrayNew=*/false);
2212 AllocType = DeduceTemplateSpecializationFromInitializer(
2213 TInfo: AllocTypeInfo, Entity, Kind, Init: Exprs);
2214 if (AllocType.isNull())
2215 return ExprError();
2216 } else if (Deduced && !Deduced->isDeduced()) {
2217 MultiExprArg Inits = Exprs;
2218 bool Braced = (InitStyle == CXXNewInitializationStyle::Braces);
2219 if (Braced) {
2220 auto *ILE = cast<InitListExpr>(Val: Exprs[0]);
2221 Inits = MultiExprArg(ILE->getInits(), ILE->getNumInits());
2222 }
2223
2224 if (InitStyle == CXXNewInitializationStyle::None || Inits.empty())
2225 return ExprError(Diag(Loc: StartLoc, DiagID: diag::err_auto_new_requires_ctor_arg)
2226 << AllocType << TypeRange);
2227 if (Inits.size() > 1) {
2228 Expr *FirstBad = Inits[1];
2229 return ExprError(Diag(Loc: FirstBad->getBeginLoc(),
2230 DiagID: diag::err_auto_new_ctor_multiple_expressions)
2231 << AllocType << TypeRange);
2232 }
2233 if (Braced && !getLangOpts().CPlusPlus17)
2234 Diag(Loc: Initializer->getBeginLoc(), DiagID: diag::ext_auto_new_list_init)
2235 << AllocType << TypeRange;
2236 Expr *Deduce = Inits[0];
2237 if (isa<InitListExpr>(Val: Deduce))
2238 return ExprError(
2239 Diag(Loc: Deduce->getBeginLoc(), DiagID: diag::err_auto_expr_init_paren_braces)
2240 << Braced << AllocType << TypeRange);
2241 QualType DeducedType;
2242 TemplateDeductionInfo Info(Deduce->getExprLoc());
2243 TemplateDeductionResult Result =
2244 DeduceAutoType(AutoTypeLoc: AllocTypeInfo->getTypeLoc(), Initializer: Deduce, Result&: DeducedType, Info);
2245 if (Result != TemplateDeductionResult::Success &&
2246 Result != TemplateDeductionResult::AlreadyDiagnosed)
2247 return ExprError(Diag(Loc: StartLoc, DiagID: diag::err_auto_new_deduction_failure)
2248 << AllocType << Deduce->getType() << TypeRange
2249 << Deduce->getSourceRange());
2250 if (DeducedType.isNull()) {
2251 assert(Result == TemplateDeductionResult::AlreadyDiagnosed);
2252 return ExprError();
2253 }
2254 AllocType = DeducedType;
2255 }
2256
2257 // Per C++0x [expr.new]p5, the type being constructed may be a
2258 // typedef of an array type.
2259 // Dependent case will be handled separately.
2260 if (!ArraySize && !AllocType->isDependentType()) {
2261 if (const ConstantArrayType *Array
2262 = Context.getAsConstantArrayType(T: AllocType)) {
2263 ArraySize = IntegerLiteral::Create(C: Context, V: Array->getSize(),
2264 type: Context.getSizeType(),
2265 l: TypeRange.getEnd());
2266 AllocType = Array->getElementType();
2267 }
2268 }
2269
2270 if (CheckAllocatedType(AllocType, Loc: TypeRange.getBegin(), R: TypeRange))
2271 return ExprError();
2272
2273 if (ArraySize && !checkArrayElementAlignment(EltTy: AllocType, Loc: TypeRange.getBegin()))
2274 return ExprError();
2275
2276 // In ARC, infer 'retaining' for the allocated
2277 if (getLangOpts().ObjCAutoRefCount &&
2278 AllocType.getObjCLifetime() == Qualifiers::OCL_None &&
2279 AllocType->isObjCLifetimeType()) {
2280 AllocType = Context.getLifetimeQualifiedType(type: AllocType,
2281 lifetime: AllocType->getObjCARCImplicitLifetime());
2282 }
2283
2284 QualType ResultType = Context.getPointerType(T: AllocType);
2285
2286 if (ArraySize && *ArraySize &&
2287 (*ArraySize)->getType()->isNonOverloadPlaceholderType()) {
2288 ExprResult result = CheckPlaceholderExpr(E: *ArraySize);
2289 if (result.isInvalid()) return ExprError();
2290 ArraySize = result.get();
2291 }
2292 // C++98 5.3.4p6: "The expression in a direct-new-declarator shall have
2293 // integral or enumeration type with a non-negative value."
2294 // C++11 [expr.new]p6: The expression [...] shall be of integral or unscoped
2295 // enumeration type, or a class type for which a single non-explicit
2296 // conversion function to integral or unscoped enumeration type exists.
2297 // C++1y [expr.new]p6: The expression [...] is implicitly converted to
2298 // std::size_t.
2299 std::optional<uint64_t> KnownArraySize;
2300 if (ArraySize && *ArraySize && !(*ArraySize)->isTypeDependent()) {
2301 ExprResult ConvertedSize;
2302 if (getLangOpts().CPlusPlus14) {
2303 assert(Context.getTargetInfo().getIntWidth() && "Builtin type of size 0?");
2304
2305 ConvertedSize = PerformImplicitConversion(
2306 From: *ArraySize, ToType: Context.getSizeType(), Action: AssignmentAction::Converting);
2307
2308 if (!ConvertedSize.isInvalid() && (*ArraySize)->getType()->isRecordType())
2309 // Diagnose the compatibility of this conversion.
2310 Diag(Loc: StartLoc, DiagID: diag::warn_cxx98_compat_array_size_conversion)
2311 << (*ArraySize)->getType() << 0 << "'size_t'";
2312 } else {
2313 class SizeConvertDiagnoser : public ICEConvertDiagnoser {
2314 protected:
2315 Expr *ArraySize;
2316
2317 public:
2318 SizeConvertDiagnoser(Expr *ArraySize)
2319 : ICEConvertDiagnoser(/*AllowScopedEnumerations*/false, false, false),
2320 ArraySize(ArraySize) {}
2321
2322 SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
2323 QualType T) override {
2324 return S.Diag(Loc, DiagID: diag::err_array_size_not_integral)
2325 << S.getLangOpts().CPlusPlus11 << T;
2326 }
2327
2328 SemaDiagnosticBuilder diagnoseIncomplete(
2329 Sema &S, SourceLocation Loc, QualType T) override {
2330 return S.Diag(Loc, DiagID: diag::err_array_size_incomplete_type)
2331 << T << ArraySize->getSourceRange();
2332 }
2333
2334 SemaDiagnosticBuilder diagnoseExplicitConv(
2335 Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override {
2336 return S.Diag(Loc, DiagID: diag::err_array_size_explicit_conversion) << T << ConvTy;
2337 }
2338
2339 SemaDiagnosticBuilder noteExplicitConv(
2340 Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override {
2341 return S.Diag(Loc: Conv->getLocation(), DiagID: diag::note_array_size_conversion)
2342 << ConvTy->isEnumeralType() << ConvTy;
2343 }
2344
2345 SemaDiagnosticBuilder diagnoseAmbiguous(
2346 Sema &S, SourceLocation Loc, QualType T) override {
2347 return S.Diag(Loc, DiagID: diag::err_array_size_ambiguous_conversion) << T;
2348 }
2349
2350 SemaDiagnosticBuilder noteAmbiguous(
2351 Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override {
2352 return S.Diag(Loc: Conv->getLocation(), DiagID: diag::note_array_size_conversion)
2353 << ConvTy->isEnumeralType() << ConvTy;
2354 }
2355
2356 SemaDiagnosticBuilder diagnoseConversion(Sema &S, SourceLocation Loc,
2357 QualType T,
2358 QualType ConvTy) override {
2359 return S.Diag(Loc,
2360 DiagID: S.getLangOpts().CPlusPlus11
2361 ? diag::warn_cxx98_compat_array_size_conversion
2362 : diag::ext_array_size_conversion)
2363 << T << ConvTy->isEnumeralType() << ConvTy;
2364 }
2365 } SizeDiagnoser(*ArraySize);
2366
2367 ConvertedSize = PerformContextualImplicitConversion(Loc: StartLoc, FromE: *ArraySize,
2368 Converter&: SizeDiagnoser);
2369 }
2370 if (ConvertedSize.isInvalid())
2371 return ExprError();
2372
2373 ArraySize = ConvertedSize.get();
2374 QualType SizeType = (*ArraySize)->getType();
2375
2376 if (!SizeType->isIntegralOrUnscopedEnumerationType())
2377 return ExprError();
2378
2379 // C++98 [expr.new]p7:
2380 // The expression in a direct-new-declarator shall have integral type
2381 // with a non-negative value.
2382 //
2383 // Let's see if this is a constant < 0. If so, we reject it out of hand,
2384 // per CWG1464. Otherwise, if it's not a constant, we must have an
2385 // unparenthesized array type.
2386
2387 // We've already performed any required implicit conversion to integer or
2388 // unscoped enumeration type.
2389 // FIXME: Per CWG1464, we are required to check the value prior to
2390 // converting to size_t. This will never find a negative array size in
2391 // C++14 onwards, because Value is always unsigned here!
2392 if (std::optional<llvm::APSInt> Value =
2393 (*ArraySize)->getIntegerConstantExpr(Ctx: Context)) {
2394 if (Value->isSigned() && Value->isNegative()) {
2395 return ExprError(Diag(Loc: (*ArraySize)->getBeginLoc(),
2396 DiagID: diag::err_typecheck_negative_array_size)
2397 << (*ArraySize)->getSourceRange());
2398 }
2399
2400 if (!AllocType->isDependentType()) {
2401 unsigned ActiveSizeBits =
2402 ConstantArrayType::getNumAddressingBits(Context, ElementType: AllocType, NumElements: *Value);
2403 if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context))
2404 return ExprError(
2405 Diag(Loc: (*ArraySize)->getBeginLoc(), DiagID: diag::err_array_too_large)
2406 << toString(I: *Value, Radix: 10, Signed: Value->isSigned(),
2407 /*formatAsCLiteral=*/false, /*UpperCase=*/false,
2408 /*InsertSeparators=*/true)
2409 << (*ArraySize)->getSourceRange());
2410 }
2411
2412 KnownArraySize = Value->getZExtValue();
2413 } else if (TypeIdParens.isValid()) {
2414 // Can't have dynamic array size when the type-id is in parentheses.
2415 Diag(Loc: (*ArraySize)->getBeginLoc(), DiagID: diag::ext_new_paren_array_nonconst)
2416 << (*ArraySize)->getSourceRange()
2417 << FixItHint::CreateRemoval(RemoveRange: TypeIdParens.getBegin())
2418 << FixItHint::CreateRemoval(RemoveRange: TypeIdParens.getEnd());
2419
2420 TypeIdParens = SourceRange();
2421 }
2422
2423 // Note that we do *not* convert the argument in any way. It can
2424 // be signed, larger than size_t, whatever.
2425 }
2426
2427 FunctionDecl *OperatorNew = nullptr;
2428 FunctionDecl *OperatorDelete = nullptr;
2429 unsigned Alignment =
2430 AllocType->isDependentType() ? 0 : Context.getTypeAlign(T: AllocType);
2431 unsigned NewAlignment = Context.getTargetInfo().getNewAlign();
2432 ImplicitAllocationParameters IAP = {
2433 AllocType, ShouldUseTypeAwareOperatorNewOrDelete(),
2434 alignedAllocationModeFromBool(IsAligned: getLangOpts().AlignedAllocation &&
2435 Alignment > NewAlignment)};
2436
2437 if (CheckArgsForPlaceholders(args: PlacementArgs))
2438 return ExprError();
2439
2440 AllocationFunctionScope Scope = UseGlobal ? AllocationFunctionScope::Global
2441 : AllocationFunctionScope::Both;
2442 SourceRange AllocationParameterRange = Range;
2443 if (PlacementLParen.isValid() && PlacementRParen.isValid())
2444 AllocationParameterRange = SourceRange(PlacementLParen, PlacementRParen);
2445 if (!AllocType->isDependentType() &&
2446 !Expr::hasAnyTypeDependentArguments(Exprs: PlacementArgs) &&
2447 FindAllocationFunctions(StartLoc, Range: AllocationParameterRange, NewScope: Scope, DeleteScope: Scope,
2448 AllocType, IsArray: ArraySize.has_value(), IAP,
2449 PlaceArgs: PlacementArgs, OperatorNew, OperatorDelete))
2450 return ExprError();
2451
2452 // If this is an array allocation, compute whether the usual array
2453 // deallocation function for the type has a size_t parameter.
2454 bool UsualArrayDeleteWantsSize = false;
2455 if (ArraySize && !AllocType->isDependentType())
2456 UsualArrayDeleteWantsSize = doesUsualArrayDeleteWantSize(
2457 S&: *this, loc: StartLoc, PassType: IAP.PassTypeIdentity, allocType: AllocType);
2458
2459 SmallVector<Expr *, 8> AllPlaceArgs;
2460 if (OperatorNew) {
2461 auto *Proto = OperatorNew->getType()->castAs<FunctionProtoType>();
2462 VariadicCallType CallType = Proto->isVariadic()
2463 ? VariadicCallType::Function
2464 : VariadicCallType::DoesNotApply;
2465
2466 // We've already converted the placement args, just fill in any default
2467 // arguments. Skip the first parameter because we don't have a corresponding
2468 // argument. Skip the second parameter too if we're passing in the
2469 // alignment; we've already filled it in.
2470 unsigned NumImplicitArgs = 1;
2471 if (isTypeAwareAllocation(Mode: IAP.PassTypeIdentity)) {
2472 assert(OperatorNew->isTypeAwareOperatorNewOrDelete());
2473 NumImplicitArgs++;
2474 }
2475 if (isAlignedAllocation(Mode: IAP.PassAlignment))
2476 NumImplicitArgs++;
2477 if (GatherArgumentsForCall(CallLoc: AllocationParameterRange.getBegin(), FDecl: OperatorNew,
2478 Proto, FirstParam: NumImplicitArgs, Args: PlacementArgs,
2479 AllArgs&: AllPlaceArgs, CallType))
2480 return ExprError();
2481
2482 if (!AllPlaceArgs.empty())
2483 PlacementArgs = AllPlaceArgs;
2484
2485 // We would like to perform some checking on the given `operator new` call,
2486 // but the PlacementArgs does not contain the implicit arguments,
2487 // namely allocation size and maybe allocation alignment,
2488 // so we need to conjure them.
2489
2490 QualType SizeTy = Context.getSizeType();
2491 unsigned SizeTyWidth = Context.getTypeSize(T: SizeTy);
2492
2493 llvm::APInt SingleEltSize(
2494 SizeTyWidth, Context.getTypeSizeInChars(T: AllocType).getQuantity());
2495
2496 // How many bytes do we want to allocate here?
2497 std::optional<llvm::APInt> AllocationSize;
2498 if (!ArraySize && !AllocType->isDependentType()) {
2499 // For non-array operator new, we only want to allocate one element.
2500 AllocationSize = SingleEltSize;
2501 } else if (KnownArraySize && !AllocType->isDependentType()) {
2502 // For array operator new, only deal with static array size case.
2503 bool Overflow;
2504 AllocationSize = llvm::APInt(SizeTyWidth, *KnownArraySize)
2505 .umul_ov(RHS: SingleEltSize, Overflow);
2506 (void)Overflow;
2507 assert(
2508 !Overflow &&
2509 "Expected that all the overflows would have been handled already.");
2510 }
2511
2512 IntegerLiteral AllocationSizeLiteral(
2513 Context, AllocationSize.value_or(u: llvm::APInt::getZero(numBits: SizeTyWidth)),
2514 SizeTy, StartLoc);
2515 // Otherwise, if we failed to constant-fold the allocation size, we'll
2516 // just give up and pass-in something opaque, that isn't a null pointer.
2517 OpaqueValueExpr OpaqueAllocationSize(StartLoc, SizeTy, VK_PRValue,
2518 OK_Ordinary, /*SourceExpr=*/nullptr);
2519
2520 // Let's synthesize the alignment argument in case we will need it.
2521 // Since we *really* want to allocate these on stack, this is slightly ugly
2522 // because there might not be a `std::align_val_t` type.
2523 EnumDecl *StdAlignValT = getStdAlignValT();
2524 QualType AlignValT =
2525 StdAlignValT ? Context.getCanonicalTagType(TD: StdAlignValT) : SizeTy;
2526 IntegerLiteral AlignmentLiteral(
2527 Context,
2528 llvm::APInt(Context.getTypeSize(T: SizeTy),
2529 Alignment / Context.getCharWidth()),
2530 SizeTy, StartLoc);
2531 ImplicitCastExpr DesiredAlignment(ImplicitCastExpr::OnStack, AlignValT,
2532 CK_IntegralCast, &AlignmentLiteral,
2533 VK_PRValue, FPOptionsOverride());
2534
2535 // Adjust placement args by prepending conjured size and alignment exprs.
2536 llvm::SmallVector<Expr *, 8> CallArgs;
2537 CallArgs.reserve(N: NumImplicitArgs + PlacementArgs.size());
2538 CallArgs.emplace_back(Args: AllocationSize
2539 ? static_cast<Expr *>(&AllocationSizeLiteral)
2540 : &OpaqueAllocationSize);
2541 if (isAlignedAllocation(Mode: IAP.PassAlignment))
2542 CallArgs.emplace_back(Args: &DesiredAlignment);
2543 llvm::append_range(C&: CallArgs, R&: PlacementArgs);
2544
2545 DiagnoseSentinelCalls(D: OperatorNew, Loc: PlacementLParen, Args: CallArgs);
2546
2547 checkCall(FDecl: OperatorNew, Proto, /*ThisArg=*/nullptr, Args: CallArgs,
2548 /*IsMemberFunction=*/false, Loc: StartLoc, Range, CallType);
2549
2550 // Warn if the type is over-aligned and is being allocated by (unaligned)
2551 // global operator new.
2552 if (PlacementArgs.empty() && !isAlignedAllocation(Mode: IAP.PassAlignment) &&
2553 (OperatorNew->isImplicit() ||
2554 (OperatorNew->getBeginLoc().isValid() &&
2555 getSourceManager().isInSystemHeader(Loc: OperatorNew->getBeginLoc())))) {
2556 if (Alignment > NewAlignment)
2557 Diag(Loc: StartLoc, DiagID: diag::warn_overaligned_type)
2558 << AllocType
2559 << unsigned(Alignment / Context.getCharWidth())
2560 << unsigned(NewAlignment / Context.getCharWidth());
2561 }
2562 }
2563
2564 // Array 'new' can't have any initializers except empty parentheses.
2565 // Initializer lists are also allowed, in C++11. Rely on the parser for the
2566 // dialect distinction.
2567 if (ArraySize && !isLegalArrayNewInitializer(Style: InitStyle, Init: Initializer,
2568 IsCPlusPlus20: getLangOpts().CPlusPlus20)) {
2569 SourceRange InitRange(Exprs.front()->getBeginLoc(),
2570 Exprs.back()->getEndLoc());
2571 Diag(Loc: StartLoc, DiagID: diag::err_new_array_init_args) << InitRange;
2572 return ExprError();
2573 }
2574
2575 // If we can perform the initialization, and we've not already done so,
2576 // do it now.
2577 if (!AllocType->isDependentType() &&
2578 !Expr::hasAnyTypeDependentArguments(Exprs)) {
2579 // The type we initialize is the complete type, including the array bound.
2580 QualType InitType;
2581 if (KnownArraySize)
2582 InitType = Context.getConstantArrayType(
2583 EltTy: AllocType,
2584 ArySize: llvm::APInt(Context.getTypeSize(T: Context.getSizeType()),
2585 *KnownArraySize),
2586 SizeExpr: *ArraySize, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
2587 else if (ArraySize)
2588 InitType = Context.getIncompleteArrayType(EltTy: AllocType,
2589 ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
2590 else
2591 InitType = AllocType;
2592
2593 bool VariableLengthArrayNew = ArraySize && *ArraySize && !KnownArraySize;
2594 InitializedEntity Entity = InitializedEntity::InitializeNew(
2595 NewLoc: StartLoc, Type: InitType, VariableLengthArrayNew);
2596 InitializationSequence InitSeq(*this, Entity, Kind, Exprs);
2597 ExprResult FullInit = InitSeq.Perform(S&: *this, Entity, Kind, Args: Exprs);
2598 if (FullInit.isInvalid())
2599 return ExprError();
2600
2601 // FullInit is our initializer; strip off CXXBindTemporaryExprs, because
2602 // we don't want the initialized object to be destructed.
2603 // FIXME: We should not create these in the first place.
2604 if (CXXBindTemporaryExpr *Binder =
2605 dyn_cast_or_null<CXXBindTemporaryExpr>(Val: FullInit.get()))
2606 FullInit = Binder->getSubExpr();
2607
2608 Initializer = FullInit.get();
2609
2610 // FIXME: If we have a KnownArraySize, check that the array bound of the
2611 // initializer is no greater than that constant value.
2612
2613 if (ArraySize && !*ArraySize) {
2614 auto *CAT = Context.getAsConstantArrayType(T: Initializer->getType());
2615 if (CAT) {
2616 // FIXME: Track that the array size was inferred rather than explicitly
2617 // specified.
2618 ArraySize = IntegerLiteral::Create(
2619 C: Context, V: CAT->getSize(), type: Context.getSizeType(), l: TypeRange.getEnd());
2620 } else {
2621 Diag(Loc: TypeRange.getEnd(), DiagID: diag::err_new_array_size_unknown_from_init)
2622 << Initializer->getSourceRange();
2623 }
2624 }
2625 }
2626
2627 // Mark the new and delete operators as referenced.
2628 if (OperatorNew) {
2629 if (DiagnoseUseOfDecl(D: OperatorNew, Locs: StartLoc))
2630 return ExprError();
2631 MarkFunctionReferenced(Loc: StartLoc, Func: OperatorNew);
2632 }
2633 if (OperatorDelete) {
2634 if (DiagnoseUseOfDecl(D: OperatorDelete, Locs: StartLoc))
2635 return ExprError();
2636 MarkFunctionReferenced(Loc: StartLoc, Func: OperatorDelete);
2637 }
2638
2639 // new[] will trigger vector deleting destructor emission if the class has
2640 // virtual destructor for MSVC compatibility. Perform necessary checks.
2641 if (Context.getTargetInfo().emitVectorDeletingDtors(Context.getLangOpts())) {
2642 if (const CXXConstructExpr *CCE =
2643 dyn_cast_or_null<CXXConstructExpr>(Val: Initializer);
2644 CCE && ArraySize) {
2645 CXXRecordDecl *ClassDecl = CCE->getConstructor()->getParent();
2646 // We probably already did this for another new[] with this class so don't
2647 // do it twice.
2648 if (!Context.classMaybeNeedsVectorDeletingDestructor(RD: ClassDecl)) {
2649 auto *Dtor = ClassDecl->getDestructor();
2650 if (Dtor && Dtor->isVirtual() && !Dtor->isDeleted()) {
2651 Context.setClassMaybeNeedsVectorDeletingDestructor(ClassDecl);
2652 if (!Dtor->isDefined() && !Dtor->isInvalidDecl()) {
2653 // Call CheckDestructor if destructor is not defined. This is
2654 // needed to find operators delete and delete[] for vector deleting
2655 // destructor body because new[] will trigger emission of vector
2656 // deleting destructor body even if destructor is defined in another
2657 // translation unit.
2658 ContextRAII SavedContext(*this, Dtor);
2659 CheckDestructor(Destructor: Dtor);
2660 }
2661 }
2662 }
2663 }
2664 }
2665
2666 return CXXNewExpr::Create(Ctx: Context, IsGlobalNew: UseGlobal, OperatorNew, OperatorDelete,
2667 IAP, UsualArrayDeleteWantsSize, PlacementArgs,
2668 TypeIdParens, ArraySize, InitializationStyle: InitStyle, Initializer,
2669 Ty: ResultType, AllocatedTypeInfo: AllocTypeInfo, Range, DirectInitRange);
2670}
2671
2672bool Sema::CheckAllocatedType(QualType AllocType, SourceLocation Loc,
2673 SourceRange R) {
2674 // C++ 5.3.4p1: "[The] type shall be a complete object type, but not an
2675 // abstract class type or array thereof.
2676 if (AllocType->isFunctionType())
2677 return Diag(Loc, DiagID: diag::err_bad_new_type)
2678 << AllocType << 0 << R;
2679 else if (AllocType->isReferenceType())
2680 return Diag(Loc, DiagID: diag::err_bad_new_type)
2681 << AllocType << 1 << R;
2682 else if (!AllocType->isDependentType() &&
2683 RequireCompleteSizedType(
2684 Loc, T: AllocType, DiagID: diag::err_new_incomplete_or_sizeless_type, Args: R))
2685 return true;
2686 else if (RequireNonAbstractType(Loc, T: AllocType,
2687 DiagID: diag::err_allocation_of_abstract_type))
2688 return true;
2689 else if (AllocType->isVariablyModifiedType())
2690 return Diag(Loc, DiagID: diag::err_variably_modified_new_type)
2691 << AllocType;
2692 else if (AllocType.getAddressSpace() != LangAS::Default &&
2693 !getLangOpts().OpenCLCPlusPlus)
2694 return Diag(Loc, DiagID: diag::err_address_space_qualified_new)
2695 << AllocType.getUnqualifiedType()
2696 << Qualifiers::getAddrSpaceAsString(AS: AllocType.getAddressSpace());
2697
2698 else if (getLangOpts().ObjCAutoRefCount) {
2699 if (const ArrayType *AT = Context.getAsArrayType(T: AllocType)) {
2700 QualType BaseAllocType = Context.getBaseElementType(VAT: AT);
2701 if (BaseAllocType.getObjCLifetime() == Qualifiers::OCL_None &&
2702 BaseAllocType->isObjCLifetimeType())
2703 return Diag(Loc, DiagID: diag::err_arc_new_array_without_ownership)
2704 << BaseAllocType;
2705 }
2706 }
2707
2708 return false;
2709}
2710
// Selects which family of allocation-function candidates overload
// resolution should consider: type-aware operators (Typed) or ordinary,
// non-type-aware operators (Untyped).
enum class ResolveMode { Typed, Untyped };
/// Run overload resolution over one family of allocation functions found in
/// \p R: the type-aware candidates when \p Mode is ResolveMode::Typed,
/// otherwise the non-type-aware ones.
///
/// Returns false on success with \p Operator set to the selected function.
/// Returns true on failure, emitting diagnostics when \p Diagnose is set.
/// As a special case, finding no viable *typed* operator is not treated as
/// an error: \p Operator is left null and false is returned so the caller
/// can fall back to untyped resolution.
///
/// Note: this function may mutate \p Args and \p PassAlignment when it
/// retries resolution without the alignment argument (C++17 [expr.new]p13).
static bool resolveAllocationOverloadInterior(
    Sema &S, LookupResult &R, SourceRange Range, ResolveMode Mode,
    SmallVectorImpl<Expr *> &Args, AlignedAllocationMode &PassAlignment,
    FunctionDecl *&Operator, OverloadCandidateSet *AlignedCandidates,
    Expr *AlignArg, bool Diagnose) {
  // In typed mode, Args[0] is the std::type_identity argument (asserted by
  // the caller), so indexing of the size/alignment arguments below is
  // shifted one slot to the right.
  unsigned NonTypeArgumentOffset = 0;
  if (Mode == ResolveMode::Typed) {
    ++NonTypeArgumentOffset;
  }

  OverloadCandidateSet Candidates(R.getNameLoc(),
                                  OverloadCandidateSet::CSK_Normal);
  for (LookupResult::iterator Alloc = R.begin(), AllocEnd = R.end();
       Alloc != AllocEnd; ++Alloc) {
    // Even member operator new/delete are implicitly treated as
    // static, so don't use AddMemberCandidate.
    NamedDecl *D = (*Alloc)->getUnderlyingDecl();
    // Keep only candidates matching the requested mode: type-aware
    // declarations for Typed resolution, non-type-aware otherwise.
    bool IsTypeAware = D->getAsFunction()->isTypeAwareOperatorNewOrDelete();
    if (IsTypeAware == (Mode != ResolveMode::Typed))
      continue;

    if (FunctionTemplateDecl *FnTemplate = dyn_cast<FunctionTemplateDecl>(Val: D)) {
      S.AddTemplateOverloadCandidate(FunctionTemplate: FnTemplate, FoundDecl: Alloc.getPair(),
                                     /*ExplicitTemplateArgs=*/nullptr, Args,
                                     CandidateSet&: Candidates,
                                     /*SuppressUserConversions=*/false);
      continue;
    }

    FunctionDecl *Fn = cast<FunctionDecl>(Val: D);
    S.AddOverloadCandidate(Function: Fn, FoundDecl: Alloc.getPair(), Args, CandidateSet&: Candidates,
                           /*SuppressUserConversions=*/false);
  }

  // Do the resolution.
  OverloadCandidateSet::iterator Best;
  switch (Candidates.BestViableFunction(S, Loc: R.getNameLoc(), Best)) {
  case OR_Success: {
    // Got one!
    FunctionDecl *FnDecl = Best->Function;
    if (S.CheckAllocationAccess(OperatorLoc: R.getNameLoc(), PlacementRange: Range, NamingClass: R.getNamingClass(),
                                FoundDecl: Best->FoundDecl) == Sema::AR_inaccessible)
      return true;

    Operator = FnDecl;
    return false;
  }

  case OR_No_Viable_Function:
    // C++17 [expr.new]p13:
    //   If no matching function is found and the allocated object type has
    //   new-extended alignment, the alignment argument is removed from the
    //   argument list, and overload resolution is performed again.
    if (isAlignedAllocation(Mode: PassAlignment)) {
      PassAlignment = AlignedAllocationMode::No;
      // The alignment argument immediately follows the size argument; save
      // it so the failed aligned form can still be diagnosed below.
      AlignArg = Args[NonTypeArgumentOffset + 1];
      Args.erase(CI: Args.begin() + NonTypeArgumentOffset + 1);
      // Retry without alignment, passing this candidate set along so the
      // diagnostics can report both argument forms.
      return resolveAllocationOverloadInterior(S, R, Range, Mode, Args,
                                               PassAlignment, Operator,
                                               AlignedCandidates: &Candidates, AlignArg, Diagnose);
    }

    // MSVC will fall back on trying to find a matching global operator new
    // if operator new[] cannot be found. Also, MSVC will leak by not
    // generating a call to operator delete or operator delete[], but we
    // will not replicate that bug.
    // FIXME: Find out how this interacts with the std::align_val_t fallback
    // once MSVC implements it.
    if (R.getLookupName().getCXXOverloadedOperator() == OO_Array_New &&
        S.Context.getLangOpts().MSVCCompat && Mode != ResolveMode::Typed) {
      R.clear();
      R.setLookupName(S.Context.DeclarationNames.getCXXOperatorName(Op: OO_New));
      S.LookupQualifiedName(R, LookupCtx: S.Context.getTranslationUnitDecl());
      // FIXME: This will give bad diagnostics pointing at the wrong functions.
      return resolveAllocationOverloadInterior(S, R, Range, Mode, Args,
                                               PassAlignment, Operator,
                                               /*Candidates=*/AlignedCandidates: nullptr,
                                               /*AlignArg=*/nullptr, Diagnose);
    }
    if (Mode == ResolveMode::Typed) {
      // If we can't find a matching type aware operator we don't consider this
      // a failure.
      Operator = nullptr;
      return false;
    }
    if (Diagnose) {
      // If this is an allocation of the form 'new (p) X' for some object
      // pointer p (or an expression that will decay to such a pointer),
      // diagnose the reason for the error.
      if (!R.isClassLookup() && Args.size() == 2 &&
          (Args[1]->getType()->isObjectPointerType() ||
           Args[1]->getType()->isArrayType())) {
        const QualType Arg1Type = Args[1]->getType();
        QualType UnderlyingType = S.Context.getBaseElementType(QT: Arg1Type);
        if (UnderlyingType->isPointerType())
          UnderlyingType = UnderlyingType->getPointeeType();
        if (UnderlyingType.isConstQualified()) {
          S.Diag(Loc: Args[1]->getExprLoc(),
                 DiagID: diag::err_placement_new_into_const_qualified_storage)
              << Arg1Type << Args[1]->getSourceRange();
          return true;
        }
        // Likely usual placement new without <new> included; suggest the
        // header instead of dumping candidates.
        S.Diag(Loc: R.getNameLoc(), DiagID: diag::err_need_header_before_placement_new)
            << R.getLookupName() << Range;
        // Listing the candidates is unlikely to be useful; skip it.
        return true;
      }

      // Finish checking all candidates before we note any. This checking can
      // produce additional diagnostics so can't be interleaved with our
      // emission of notes.
      //
      // For an aligned allocation, separately check the aligned and unaligned
      // candidates with their respective argument lists.
      SmallVector<OverloadCandidate*, 32> Cands;
      SmallVector<OverloadCandidate*, 32> AlignedCands;
      llvm::SmallVector<Expr*, 4> AlignedArgs;
      if (AlignedCandidates) {
        // A candidate is "aligned" if the parameter after the size parameter
        // has type std::align_val_t.
        auto IsAligned = [NonTypeArgumentOffset](OverloadCandidate &C) {
          auto AlignArgOffset = NonTypeArgumentOffset + 1;
          return C.Function->getNumParams() > AlignArgOffset &&
                 C.Function->getParamDecl(i: AlignArgOffset)
                     ->getType()
                     ->isAlignValT();
        };
        auto IsUnaligned = [&](OverloadCandidate &C) { return !IsAligned(C); };

        // Reconstruct the aligned argument list by re-inserting the saved
        // alignment argument after the (type-identity and) size arguments.
        AlignedArgs.reserve(N: Args.size() + NonTypeArgumentOffset + 1);
        for (unsigned Idx = 0; Idx < NonTypeArgumentOffset + 1; ++Idx)
          AlignedArgs.push_back(Elt: Args[Idx]);
        AlignedArgs.push_back(Elt: AlignArg);
        AlignedArgs.append(in_start: Args.begin() + NonTypeArgumentOffset + 1,
                           in_end: Args.end());
        AlignedCands = AlignedCandidates->CompleteCandidates(
            S, OCD: OCD_AllCandidates, Args: AlignedArgs, OpLoc: R.getNameLoc(), Filter: IsAligned);

        Cands = Candidates.CompleteCandidates(S, OCD: OCD_AllCandidates, Args,
                                              OpLoc: R.getNameLoc(), Filter: IsUnaligned);
      } else {
        Cands = Candidates.CompleteCandidates(S, OCD: OCD_AllCandidates, Args,
                                              OpLoc: R.getNameLoc());
      }

      S.Diag(Loc: R.getNameLoc(), DiagID: diag::err_ovl_no_viable_function_in_call)
          << R.getLookupName() << Range;
      if (AlignedCandidates)
        AlignedCandidates->NoteCandidates(S, Args: AlignedArgs, Cands: AlignedCands, Opc: "",
                                          OpLoc: R.getNameLoc());
      Candidates.NoteCandidates(S, Args, Cands, Opc: "", OpLoc: R.getNameLoc());
    }
    return true;

  case OR_Ambiguous:
    if (Diagnose) {
      Candidates.NoteCandidates(
          PA: PartialDiagnosticAt(R.getNameLoc(),
                               S.PDiag(DiagID: diag::err_ovl_ambiguous_call)
                                   << R.getLookupName() << Range),
          S, OCD: OCD_AmbiguousCandidates, Args);
    }
    return true;

  case OR_Deleted: {
    if (Diagnose)
      S.DiagnoseUseOfDeletedFunction(Loc: R.getNameLoc(), Range, Name: R.getLookupName(),
                                     CandidateSet&: Candidates, Fn: Best->Function, Args);
    return true;
  }
  }
  llvm_unreachable("Unreachable, bad result from BestViableFunction");
}
2883
// Controls global deallocation-function lookup: Untyped filters out
// type-aware declarations from the result; OptionallyTyped keeps both
// type-aware and ordinary declarations.
enum class DeallocLookupMode { Untyped, OptionallyTyped };
2885
/// Look up deallocation functions named \p Name in the global scope
/// (translation unit), then — unless \p Mode is OptionallyTyped — filter
/// the results down to a single family. With only two modes, any
/// non-OptionallyTyped lookup is Untyped, so type-aware declarations are
/// erased from the result.
///
/// NOTE(review): \p Loc and \p Name appear unused in this body — the
/// lookup name is taken from \p FoundDelete itself; confirm with callers.
static void LookupGlobalDeallocationFunctions(Sema &S, SourceLocation Loc,
                                              LookupResult &FoundDelete,
                                              DeallocLookupMode Mode,
                                              DeclarationName Name) {
  S.LookupQualifiedName(R&: FoundDelete, LookupCtx: S.Context.getTranslationUnitDecl());
  if (Mode != DeallocLookupMode::OptionallyTyped) {
    // We're going to remove either the typed or the non-typed declarations
    // from the lookup result, depending on the requested mode.
    bool RemoveTypedDecl = Mode == DeallocLookupMode::Untyped;
    LookupResult::Filter Filter = FoundDelete.makeFilter();
    while (Filter.hasNext()) {
      FunctionDecl *FD = Filter.next()->getUnderlyingDecl()->getAsFunction();
      if (FD->isTypeAwareOperatorNewOrDelete() == RemoveTypedDecl)
        Filter.erase();
    }
    Filter.done();
  }
}
2903
/// Resolve which allocation function a new-expression should call.
///
/// When type-aware allocation is requested, Args is laid out as
/// [type-identity, size, alignment?, placement-args...] and the type-aware
/// candidates are tried first. If none matches, IAP is downgraded to
/// non-type-aware, the saved type-identity-less argument list is restored,
/// and ordinary resolution runs.
///
/// Returns true on a hard resolution failure; returns false with
/// \p Operator set (or with it null only via the interior's typed-mode
/// fallback path, which is then retried here).
static bool resolveAllocationOverload(
    Sema &S, LookupResult &R, SourceRange Range, SmallVectorImpl<Expr *> &Args,
    ImplicitAllocationParameters &IAP, FunctionDecl *&Operator,
    OverloadCandidateSet *AlignedCandidates, Expr *AlignArg, bool Diagnose) {
  Operator = nullptr;
  if (isTypeAwareAllocation(Mode: IAP.PassTypeIdentity)) {
    assert(S.isStdTypeIdentity(Args[0]->getType(), nullptr));
    // The internal overload resolution work mutates the argument list
    // in accordance with the spec. We may want to change that in future,
    // but for now we deal with this by making a copy of the non-type-identity
    // arguments.
    // Copy layout: Args[1] is the size; Args[2] is the alignment (kept only
    // if explicitly requested); Args[3...] are the placement arguments.
    SmallVector<Expr *> UntypedParameters;
    UntypedParameters.reserve(N: Args.size() - 1);
    UntypedParameters.push_back(Elt: Args[1]);
    // Type aware allocation implicitly includes the alignment parameter so
    // only include it in the untyped parameter list if alignment was explicitly
    // requested
    if (isAlignedAllocation(Mode: IAP.PassAlignment))
      UntypedParameters.push_back(Elt: Args[2]);
    UntypedParameters.append(in_start: Args.begin() + 3, in_end: Args.end());

    // Type-aware resolution always passes alignment; remember the original
    // mode so it can be restored for the untyped fallback below.
    AlignedAllocationMode InitialAlignmentMode = IAP.PassAlignment;
    IAP.PassAlignment = AlignedAllocationMode::Yes;
    if (resolveAllocationOverloadInterior(
            S, R, Range, Mode: ResolveMode::Typed, Args, PassAlignment&: IAP.PassAlignment, Operator,
            AlignedCandidates, AlignArg, Diagnose))
      return true;
    if (Operator)
      return false;

    // If we got to this point we could not find a matching typed operator
    // so we update the IAP flags, and revert to our stored copy of the
    // type-identity-less argument list.
    IAP.PassTypeIdentity = TypeAwareAllocationMode::No;
    IAP.PassAlignment = InitialAlignmentMode;
    Args = std::move(UntypedParameters);
  }
  assert(!S.isStdTypeIdentity(Args[0]->getType(), nullptr));
  return resolveAllocationOverloadInterior(
      S, R, Range, Mode: ResolveMode::Untyped, Args, PassAlignment&: IAP.PassAlignment, Operator,
      AlignedCandidates, AlignArg, Diagnose);
}
2946
2947bool Sema::FindAllocationFunctions(
2948 SourceLocation StartLoc, SourceRange Range,
2949 AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope,
2950 QualType AllocType, bool IsArray, ImplicitAllocationParameters &IAP,
2951 MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew,
2952 FunctionDecl *&OperatorDelete, bool Diagnose) {
2953 // --- Choosing an allocation function ---
2954 // C++ 5.3.4p8 - 14 & 18
2955 // 1) If looking in AllocationFunctionScope::Global scope for allocation
2956 // functions, only look in
2957 // the global scope. Else, if AllocationFunctionScope::Class, only look in
2958 // the scope of the allocated class. If AllocationFunctionScope::Both, look
2959 // in both.
2960 // 2) If an array size is given, look for operator new[], else look for
2961 // operator new.
2962 // 3) The first argument is always size_t. Append the arguments from the
2963 // placement form.
2964
2965 SmallVector<Expr*, 8> AllocArgs;
2966 AllocArgs.reserve(N: IAP.getNumImplicitArgs() + PlaceArgs.size());
2967
2968 // C++ [expr.new]p8:
2969 // If the allocated type is a non-array type, the allocation
2970 // function's name is operator new and the deallocation function's
2971 // name is operator delete. If the allocated type is an array
2972 // type, the allocation function's name is operator new[] and the
2973 // deallocation function's name is operator delete[].
2974 DeclarationName NewName = Context.DeclarationNames.getCXXOperatorName(
2975 Op: IsArray ? OO_Array_New : OO_New);
2976
2977 QualType AllocElemType = Context.getBaseElementType(QT: AllocType);
2978
2979 // We don't care about the actual value of these arguments.
2980 // FIXME: Should the Sema create the expression and embed it in the syntax
2981 // tree? Or should the consumer just recalculate the value?
2982 // FIXME: Using a dummy value will interact poorly with attribute enable_if.
2983
2984 // We use size_t as a stand in so that we can construct the init
2985 // expr on the stack
2986 QualType TypeIdentity = Context.getSizeType();
2987 if (isTypeAwareAllocation(Mode: IAP.PassTypeIdentity)) {
2988 QualType SpecializedTypeIdentity =
2989 tryBuildStdTypeIdentity(Type: IAP.Type, Loc: StartLoc);
2990 if (!SpecializedTypeIdentity.isNull()) {
2991 TypeIdentity = SpecializedTypeIdentity;
2992 if (RequireCompleteType(Loc: StartLoc, T: TypeIdentity,
2993 DiagID: diag::err_incomplete_type))
2994 return true;
2995 } else
2996 IAP.PassTypeIdentity = TypeAwareAllocationMode::No;
2997 }
2998 TypeAwareAllocationMode OriginalTypeAwareState = IAP.PassTypeIdentity;
2999
3000 CXXScalarValueInitExpr TypeIdentityParam(TypeIdentity, nullptr, StartLoc);
3001 if (isTypeAwareAllocation(Mode: IAP.PassTypeIdentity))
3002 AllocArgs.push_back(Elt: &TypeIdentityParam);
3003
3004 QualType SizeTy = Context.getSizeType();
3005 unsigned SizeTyWidth = Context.getTypeSize(T: SizeTy);
3006 IntegerLiteral Size(Context, llvm::APInt::getZero(numBits: SizeTyWidth), SizeTy,
3007 SourceLocation());
3008 AllocArgs.push_back(Elt: &Size);
3009
3010 QualType AlignValT = Context.VoidTy;
3011 bool IncludeAlignParam = isAlignedAllocation(Mode: IAP.PassAlignment) ||
3012 isTypeAwareAllocation(Mode: IAP.PassTypeIdentity);
3013 if (IncludeAlignParam) {
3014 DeclareGlobalNewDelete();
3015 AlignValT = Context.getCanonicalTagType(TD: getStdAlignValT());
3016 }
3017 CXXScalarValueInitExpr Align(AlignValT, nullptr, SourceLocation());
3018 if (IncludeAlignParam)
3019 AllocArgs.push_back(Elt: &Align);
3020
3021 llvm::append_range(C&: AllocArgs, R&: PlaceArgs);
3022
3023 // Find the allocation function.
3024 {
3025 LookupResult R(*this, NewName, StartLoc, LookupOrdinaryName);
3026
3027 // C++1z [expr.new]p9:
3028 // If the new-expression begins with a unary :: operator, the allocation
3029 // function's name is looked up in the global scope. Otherwise, if the
3030 // allocated type is a class type T or array thereof, the allocation
3031 // function's name is looked up in the scope of T.
3032 if (AllocElemType->isRecordType() &&
3033 NewScope != AllocationFunctionScope::Global)
3034 LookupQualifiedName(R, LookupCtx: AllocElemType->getAsCXXRecordDecl());
3035
3036 // We can see ambiguity here if the allocation function is found in
3037 // multiple base classes.
3038 if (R.isAmbiguous())
3039 return true;
3040
3041 // If this lookup fails to find the name, or if the allocated type is not
3042 // a class type, the allocation function's name is looked up in the
3043 // global scope.
3044 if (R.empty()) {
3045 if (NewScope == AllocationFunctionScope::Class)
3046 return true;
3047
3048 LookupQualifiedName(R, LookupCtx: Context.getTranslationUnitDecl());
3049 }
3050
3051 if (getLangOpts().OpenCLCPlusPlus && R.empty()) {
3052 if (PlaceArgs.empty()) {
3053 Diag(Loc: StartLoc, DiagID: diag::err_openclcxx_not_supported) << "default new";
3054 } else {
3055 Diag(Loc: StartLoc, DiagID: diag::err_openclcxx_placement_new);
3056 }
3057 return true;
3058 }
3059
3060 assert(!R.empty() && "implicitly declared allocation functions not found");
3061 assert(!R.isAmbiguous() && "global allocation functions are ambiguous");
3062
3063 // We do our own custom access checks below.
3064 R.suppressDiagnostics();
3065
3066 if (resolveAllocationOverload(S&: *this, R, Range, Args&: AllocArgs, IAP, Operator&: OperatorNew,
3067 /*Candidates=*/AlignedCandidates: nullptr,
3068 /*AlignArg=*/nullptr, Diagnose))
3069 return true;
3070 }
3071
3072 // We don't need an operator delete if we're running under -fno-exceptions.
3073 if (!getLangOpts().Exceptions) {
3074 OperatorDelete = nullptr;
3075 return false;
3076 }
3077
3078 // Note, the name of OperatorNew might have been changed from array to
3079 // non-array by resolveAllocationOverload.
3080 DeclarationName DeleteName = Context.DeclarationNames.getCXXOperatorName(
3081 Op: OperatorNew->getDeclName().getCXXOverloadedOperator() == OO_Array_New
3082 ? OO_Array_Delete
3083 : OO_Delete);
3084
3085 // C++ [expr.new]p19:
3086 //
3087 // If the new-expression begins with a unary :: operator, the
3088 // deallocation function's name is looked up in the global
3089 // scope. Otherwise, if the allocated type is a class type T or an
3090 // array thereof, the deallocation function's name is looked up in
3091 // the scope of T. If this lookup fails to find the name, or if
3092 // the allocated type is not a class type or array thereof, the
3093 // deallocation function's name is looked up in the global scope.
3094 LookupResult FoundDelete(*this, DeleteName, StartLoc, LookupOrdinaryName);
3095 if (AllocElemType->isRecordType() &&
3096 DeleteScope != AllocationFunctionScope::Global) {
3097 auto *RD = AllocElemType->castAsCXXRecordDecl();
3098 LookupQualifiedName(R&: FoundDelete, LookupCtx: RD);
3099 }
3100 if (FoundDelete.isAmbiguous())
3101 return true; // FIXME: clean up expressions?
3102
3103 // Filter out any destroying operator deletes. We can't possibly call such a
3104 // function in this context, because we're handling the case where the object
3105 // was not successfully constructed.
3106 // FIXME: This is not covered by the language rules yet.
3107 {
3108 LookupResult::Filter Filter = FoundDelete.makeFilter();
3109 while (Filter.hasNext()) {
3110 auto *FD = dyn_cast<FunctionDecl>(Val: Filter.next()->getUnderlyingDecl());
3111 if (FD && FD->isDestroyingOperatorDelete())
3112 Filter.erase();
3113 }
3114 Filter.done();
3115 }
3116
3117 auto GetRedeclContext = [](Decl *D) {
3118 return D->getDeclContext()->getRedeclContext();
3119 };
3120
3121 DeclContext *OperatorNewContext = GetRedeclContext(OperatorNew);
3122
3123 bool FoundGlobalDelete = FoundDelete.empty();
3124 bool IsClassScopedTypeAwareNew =
3125 isTypeAwareAllocation(Mode: IAP.PassTypeIdentity) &&
3126 OperatorNewContext->isRecord();
3127 auto DiagnoseMissingTypeAwareCleanupOperator = [&](bool IsPlacementOperator) {
3128 assert(isTypeAwareAllocation(IAP.PassTypeIdentity));
3129 if (Diagnose) {
3130 Diag(Loc: StartLoc, DiagID: diag::err_mismatching_type_aware_cleanup_deallocator)
3131 << OperatorNew->getDeclName() << IsPlacementOperator << DeleteName;
3132 Diag(Loc: OperatorNew->getLocation(), DiagID: diag::note_type_aware_operator_declared)
3133 << OperatorNew->isTypeAwareOperatorNewOrDelete()
3134 << OperatorNew->getDeclName() << OperatorNewContext;
3135 }
3136 };
3137 if (IsClassScopedTypeAwareNew && FoundDelete.empty()) {
3138 DiagnoseMissingTypeAwareCleanupOperator(/*isPlacementNew=*/false);
3139 return true;
3140 }
3141 if (FoundDelete.empty()) {
3142 FoundDelete.clear(Kind: LookupOrdinaryName);
3143
3144 if (DeleteScope == AllocationFunctionScope::Class)
3145 return true;
3146
3147 DeclareGlobalNewDelete();
3148 DeallocLookupMode LookupMode = isTypeAwareAllocation(Mode: OriginalTypeAwareState)
3149 ? DeallocLookupMode::OptionallyTyped
3150 : DeallocLookupMode::Untyped;
3151 LookupGlobalDeallocationFunctions(S&: *this, Loc: StartLoc, FoundDelete, Mode: LookupMode,
3152 Name: DeleteName);
3153 }
3154
3155 FoundDelete.suppressDiagnostics();
3156
3157 SmallVector<std::pair<DeclAccessPair,FunctionDecl*>, 2> Matches;
3158
3159 // Whether we're looking for a placement operator delete is dictated
3160 // by whether we selected a placement operator new, not by whether
3161 // we had explicit placement arguments. This matters for things like
3162 // struct A { void *operator new(size_t, int = 0); ... };
3163 // A *a = new A()
3164 //
3165 // We don't have any definition for what a "placement allocation function"
3166 // is, but we assume it's any allocation function whose
3167 // parameter-declaration-clause is anything other than (size_t).
3168 //
3169 // FIXME: Should (size_t, std::align_val_t) also be considered non-placement?
3170 // This affects whether an exception from the constructor of an overaligned
3171 // type uses the sized or non-sized form of aligned operator delete.
3172
3173 unsigned NonPlacementNewArgCount = 1; // size parameter
3174 if (isTypeAwareAllocation(Mode: IAP.PassTypeIdentity))
3175 NonPlacementNewArgCount =
3176 /* type-identity */ 1 + /* size */ 1 + /* alignment */ 1;
3177 bool isPlacementNew = !PlaceArgs.empty() ||
3178 OperatorNew->param_size() != NonPlacementNewArgCount ||
3179 OperatorNew->isVariadic();
3180
3181 if (isPlacementNew) {
3182 // C++ [expr.new]p20:
3183 // A declaration of a placement deallocation function matches the
3184 // declaration of a placement allocation function if it has the
3185 // same number of parameters and, after parameter transformations
3186 // (8.3.5), all parameter types except the first are
3187 // identical. [...]
3188 //
3189 // To perform this comparison, we compute the function type that
3190 // the deallocation function should have, and use that type both
3191 // for template argument deduction and for comparison purposes.
3192 QualType ExpectedFunctionType;
3193 {
3194 auto *Proto = OperatorNew->getType()->castAs<FunctionProtoType>();
3195
3196 SmallVector<QualType, 6> ArgTypes;
3197 int InitialParamOffset = 0;
3198 if (isTypeAwareAllocation(Mode: IAP.PassTypeIdentity)) {
3199 ArgTypes.push_back(Elt: TypeIdentity);
3200 InitialParamOffset = 1;
3201 }
3202 ArgTypes.push_back(Elt: Context.VoidPtrTy);
3203 for (unsigned I = ArgTypes.size() - InitialParamOffset,
3204 N = Proto->getNumParams();
3205 I < N; ++I)
3206 ArgTypes.push_back(Elt: Proto->getParamType(i: I));
3207
3208 FunctionProtoType::ExtProtoInfo EPI;
3209 // FIXME: This is not part of the standard's rule.
3210 EPI.Variadic = Proto->isVariadic();
3211
3212 ExpectedFunctionType
3213 = Context.getFunctionType(ResultTy: Context.VoidTy, Args: ArgTypes, EPI);
3214 }
3215
3216 for (LookupResult::iterator D = FoundDelete.begin(),
3217 DEnd = FoundDelete.end();
3218 D != DEnd; ++D) {
3219 FunctionDecl *Fn = nullptr;
3220 if (FunctionTemplateDecl *FnTmpl =
3221 dyn_cast<FunctionTemplateDecl>(Val: (*D)->getUnderlyingDecl())) {
3222 // Perform template argument deduction to try to match the
3223 // expected function type.
3224 TemplateDeductionInfo Info(StartLoc);
3225 if (DeduceTemplateArguments(FunctionTemplate: FnTmpl, ExplicitTemplateArgs: nullptr, ArgFunctionType: ExpectedFunctionType, Specialization&: Fn,
3226 Info) != TemplateDeductionResult::Success)
3227 continue;
3228 } else
3229 Fn = cast<FunctionDecl>(Val: (*D)->getUnderlyingDecl());
3230
3231 if (Context.hasSameType(T1: adjustCCAndNoReturn(ArgFunctionType: Fn->getType(),
3232 FunctionType: ExpectedFunctionType,
3233 /*AdjustExcpetionSpec*/AdjustExceptionSpec: true),
3234 T2: ExpectedFunctionType))
3235 Matches.push_back(Elt: std::make_pair(x: D.getPair(), y&: Fn));
3236 }
3237
3238 if (getLangOpts().CUDA)
3239 CUDA().EraseUnwantedMatches(Caller: getCurFunctionDecl(/*AllowLambda=*/true),
3240 Matches);
3241 if (Matches.empty() && isTypeAwareAllocation(Mode: IAP.PassTypeIdentity)) {
3242 DiagnoseMissingTypeAwareCleanupOperator(isPlacementNew);
3243 return true;
3244 }
3245 } else {
3246 // C++1y [expr.new]p22:
3247 // For a non-placement allocation function, the normal deallocation
3248 // function lookup is used
3249 //
3250 // Per [expr.delete]p10, this lookup prefers a member operator delete
3251 // without a size_t argument, but prefers a non-member operator delete
3252 // with a size_t where possible (which it always is in this case).
3253 llvm::SmallVector<UsualDeallocFnInfo, 4> BestDeallocFns;
3254 ImplicitDeallocationParameters IDP = {
3255 AllocElemType, OriginalTypeAwareState,
3256 alignedAllocationModeFromBool(
3257 IsAligned: hasNewExtendedAlignment(S&: *this, AllocType: AllocElemType)),
3258 sizedDeallocationModeFromBool(IsSized: FoundGlobalDelete)};
3259 UsualDeallocFnInfo Selected = resolveDeallocationOverload(
3260 S&: *this, R&: FoundDelete, IDP, Loc: StartLoc, BestFns: &BestDeallocFns);
3261 if (Selected && BestDeallocFns.empty())
3262 Matches.push_back(Elt: std::make_pair(x&: Selected.Found, y&: Selected.FD));
3263 else {
3264 // If we failed to select an operator, all remaining functions are viable
3265 // but ambiguous.
3266 for (auto Fn : BestDeallocFns)
3267 Matches.push_back(Elt: std::make_pair(x&: Fn.Found, y&: Fn.FD));
3268 }
3269 }
3270
3271 // C++ [expr.new]p20:
3272 // [...] If the lookup finds a single matching deallocation
3273 // function, that function will be called; otherwise, no
3274 // deallocation function will be called.
3275 if (Matches.size() == 1) {
3276 OperatorDelete = Matches[0].second;
3277 DeclContext *OperatorDeleteContext = GetRedeclContext(OperatorDelete);
3278 bool FoundTypeAwareOperator =
3279 OperatorDelete->isTypeAwareOperatorNewOrDelete() ||
3280 OperatorNew->isTypeAwareOperatorNewOrDelete();
3281 if (Diagnose && FoundTypeAwareOperator) {
3282 bool MismatchedTypeAwareness =
3283 OperatorDelete->isTypeAwareOperatorNewOrDelete() !=
3284 OperatorNew->isTypeAwareOperatorNewOrDelete();
3285 bool MismatchedContext = OperatorDeleteContext != OperatorNewContext;
3286 if (MismatchedTypeAwareness || MismatchedContext) {
3287 FunctionDecl *Operators[] = {OperatorDelete, OperatorNew};
3288 bool TypeAwareOperatorIndex =
3289 OperatorNew->isTypeAwareOperatorNewOrDelete();
3290 Diag(Loc: StartLoc, DiagID: diag::err_mismatching_type_aware_cleanup_deallocator)
3291 << Operators[TypeAwareOperatorIndex]->getDeclName()
3292 << isPlacementNew
3293 << Operators[!TypeAwareOperatorIndex]->getDeclName()
3294 << GetRedeclContext(Operators[TypeAwareOperatorIndex]);
3295 Diag(Loc: OperatorNew->getLocation(),
3296 DiagID: diag::note_type_aware_operator_declared)
3297 << OperatorNew->isTypeAwareOperatorNewOrDelete()
3298 << OperatorNew->getDeclName() << OperatorNewContext;
3299 Diag(Loc: OperatorDelete->getLocation(),
3300 DiagID: diag::note_type_aware_operator_declared)
3301 << OperatorDelete->isTypeAwareOperatorNewOrDelete()
3302 << OperatorDelete->getDeclName() << OperatorDeleteContext;
3303 }
3304 }
3305
3306 // C++1z [expr.new]p23:
3307 // If the lookup finds a usual deallocation function (3.7.4.2)
3308 // with a parameter of type std::size_t and that function, considered
3309 // as a placement deallocation function, would have been
3310 // selected as a match for the allocation function, the program
3311 // is ill-formed.
3312 if (getLangOpts().CPlusPlus11 && isPlacementNew &&
3313 isNonPlacementDeallocationFunction(S&: *this, FD: OperatorDelete)) {
3314 UsualDeallocFnInfo Info(*this,
3315 DeclAccessPair::make(D: OperatorDelete, AS: AS_public),
3316 AllocElemType, StartLoc);
3317 // Core issue, per mail to core reflector, 2016-10-09:
3318 // If this is a member operator delete, and there is a corresponding
3319 // non-sized member operator delete, this isn't /really/ a sized
3320 // deallocation function, it just happens to have a size_t parameter.
3321 bool IsSizedDelete = isSizedDeallocation(Mode: Info.IDP.PassSize);
3322 if (IsSizedDelete && !FoundGlobalDelete) {
3323 ImplicitDeallocationParameters SizeTestingIDP = {
3324 AllocElemType, Info.IDP.PassTypeIdentity, Info.IDP.PassAlignment,
3325 SizedDeallocationMode::No};
3326 auto NonSizedDelete = resolveDeallocationOverload(
3327 S&: *this, R&: FoundDelete, IDP: SizeTestingIDP, Loc: StartLoc);
3328 if (NonSizedDelete &&
3329 !isSizedDeallocation(Mode: NonSizedDelete.IDP.PassSize) &&
3330 NonSizedDelete.IDP.PassAlignment == Info.IDP.PassAlignment)
3331 IsSizedDelete = false;
3332 }
3333
3334 if (IsSizedDelete && !isTypeAwareAllocation(Mode: IAP.PassTypeIdentity)) {
3335 SourceRange R = PlaceArgs.empty()
3336 ? SourceRange()
3337 : SourceRange(PlaceArgs.front()->getBeginLoc(),
3338 PlaceArgs.back()->getEndLoc());
3339 Diag(Loc: StartLoc, DiagID: diag::err_placement_new_non_placement_delete) << R;
3340 if (!OperatorDelete->isImplicit())
3341 Diag(Loc: OperatorDelete->getLocation(), DiagID: diag::note_previous_decl)
3342 << DeleteName;
3343 }
3344 }
3345 if (CheckDeleteOperator(S&: *this, StartLoc, Range, Diagnose,
3346 NamingClass: FoundDelete.getNamingClass(), Decl: Matches[0].first,
3347 Operator: Matches[0].second))
3348 return true;
3349
3350 } else if (!Matches.empty()) {
3351 // We found multiple suitable operators. Per [expr.new]p20, that means we
3352 // call no 'operator delete' function, but we should at least warn the user.
3353 // FIXME: Suppress this warning if the construction cannot throw.
3354 Diag(Loc: StartLoc, DiagID: diag::warn_ambiguous_suitable_delete_function_found)
3355 << DeleteName << AllocElemType;
3356
3357 for (auto &Match : Matches)
3358 Diag(Loc: Match.second->getLocation(),
3359 DiagID: diag::note_member_declared_here) << DeleteName;
3360 }
3361
3362 return false;
3363}
3364
/// Implicitly declare the replaceable global allocation and deallocation
/// functions (operator new/new[]/delete/delete[], plus their sized and
/// aligned variants where the language options enable them), together with
/// the library types their signatures require (std::bad_alloc in C++98
/// mode, std::align_val_t under aligned allocation). Idempotent: the work
/// happens at most once per translation unit.
void Sema::DeclareGlobalNewDelete() {
  // Already done for this TU; nothing to do.
  if (GlobalNewDeleteDeclared)
    return;

  // The implicitly declared new and delete operators
  // are not supported in OpenCL.
  if (getLangOpts().OpenCLCPlusPlus)
    return;

  // C++ [basic.stc.dynamic.general]p2:
  //   The library provides default definitions for the global allocation
  //   and deallocation functions. Some global allocation and deallocation
  //   functions are replaceable ([new.delete]); these are attached to the
  //   global module ([module.unit]).
  // So, when compiling a module unit, declare them inside an implicit
  // global module fragment (popped again at the end of this function).
  if (getLangOpts().CPlusPlusModules && getCurrentModule())
    PushGlobalModuleFragment(BeginLoc: SourceLocation());

  // C++ [basic.std.dynamic]p2:
  //   [...] The following allocation and deallocation functions (18.4) are
  //   implicitly declared in global scope in each translation unit of a
  //   program
  //
  //     C++03:
  //     void* operator new(std::size_t) throw(std::bad_alloc);
  //     void* operator new[](std::size_t) throw(std::bad_alloc);
  //     void operator delete(void*) throw();
  //     void operator delete[](void*) throw();
  //     C++11:
  //     void* operator new(std::size_t);
  //     void* operator new[](std::size_t);
  //     void operator delete(void*) noexcept;
  //     void operator delete[](void*) noexcept;
  //     C++1y:
  //     void* operator new(std::size_t);
  //     void* operator new[](std::size_t);
  //     void operator delete(void*) noexcept;
  //     void operator delete[](void*) noexcept;
  //     void operator delete(void*, std::size_t) noexcept;
  //     void operator delete[](void*, std::size_t) noexcept;
  //
  //   These implicit declarations introduce only the function names operator
  //   new, operator new[], operator delete, operator delete[].
  //
  // Here, we need to refer to std::bad_alloc, so we will implicitly declare
  // "std" or "bad_alloc" as necessary to form the exception specification.
  // However, we do not make these implicit declarations visible to name
  // lookup.
  if (!StdBadAlloc && !getLangOpts().CPlusPlus11) {
    // The "std::bad_alloc" class has not yet been declared, so build it
    // implicitly. Only needed pre-C++11, where operator new is declared
    // throw(std::bad_alloc).
    StdBadAlloc = CXXRecordDecl::Create(
        C: Context, TK: TagTypeKind::Class, DC: getOrCreateStdNamespace(),
        StartLoc: SourceLocation(), IdLoc: SourceLocation(),
        Id: &PP.getIdentifierTable().get(Name: "bad_alloc"), PrevDecl: nullptr);
    getStdBadAlloc()->setImplicit(true);

    // The implicitly declared "std::bad_alloc" should live in global module
    // fragment.
    if (TheGlobalModuleFragment) {
      getStdBadAlloc()->setModuleOwnershipKind(
          Decl::ModuleOwnershipKind::ReachableWhenImported);
      getStdBadAlloc()->setLocalOwningModule(TheGlobalModuleFragment);
    }
  }
  if (!StdAlignValT && getLangOpts().AlignedAllocation) {
    // The "std::align_val_t" enum class has not yet been declared, so build it
    // implicitly (scoped enum with fixed underlying type).
    auto *AlignValT = EnumDecl::Create(
        C&: Context, DC: getOrCreateStdNamespace(), StartLoc: SourceLocation(), IdLoc: SourceLocation(),
        Id: &PP.getIdentifierTable().get(Name: "align_val_t"), PrevDecl: nullptr, IsScoped: true, IsScopedUsingClassTag: true, IsFixed: true);

    // The implicitly declared "std::align_val_t" should live in global module
    // fragment.
    if (TheGlobalModuleFragment) {
      AlignValT->setModuleOwnershipKind(
          Decl::ModuleOwnershipKind::ReachableWhenImported);
      AlignValT->setLocalOwningModule(TheGlobalModuleFragment);
    }

    // Per [support.types.layout], align_val_t's underlying type is size_t.
    AlignValT->setIntegerType(Context.getSizeType());
    AlignValT->setPromotionType(Context.getSizeType());
    AlignValT->setImplicit(true);

    StdAlignValT = AlignValT;
  }

  // Record completion before emitting the declarations themselves.
  GlobalNewDeleteDeclared = true;

  QualType VoidPtr = Context.getPointerType(T: Context.VoidTy);
  QualType SizeT = Context.getSizeType();

  // Declares every enabled variant of one operator: the base form, plus the
  // sized form (delete only, under -fsized-deallocation) and the aligned
  // form (under aligned allocation), and the combination of both.
  auto DeclareGlobalAllocationFunctions = [&](OverloadedOperatorKind Kind,
                                              QualType Return, QualType Param) {
    llvm::SmallVector<QualType, 3> Params;
    Params.push_back(Elt: Param);

    // Create up to four variants of the function (sized/aligned).
    bool HasSizedVariant = getLangOpts().SizedDeallocation &&
                           (Kind == OO_Delete || Kind == OO_Array_Delete);
    bool HasAlignedVariant = getLangOpts().AlignedAllocation;

    int NumSizeVariants = (HasSizedVariant ? 2 : 1);
    int NumAlignVariants = (HasAlignedVariant ? 2 : 1);
    for (int Sized = 0; Sized < NumSizeVariants; ++Sized) {
      if (Sized)
        Params.push_back(Elt: SizeT);

      for (int Aligned = 0; Aligned < NumAlignVariants; ++Aligned) {
        if (Aligned)
          Params.push_back(Elt: Context.getCanonicalTagType(TD: getStdAlignValT()));

        DeclareGlobalAllocationFunction(
            Name: Context.DeclarationNames.getCXXOperatorName(Op: Kind), Return, Params);

        // Pop the alignment parameter so the next iteration starts from the
        // (possibly sized) base parameter list again.
        if (Aligned)
          Params.pop_back();
      }
    }
  };

  DeclareGlobalAllocationFunctions(OO_New, VoidPtr, SizeT);
  DeclareGlobalAllocationFunctions(OO_Array_New, VoidPtr, SizeT);
  DeclareGlobalAllocationFunctions(OO_Delete, Context.VoidTy, VoidPtr);
  DeclareGlobalAllocationFunctions(OO_Array_Delete, Context.VoidTy, VoidPtr);

  // Balance the PushGlobalModuleFragment() above.
  if (getLangOpts().CPlusPlusModules && getCurrentModule())
    PopGlobalModuleFragment();
}
3493
/// DeclareGlobalAllocationFunction - Declares a single implicit global
/// allocation function if it doesn't already exist.
///
/// \param Name the operator name (operator new/new[]/delete/delete[]).
/// \param Return the return type of the variant (void* for new, void for
///        delete).
/// \param Params the parameter types of the variant being declared.
void Sema::DeclareGlobalAllocationFunction(DeclarationName Name,
                                           QualType Return,
                                           ArrayRef<QualType> Params) {
  DeclContext *GlobalCtx = Context.getTranslationUnitDecl();

  // Check if this function is already declared.
  DeclContext::lookup_result R = GlobalCtx->lookup(Name);
  for (DeclContext::lookup_iterator Alloc = R.begin(), AllocEnd = R.end();
       Alloc != AllocEnd; ++Alloc) {
    // Only look at non-template functions, as it is the predefined,
    // non-templated allocation function we are trying to declare here.
    if (FunctionDecl *Func = dyn_cast<FunctionDecl>(Val: *Alloc)) {
      if (Func->getNumParams() == Params.size()) {
        // An existing declaration matches if every parameter type agrees,
        // ignoring top-level qualifiers.
        if (std::equal(first1: Func->param_begin(), last1: Func->param_end(), first2: Params.begin(),
                       last2: Params.end(), binary_pred: [&](ParmVarDecl *D, QualType RT) {
                         return Context.hasSameUnqualifiedType(T1: D->getType(),
                                                               T2: RT);
                       })) {
          // Make the function visible to name lookup, even if we found it in
          // an unimported module. It either is an implicitly-declared global
          // allocation function, or is suppressing that function.
          Func->setVisibleDespiteOwningModule();
          return;
        }
      }
    }
  }

  FunctionProtoType::ExtProtoInfo EPI(
      Context.getTargetInfo().getDefaultCallingConv());

  // Build the exception specification: pre-C++11 operator new is declared
  // throw(std::bad_alloc); C++11 onwards it is potentially-throwing, and
  // operator delete is noexcept (throw() pre-C++11).
  QualType BadAllocType;
  bool HasBadAllocExceptionSpec = Name.isAnyOperatorNew();
  if (HasBadAllocExceptionSpec) {
    if (!getLangOpts().CPlusPlus11) {
      // NOTE(review): the assert below follows the first use of
      // getStdBadAlloc(); presumably DeclareGlobalNewDelete() always creates
      // std::bad_alloc before reaching here in C++98 mode — confirm.
      BadAllocType = Context.getCanonicalTagType(TD: getStdBadAlloc());
      assert(StdBadAlloc && "Must have std::bad_alloc declared");
      EPI.ExceptionSpec.Type = EST_Dynamic;
      EPI.ExceptionSpec.Exceptions = llvm::ArrayRef(BadAllocType);
    }
    if (getLangOpts().NewInfallible) {
      // -fnew-infallible: operator new is treated as non-throwing.
      EPI.ExceptionSpec.Type = EST_DynamicNone;
    }
  } else {
    EPI.ExceptionSpec =
        getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone;
  }

  // Creates and registers one FunctionDecl for the variant, optionally
  // tagged with ExtraAttr (used for the CUDA host/device split below).
  auto CreateAllocationFunctionDecl = [&](Attr *ExtraAttr) {
    // The MSVC STL has explicit cdecl on its (host-side) allocation function
    // specializations for the allocation, so in order to prevent a CC clash
    // we use the host's CC, if available, or CC_C as a fallback, for the
    // host-side implicit decls, knowing these do not get emitted when compiling
    // for device.
    if (getLangOpts().CUDAIsDevice && ExtraAttr &&
        isa<CUDAHostAttr>(Val: ExtraAttr) &&
        Context.getTargetInfo().getTriple().isSPIRV()) {
      if (auto *ATI = Context.getAuxTargetInfo())
        EPI.ExtInfo = EPI.ExtInfo.withCallingConv(cc: ATI->getDefaultCallingConv());
      else
        EPI.ExtInfo = EPI.ExtInfo.withCallingConv(cc: CallingConv::CC_C);
    }
    QualType FnType = Context.getFunctionType(ResultTy: Return, Args: Params, EPI);
    FunctionDecl *Alloc = FunctionDecl::Create(
        C&: Context, DC: GlobalCtx, StartLoc: SourceLocation(), NLoc: SourceLocation(), N: Name, T: FnType,
        /*TInfo=*/nullptr, SC: SC_None, UsesFPIntrin: getCurFPFeatures().isFPConstrained(), isInlineSpecified: false,
        hasWrittenPrototype: true);
    Alloc->setImplicit();
    // Global allocation functions should always be visible.
    Alloc->setVisibleDespiteOwningModule();

    // An infallible operator new never returns null (unless -fcheck-new
    // asks us to verify the result anyway).
    if (HasBadAllocExceptionSpec && getLangOpts().NewInfallible &&
        !getLangOpts().CheckNew)
      Alloc->addAttr(
          A: ReturnsNonNullAttr::CreateImplicit(Ctx&: Context, Range: Alloc->getLocation()));

    // C++ [basic.stc.dynamic.general]p2:
    //   The library provides default definitions for the global allocation
    //   and deallocation functions. Some global allocation and deallocation
    //   functions are replaceable ([new.delete]); these are attached to the
    //   global module ([module.unit]).
    //
    // In the language wording, these functions are attached to the global
    // module all the time. But in the implementation, the global module
    // is only meaningful when we're in a module unit. So here we attach
    // these allocation functions to global module conditionally.
    if (TheGlobalModuleFragment) {
      Alloc->setModuleOwnershipKind(
          Decl::ModuleOwnershipKind::ReachableWhenImported);
      Alloc->setLocalOwningModule(TheGlobalModuleFragment);
    }

    // Honor -fvisibility-global-new-delete=<...> on the implicit decls.
    if (LangOpts.hasGlobalAllocationFunctionVisibility())
      Alloc->addAttr(A: VisibilityAttr::CreateImplicit(
          Ctx&: Context, Visibility: LangOpts.hasHiddenGlobalAllocationFunctionVisibility()
                       ? VisibilityAttr::Hidden
                   : LangOpts.hasProtectedGlobalAllocationFunctionVisibility()
                       ? VisibilityAttr::Protected
                       : VisibilityAttr::Default));

    // Synthesize unnamed, implicit parameters matching Params.
    llvm::SmallVector<ParmVarDecl *, 3> ParamDecls;
    for (QualType T : Params) {
      ParamDecls.push_back(Elt: ParmVarDecl::Create(
          C&: Context, DC: Alloc, StartLoc: SourceLocation(), IdLoc: SourceLocation(), Id: nullptr, T,
          /*TInfo=*/nullptr, S: SC_None, DefArg: nullptr));
      ParamDecls.back()->setImplicit();
    }
    Alloc->setParams(ParamDecls);
    if (ExtraAttr)
      Alloc->addAttr(A: ExtraAttr);
    AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(FD: Alloc);
    Context.getTranslationUnitDecl()->addDecl(D: Alloc);
    IdResolver.tryAddTopLevelDecl(D: Alloc, Name);
  };

  if (!LangOpts.CUDA)
    CreateAllocationFunctionDecl(nullptr);
  else {
    // Host and device get their own declaration so each can be
    // defined or re-declared independently.
    CreateAllocationFunctionDecl(CUDAHostAttr::CreateImplicit(Ctx&: Context));
    CreateAllocationFunctionDecl(CUDADeviceAttr::CreateImplicit(Ctx&: Context));
  }
}
3620
3621FunctionDecl *
3622Sema::FindUsualDeallocationFunction(SourceLocation StartLoc,
3623 ImplicitDeallocationParameters IDP,
3624 DeclarationName Name, bool Diagnose) {
3625 DeclareGlobalNewDelete();
3626
3627 LookupResult FoundDelete(*this, Name, StartLoc, LookupOrdinaryName);
3628 LookupGlobalDeallocationFunctions(S&: *this, Loc: StartLoc, FoundDelete,
3629 Mode: DeallocLookupMode::OptionallyTyped, Name);
3630
3631 // FIXME: It's possible for this to result in ambiguity, through a
3632 // user-declared variadic operator delete or the enable_if attribute. We
3633 // should probably not consider those cases to be usual deallocation
3634 // functions. But for now we just make an arbitrary choice in that case.
3635 auto Result = resolveDeallocationOverload(S&: *this, R&: FoundDelete, IDP, Loc: StartLoc);
3636 if (!Result)
3637 return nullptr;
3638
3639 if (CheckDeleteOperator(S&: *this, StartLoc, Range: StartLoc, Diagnose,
3640 NamingClass: FoundDelete.getNamingClass(), Decl: Result.Found,
3641 Operator: Result.FD))
3642 return nullptr;
3643
3644 assert(Result.FD && "operator delete missing from global scope?");
3645 return Result.FD;
3646}
3647
3648FunctionDecl *Sema::FindDeallocationFunctionForDestructor(
3649 SourceLocation Loc, CXXRecordDecl *RD, bool Diagnose, bool LookForGlobal,
3650 DeclarationName Name) {
3651
3652 FunctionDecl *OperatorDelete = nullptr;
3653 CanQualType DeallocType = Context.getCanonicalTagType(TD: RD);
3654 ImplicitDeallocationParameters IDP = {
3655 DeallocType, ShouldUseTypeAwareOperatorNewOrDelete(),
3656 AlignedAllocationMode::No, SizedDeallocationMode::No};
3657
3658 if (!LookForGlobal) {
3659 if (FindDeallocationFunction(StartLoc: Loc, RD, Name, Operator&: OperatorDelete, IDP, Diagnose))
3660 return nullptr;
3661
3662 if (OperatorDelete)
3663 return OperatorDelete;
3664 }
3665
3666 // If there's no class-specific operator delete, look up the global
3667 // non-array delete.
3668 IDP.PassAlignment = alignedAllocationModeFromBool(
3669 IsAligned: hasNewExtendedAlignment(S&: *this, AllocType: DeallocType));
3670 IDP.PassSize = SizedDeallocationMode::Yes;
3671 return FindUsualDeallocationFunction(StartLoc: Loc, IDP, Name, Diagnose);
3672}
3673
/// Look up operator delete / operator delete[] named \p Name in the scope of
/// class \p RD and select the best usual deallocation function.
///
/// \param[out] Operator receives the selected member function; it is set to
///        null when the class declares no such operator at all.
/// \returns true on error (ambiguous lookup, failed delete-operator checks,
///          ambiguous or unsuitable candidates); false when a function was
///          selected or the class has none.
bool Sema::FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
                                    DeclarationName Name,
                                    FunctionDecl *&Operator,
                                    ImplicitDeallocationParameters IDP,
                                    bool Diagnose) {
  LookupResult Found(*this, Name, StartLoc, LookupOrdinaryName);
  // Try to find operator delete/operator delete[] in class scope.
  LookupQualifiedName(R&: Found, LookupCtx: RD);

  if (Found.isAmbiguous()) {
    // Keep the ambiguity diagnostics only when the caller asked for them.
    if (!Diagnose)
      Found.suppressDiagnostics();
    return true;
  }

  // All further diagnostics are emitted explicitly below.
  Found.suppressDiagnostics();

  // A class with new-extended alignment implicitly wants the aligned form,
  // even if the caller did not request it.
  if (!isAlignedAllocation(Mode: IDP.PassAlignment) &&
      hasNewExtendedAlignment(S&: *this, AllocType: Context.getCanonicalTagType(TD: RD)))
    IDP.PassAlignment = AlignedAllocationMode::Yes;

  // C++17 [expr.delete]p10:
  //   If the deallocation functions have class scope, the one without a
  //   parameter of type std::size_t is selected.
  llvm::SmallVector<UsualDeallocFnInfo, 4> Matches;
  resolveDeallocationOverload(S&: *this, R&: Found, IDP, Loc: StartLoc, BestFns: &Matches);

  // If we could find an overload, use it.
  if (Matches.size() == 1) {
    Operator = cast<CXXMethodDecl>(Val: Matches[0].FD);
    return CheckDeleteOperator(S&: *this, StartLoc, Range: StartLoc, Diagnose,
                               NamingClass: Found.getNamingClass(), Decl: Matches[0].Found,
                               Operator);
  }

  // We found multiple suitable operators; complain about the ambiguity.
  // FIXME: The standard doesn't say to do this; it appears that the intent
  // is that this should never happen.
  if (!Matches.empty()) {
    if (Diagnose) {
      Diag(Loc: StartLoc, DiagID: diag::err_ambiguous_suitable_delete_member_function_found)
        << Name << RD;
      for (auto &Match : Matches)
        Diag(Loc: Match.FD->getLocation(), DiagID: diag::note_member_declared_here) << Name;
    }
    return true;
  }

  // We did find operator delete/operator delete[] declarations, but
  // none of them were suitable.
  if (!Found.empty()) {
    if (Diagnose) {
      Diag(Loc: StartLoc, DiagID: diag::err_no_suitable_delete_member_function_found)
        << Name << RD;

      for (NamedDecl *D : Found)
        Diag(Loc: D->getUnderlyingDecl()->getLocation(),
             DiagID: diag::note_member_declared_here) << Name;
    }
    return true;
  }

  // No class-scope operator at all: not an error, just nothing selected.
  Operator = nullptr;
  return false;
}
3739
namespace {
/// Checks whether delete-expression, and new-expression used for
/// initializing deletee have the same array form.
class MismatchingNewDeleteDetector {
public:
  enum MismatchResult {
    /// Indicates that there is no mismatch or a mismatch cannot be proven.
    NoMismatch,
    /// Indicates that variable is initialized with mismatching form of \a new.
    VarInitMismatches,
    /// Indicates that member is initialized with mismatching form of \a new.
    MemberInitMismatches,
    /// Indicates that 1 or more constructors' definitions could not been
    /// analyzed, and they will be checked again at the end of translation unit.
    AnalyzeLater
  };

  /// \param EndOfTU True, if this is the final analysis at the end of
  /// translation unit. False, if this is the initial analysis at the point
  /// delete-expression was encountered.
  explicit MismatchingNewDeleteDetector(bool EndOfTU)
      : Field(nullptr), IsArrayForm(false), EndOfTU(EndOfTU),
        HasUndefinedConstructors(false) {}

  /// Checks whether pointee of a delete-expression is initialized with
  /// matching form of new-expression.
  ///
  /// If return value is \c VarInitMismatches or \c MemberInitMismatches at the
  /// point where delete-expression is encountered, then a warning will be
  /// issued immediately. If return value is \c AnalyzeLater at the point where
  /// delete-expression is seen, then member will be analyzed at the end of
  /// translation unit. \c AnalyzeLater is returned iff at least one constructor
  /// couldn't be analyzed. If at least one constructor initializes the member
  /// with matching type of new, the return value is \c NoMismatch.
  MismatchResult analyzeDeleteExpr(const CXXDeleteExpr *DE);
  /// Analyzes a class member.
  /// \param Field Class member to analyze.
  /// \param DeleteWasArrayForm Array form-ness of the delete-expression used
  /// for deleting the \p Field.
  MismatchResult analyzeField(FieldDecl *Field, bool DeleteWasArrayForm);
  /// The member currently under analysis; compared against constructor
  /// initializers to find the initializing new-expressions.
  FieldDecl *Field;
  /// List of mismatching new-expressions used for initialization of the pointee
  llvm::SmallVector<const CXXNewExpr *, 4> NewExprs;
  /// Indicates whether delete-expression was in array form.
  bool IsArrayForm;

private:
  const bool EndOfTU;
  /// Indicates that there is at least one constructor without body.
  bool HasUndefinedConstructors;
  /// Returns \c CXXNewExpr from given initialization expression.
  /// \param E Expression used for initializing pointee in delete-expression.
  /// E can be a single-element \c InitListExpr consisting of new-expression.
  const CXXNewExpr *getNewExprFromInitListOrExpr(const Expr *E);
  /// Returns whether member is initialized with mismatching form of
  /// \c new either by the member initializer or in-class initialization.
  ///
  /// If bodies of all constructors are not visible at the end of translation
  /// unit or at least one constructor initializes member with the matching
  /// form of \c new, mismatch cannot be proven, and this function will return
  /// \c NoMismatch.
  MismatchResult analyzeMemberExpr(const MemberExpr *ME);
  /// Returns whether variable is initialized with mismatching form of
  /// \c new.
  ///
  /// If variable is initialized with matching form of \c new or variable is not
  /// initialized with a \c new expression, this function will return true.
  /// If variable is initialized with mismatching form of \c new, returns false.
  /// \param D Variable to analyze.
  bool hasMatchingVarInit(const DeclRefExpr *D);
  /// Checks whether the constructor initializes pointee with mismatching
  /// form of \c new.
  ///
  /// Returns true, if member is initialized with matching form of \c new in
  /// member initializer list. Returns false, if member is initialized with the
  /// matching form of \c new in this constructor's initializer or given
  /// constructor isn't defined at the point where delete-expression is seen, or
  /// member isn't initialized by the constructor.
  bool hasMatchingNewInCtor(const CXXConstructorDecl *CD);
  /// Checks whether member is initialized with matching form of
  /// \c new in member initializer list.
  bool hasMatchingNewInCtorInit(const CXXCtorInitializer *CI);
  /// Checks whether member is initialized with mismatching form of \c new by
  /// in-class initializer.
  MismatchResult analyzeInClassInitializer();
};
} // namespace
3827
3828MismatchingNewDeleteDetector::MismatchResult
3829MismatchingNewDeleteDetector::analyzeDeleteExpr(const CXXDeleteExpr *DE) {
3830 NewExprs.clear();
3831 assert(DE && "Expected delete-expression");
3832 IsArrayForm = DE->isArrayForm();
3833 const Expr *E = DE->getArgument()->IgnoreParenImpCasts();
3834 if (const MemberExpr *ME = dyn_cast<const MemberExpr>(Val: E)) {
3835 return analyzeMemberExpr(ME);
3836 } else if (const DeclRefExpr *D = dyn_cast<const DeclRefExpr>(Val: E)) {
3837 if (!hasMatchingVarInit(D))
3838 return VarInitMismatches;
3839 }
3840 return NoMismatch;
3841}
3842
3843const CXXNewExpr *
3844MismatchingNewDeleteDetector::getNewExprFromInitListOrExpr(const Expr *E) {
3845 assert(E != nullptr && "Expected a valid initializer expression");
3846 E = E->IgnoreParenImpCasts();
3847 if (const InitListExpr *ILE = dyn_cast<const InitListExpr>(Val: E)) {
3848 if (ILE->getNumInits() == 1)
3849 E = dyn_cast<const CXXNewExpr>(Val: ILE->getInit(Init: 0)->IgnoreParenImpCasts());
3850 }
3851
3852 return dyn_cast_or_null<const CXXNewExpr>(Val: E);
3853}
3854
3855bool MismatchingNewDeleteDetector::hasMatchingNewInCtorInit(
3856 const CXXCtorInitializer *CI) {
3857 const CXXNewExpr *NE = nullptr;
3858 if (Field == CI->getMember() &&
3859 (NE = getNewExprFromInitListOrExpr(E: CI->getInit()))) {
3860 if (NE->isArray() == IsArrayForm)
3861 return true;
3862 else
3863 NewExprs.push_back(Elt: NE);
3864 }
3865 return false;
3866}
3867
3868bool MismatchingNewDeleteDetector::hasMatchingNewInCtor(
3869 const CXXConstructorDecl *CD) {
3870 if (CD->isImplicit())
3871 return false;
3872 const FunctionDecl *Definition = CD;
3873 if (!CD->isThisDeclarationADefinition() && !CD->isDefined(Definition)) {
3874 HasUndefinedConstructors = true;
3875 return EndOfTU;
3876 }
3877 for (const auto *CI : cast<const CXXConstructorDecl>(Val: Definition)->inits()) {
3878 if (hasMatchingNewInCtorInit(CI))
3879 return true;
3880 }
3881 return false;
3882}
3883
3884MismatchingNewDeleteDetector::MismatchResult
3885MismatchingNewDeleteDetector::analyzeInClassInitializer() {
3886 assert(Field != nullptr && "This should be called only for members");
3887 const Expr *InitExpr = Field->getInClassInitializer();
3888 if (!InitExpr)
3889 return EndOfTU ? NoMismatch : AnalyzeLater;
3890 if (const CXXNewExpr *NE = getNewExprFromInitListOrExpr(E: InitExpr)) {
3891 if (NE->isArray() != IsArrayForm) {
3892 NewExprs.push_back(Elt: NE);
3893 return MemberInitMismatches;
3894 }
3895 }
3896 return NoMismatch;
3897}
3898
3899MismatchingNewDeleteDetector::MismatchResult
3900MismatchingNewDeleteDetector::analyzeField(FieldDecl *Field,
3901 bool DeleteWasArrayForm) {
3902 assert(Field != nullptr && "Analysis requires a valid class member.");
3903 this->Field = Field;
3904 IsArrayForm = DeleteWasArrayForm;
3905 const CXXRecordDecl *RD = cast<const CXXRecordDecl>(Val: Field->getParent());
3906 for (const auto *CD : RD->ctors()) {
3907 if (hasMatchingNewInCtor(CD))
3908 return NoMismatch;
3909 }
3910 if (HasUndefinedConstructors)
3911 return EndOfTU ? NoMismatch : AnalyzeLater;
3912 if (!NewExprs.empty())
3913 return MemberInitMismatches;
3914 return Field->hasInClassInitializer() ? analyzeInClassInitializer()
3915 : NoMismatch;
3916}
3917
3918MismatchingNewDeleteDetector::MismatchResult
3919MismatchingNewDeleteDetector::analyzeMemberExpr(const MemberExpr *ME) {
3920 assert(ME != nullptr && "Expected a member expression");
3921 if (FieldDecl *F = dyn_cast<FieldDecl>(Val: ME->getMemberDecl()))
3922 return analyzeField(Field: F, DeleteWasArrayForm: IsArrayForm);
3923 return NoMismatch;
3924}
3925
3926bool MismatchingNewDeleteDetector::hasMatchingVarInit(const DeclRefExpr *D) {
3927 const CXXNewExpr *NE = nullptr;
3928 if (const VarDecl *VD = dyn_cast<const VarDecl>(Val: D->getDecl())) {
3929 if (VD->hasInit() && (NE = getNewExprFromInitListOrExpr(E: VD->getInit())) &&
3930 NE->isArray() != IsArrayForm) {
3931 NewExprs.push_back(Elt: NE);
3932 }
3933 }
3934 return NewExprs.empty();
3935}
3936
3937static void
3938DiagnoseMismatchedNewDelete(Sema &SemaRef, SourceLocation DeleteLoc,
3939 const MismatchingNewDeleteDetector &Detector) {
3940 SourceLocation EndOfDelete = SemaRef.getLocForEndOfToken(Loc: DeleteLoc);
3941 FixItHint H;
3942 if (!Detector.IsArrayForm)
3943 H = FixItHint::CreateInsertion(InsertionLoc: EndOfDelete, Code: "[]");
3944 else {
3945 SourceLocation RSquare = Lexer::findLocationAfterToken(
3946 loc: DeleteLoc, TKind: tok::l_square, SM: SemaRef.getSourceManager(),
3947 LangOpts: SemaRef.getLangOpts(), SkipTrailingWhitespaceAndNewLine: true);
3948 if (RSquare.isValid())
3949 H = FixItHint::CreateRemoval(RemoveRange: SourceRange(EndOfDelete, RSquare));
3950 }
3951 SemaRef.Diag(Loc: DeleteLoc, DiagID: diag::warn_mismatched_delete_new)
3952 << Detector.IsArrayForm << H;
3953
3954 for (const auto *NE : Detector.NewExprs)
3955 SemaRef.Diag(Loc: NE->getExprLoc(), DiagID: diag::note_allocated_here)
3956 << Detector.IsArrayForm;
3957}
3958
3959void Sema::AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE) {
3960 if (Diags.isIgnored(DiagID: diag::warn_mismatched_delete_new, Loc: SourceLocation()))
3961 return;
3962 MismatchingNewDeleteDetector Detector(/*EndOfTU=*/false);
3963 switch (Detector.analyzeDeleteExpr(DE)) {
3964 case MismatchingNewDeleteDetector::VarInitMismatches:
3965 case MismatchingNewDeleteDetector::MemberInitMismatches: {
3966 DiagnoseMismatchedNewDelete(SemaRef&: *this, DeleteLoc: DE->getBeginLoc(), Detector);
3967 break;
3968 }
3969 case MismatchingNewDeleteDetector::AnalyzeLater: {
3970 DeleteExprs[Detector.Field].push_back(
3971 Elt: std::make_pair(x: DE->getBeginLoc(), y: DE->isArrayForm()));
3972 break;
3973 }
3974 case MismatchingNewDeleteDetector::NoMismatch:
3975 break;
3976 }
3977}
3978
3979void Sema::AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
3980 bool DeleteWasArrayForm) {
3981 MismatchingNewDeleteDetector Detector(/*EndOfTU=*/true);
3982 switch (Detector.analyzeField(Field, DeleteWasArrayForm)) {
3983 case MismatchingNewDeleteDetector::VarInitMismatches:
3984 llvm_unreachable("This analysis should have been done for class members.");
3985 case MismatchingNewDeleteDetector::AnalyzeLater:
3986 llvm_unreachable("Analysis cannot be postponed any point beyond end of "
3987 "translation unit.");
3988 case MismatchingNewDeleteDetector::MemberInitMismatches:
3989 DiagnoseMismatchedNewDelete(SemaRef&: *this, DeleteLoc, Detector);
3990 break;
3991 case MismatchingNewDeleteDetector::NoMismatch:
3992 break;
3993 }
3994}
3995
/// Perform semantic analysis for a delete-expression and build the
/// CXXDeleteExpr AST node.
///
/// \param StartLoc Location of the 'delete' (or '::delete') keyword.
/// \param UseGlobal True if written as '::delete'.
/// \param ArrayForm True if written as 'delete[]'.
/// \param ExE The operand expression.
ExprResult
Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
                     bool ArrayForm, Expr *ExE) {
  // C++ [expr.delete]p1:
  //   The operand shall have a pointer type, or a class type having a single
  //   non-explicit conversion function to a pointer type. The result has type
  //   void.
  //
  // DR599 amends "pointer type" to "pointer to object type" in both cases.

  ExprResult Ex = ExE;
  FunctionDecl *OperatorDelete = nullptr;
  // Remember the spelled form: below we may promote a scalar delete of an
  // array-typed pointee to the array form while diagnosing it.
  bool ArrayFormAsWritten = ArrayForm;
  bool UsualArrayDeleteWantsSize = false;

  if (!Ex.get()->isTypeDependent()) {
    // Perform lvalue-to-rvalue cast, if needed.
    Ex = DefaultLvalueConversion(E: Ex.get());
    if (Ex.isInvalid())
      return ExprError();

    QualType Type = Ex.get()->getType();

    // Local converter implementing the contextual implicit conversion of
    // the operand to a pointer-to-(possibly incomplete)-object type, with
    // delete-specific diagnostics.
    class DeleteConverter : public ContextualImplicitConverter {
    public:
      DeleteConverter() : ContextualImplicitConverter(false, true) {}

      bool match(QualType ConvType) override {
        // FIXME: If we have an operator T* and an operator void*, we must pick
        // the operator T*.
        if (const PointerType *ConvPtrType = ConvType->getAs<PointerType>())
          if (ConvPtrType->getPointeeType()->isIncompleteOrObjectType())
            return true;
        return false;
      }

      SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc,
                                            QualType T) override {
        return S.Diag(Loc, DiagID: diag::err_delete_operand) << T;
      }

      SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc,
                                               QualType T) override {
        return S.Diag(Loc, DiagID: diag::err_delete_incomplete_class_type) << T;
      }

      SemaDiagnosticBuilder diagnoseExplicitConv(Sema &S, SourceLocation Loc,
                                                 QualType T,
                                                 QualType ConvTy) override {
        return S.Diag(Loc, DiagID: diag::err_delete_explicit_conversion) << T << ConvTy;
      }

      SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv,
                                             QualType ConvTy) override {
        return S.Diag(Loc: Conv->getLocation(), DiagID: diag::note_delete_conversion)
                 << ConvTy;
      }

      SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc,
                                              QualType T) override {
        return S.Diag(Loc, DiagID: diag::err_ambiguous_delete_operand) << T;
      }

      SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv,
                                          QualType ConvTy) override {
        return S.Diag(Loc: Conv->getLocation(), DiagID: diag::note_delete_conversion)
                 << ConvTy;
      }

      SemaDiagnosticBuilder diagnoseConversion(Sema &S, SourceLocation Loc,
                                               QualType T,
                                               QualType ConvTy) override {
        llvm_unreachable("conversion functions are permitted");
      }
    } Converter;

    Ex = PerformContextualImplicitConversion(Loc: StartLoc, FromE: Ex.get(), Converter);
    if (Ex.isInvalid())
      return ExprError();
    Type = Ex.get()->getType();
    if (!Converter.match(ConvType: Type))
      // FIXME: PerformContextualImplicitConversion should return ExprError
      // itself in this case.
      return ExprError();

    QualType Pointee = Type->castAs<PointerType>()->getPointeeType();
    QualType PointeeElem = Context.getBaseElementType(QT: Pointee);

    // Deleting an address-space-qualified pointer is only meaningful in
    // OpenCL C++; reject it elsewhere.
    if (Pointee.getAddressSpace() != LangAS::Default &&
        !getLangOpts().OpenCLCPlusPlus)
      return Diag(Loc: Ex.get()->getBeginLoc(),
                  DiagID: diag::err_address_space_qualified_delete)
             << Pointee.getUnqualifiedType()
             << Qualifiers::getAddrSpaceAsString(AS: Pointee.getAddressSpace());

    CXXRecordDecl *PointeeRD = nullptr;
    if (Pointee->isVoidType() && !isSFINAEContext()) {
      // The C++ standard bans deleting a pointer to a non-object type, which
      // effectively bans deletion of "void*". However, most compilers support
      // this, so we treat it as a warning unless we're in a SFINAE context.
      // But we still prohibit this since C++26.
      Diag(Loc: StartLoc, DiagID: LangOpts.CPlusPlus26 ? diag::err_delete_incomplete
                                         : diag::ext_delete_void_ptr_operand)
          << (LangOpts.CPlusPlus26 ? Pointee : Type)
          << Ex.get()->getSourceRange();
    } else if (Pointee->isFunctionType() || Pointee->isVoidType() ||
               Pointee->isSizelessType()) {
      return ExprError(Diag(Loc: StartLoc, DiagID: diag::err_delete_operand)
                       << Type << Ex.get()->getSourceRange());
    } else if (!Pointee->isDependentType()) {
      // FIXME: This can result in errors if the definition was imported from a
      // module but is hidden.
      if (Pointee->isEnumeralType() ||
          !RequireCompleteType(Loc: StartLoc, T: Pointee,
                               DiagID: LangOpts.CPlusPlus26
                                   ? diag::err_delete_incomplete
                                   : diag::warn_delete_incomplete,
                               Args: Ex.get())) {
        PointeeRD = PointeeElem->getAsCXXRecordDecl();
      }
    }

    // 'delete p' on a pointer to array: warn, suggest '[]', and analyze as
    // the array form from here on.
    if (Pointee->isArrayType() && !ArrayForm) {
      Diag(Loc: StartLoc, DiagID: diag::warn_delete_array_type)
          << Type << Ex.get()->getSourceRange()
          << FixItHint::CreateInsertion(InsertionLoc: getLocForEndOfToken(Loc: StartLoc), Code: "[]");
      ArrayForm = true;
    }

    DeclarationName DeleteName = Context.DeclarationNames.getCXXOperatorName(
        Op: ArrayForm ? OO_Array_Delete : OO_Delete);

    if (PointeeRD) {
      // Class-type pointee: look for a member operator delete (unless
      // '::delete' forces the global one).
      ImplicitDeallocationParameters IDP = {
          Pointee, ShouldUseTypeAwareOperatorNewOrDelete(),
          AlignedAllocationMode::No, SizedDeallocationMode::No};
      if (!UseGlobal &&
          FindDeallocationFunction(StartLoc, RD: PointeeRD, Name: DeleteName,
                                   Operator&: OperatorDelete, IDP))
        return ExprError();

      // If we're allocating an array of records, check whether the
      // usual operator delete[] has a size_t parameter.
      if (ArrayForm) {
        // If the user specifically asked to use the global allocator,
        // we'll need to do the lookup into the class.
        if (UseGlobal)
          UsualArrayDeleteWantsSize = doesUsualArrayDeleteWantSize(
              S&: *this, loc: StartLoc, PassType: IDP.PassTypeIdentity, allocType: PointeeElem);

        // Otherwise, the usual operator delete[] should be the
        // function we just found.
        else if (isa_and_nonnull<CXXMethodDecl>(Val: OperatorDelete)) {
          UsualDeallocFnInfo UDFI(
              *this, DeclAccessPair::make(D: OperatorDelete, AS: AS_public), Pointee,
              StartLoc);
          UsualArrayDeleteWantsSize = isSizedDeallocation(Mode: UDFI.IDP.PassSize);
        }
      }

      // Mark the destructor referenced if delete will actually invoke it.
      if (!PointeeRD->hasIrrelevantDestructor()) {
        if (CXXDestructorDecl *Dtor = LookupDestructor(Class: PointeeRD)) {
          if (Dtor->isCalledByDelete(OpDel: OperatorDelete)) {
            MarkFunctionReferenced(Loc: StartLoc, Func: Dtor);
            if (DiagnoseUseOfDecl(D: Dtor, Locs: StartLoc))
              return ExprError();
          }
        }
      }

      CheckVirtualDtorCall(dtor: PointeeRD->getDestructor(), Loc: StartLoc,
                           /*IsDelete=*/true, /*CallCanBeVirtual=*/true,
                           /*WarnOnNonAbstractTypes=*/!ArrayForm,
                           DtorLoc: SourceLocation());
    }

    // No member operator delete was found: fall back to a global usual
    // deallocation function.
    if (!OperatorDelete) {
      if (getLangOpts().OpenCLCPlusPlus) {
        Diag(Loc: StartLoc, DiagID: diag::err_openclcxx_not_supported) << "default delete";
        return ExprError();
      }

      // Size can only be passed when the pointee is complete (and, for the
      // array form, when the usual delete[] takes a size or the elements
      // need destruction).
      bool IsComplete = isCompleteType(Loc: StartLoc, T: Pointee);
      bool CanProvideSize =
          IsComplete && (!ArrayForm || UsualArrayDeleteWantsSize ||
                         Pointee.isDestructedType());
      bool Overaligned = hasNewExtendedAlignment(S&: *this, AllocType: Pointee);

      // Look for a global declaration.
      ImplicitDeallocationParameters IDP = {
          Pointee, ShouldUseTypeAwareOperatorNewOrDelete(),
          alignedAllocationModeFromBool(IsAligned: Overaligned),
          sizedDeallocationModeFromBool(IsSized: CanProvideSize)};
      OperatorDelete = FindUsualDeallocationFunction(StartLoc, IDP, Name: DeleteName);
      if (!OperatorDelete)
        return ExprError();
    }

    if (OperatorDelete->isInvalidDecl())
      return ExprError();

    MarkFunctionReferenced(Loc: StartLoc, Func: OperatorDelete);

    // Check access and ambiguity of destructor if we're going to call it.
    // Note that this is required even for a virtual delete.
    bool IsVirtualDelete = false;
    if (PointeeRD) {
      if (CXXDestructorDecl *Dtor = LookupDestructor(Class: PointeeRD)) {
        if (Dtor->isCalledByDelete(OpDel: OperatorDelete))
          CheckDestructorAccess(Loc: Ex.get()->getExprLoc(), Dtor,
                                PDiag: PDiag(DiagID: diag::err_access_dtor) << PointeeElem);
        IsVirtualDelete = Dtor->isVirtual();
      }
    }

    DiagnoseUseOfDecl(D: OperatorDelete, Locs: StartLoc);

    // Type-aware operator delete takes a type-identity tag as its first
    // parameter, shifting the address parameter to index 1.
    unsigned AddressParamIdx = 0;
    if (OperatorDelete->isTypeAwareOperatorNewOrDelete()) {
      QualType TypeIdentity = OperatorDelete->getParamDecl(i: 0)->getType();
      if (RequireCompleteType(Loc: StartLoc, T: TypeIdentity,
                              DiagID: diag::err_incomplete_type))
        return ExprError();
      AddressParamIdx = 1;
    }

    // Convert the operand to the type of the first parameter of operator
    // delete. This is only necessary if we selected a destroying operator
    // delete that we are going to call (non-virtually); converting to void*
    // is trivial and left to AST consumers to handle.
    QualType ParamType =
        OperatorDelete->getParamDecl(i: AddressParamIdx)->getType();
    if (!IsVirtualDelete && !ParamType->getPointeeType()->isVoidType()) {
      Qualifiers Qs = Pointee.getQualifiers();
      if (Qs.hasCVRQualifiers()) {
        // Qualifiers are irrelevant to this conversion; we're only looking
        // for access and ambiguity.
        Qs.removeCVRQualifiers();
        QualType Unqual = Context.getPointerType(
            T: Context.getQualifiedType(T: Pointee.getUnqualifiedType(), Qs));
        Ex = ImpCastExprToType(E: Ex.get(), Type: Unqual, CK: CK_NoOp);
      }
      Ex = PerformImplicitConversion(From: Ex.get(), ToType: ParamType,
                                     Action: AssignmentAction::Passing);
      if (Ex.isInvalid())
        return ExprError();
    }
  }

  CXXDeleteExpr *Result = new (Context) CXXDeleteExpr(
      Context.VoidTy, UseGlobal, ArrayForm, ArrayFormAsWritten,
      UsualArrayDeleteWantsSize, OperatorDelete, Ex.get(), StartLoc);
  // Warn about new[]/delete (or new/delete[]) form mismatches.
  AnalyzeDeleteExprMismatch(DE: Result);
  return Result;
}
4251
/// Resolve the replaceable global 'operator new'/'operator delete' that a
/// __builtin_operator_new/__builtin_operator_delete call should use.
///
/// Performs overload resolution over the global allocation functions against
/// the builtin call's arguments. On success stores the chosen function in
/// \p Operator and returns false; on failure emits diagnostics and returns
/// true.
static bool resolveBuiltinNewDeleteOverload(Sema &S, CallExpr *TheCall,
                                            bool IsDelete,
                                            FunctionDecl *&Operator) {

  DeclarationName NewName = S.Context.DeclarationNames.getCXXOperatorName(
      Op: IsDelete ? OO_Delete : OO_New);

  // The caller has already run DeclareGlobalNewDelete(), so lookup at
  // translation-unit scope must find the allocation functions.
  LookupResult R(S, NewName, TheCall->getBeginLoc(), Sema::LookupOrdinaryName);
  S.LookupQualifiedName(R, LookupCtx: S.Context.getTranslationUnitDecl());
  assert(!R.empty() && "implicitly declared allocation functions not found");
  assert(!R.isAmbiguous() && "global allocation functions are ambiguous");

  // We do our own custom access checks below.
  R.suppressDiagnostics();

  SmallVector<Expr *, 8> Args(TheCall->arguments());
  OverloadCandidateSet Candidates(R.getNameLoc(),
                                  OverloadCandidateSet::CSK_Normal);
  for (LookupResult::iterator FnOvl = R.begin(), FnOvlEnd = R.end();
       FnOvl != FnOvlEnd; ++FnOvl) {
    // Even member operator new/delete are implicitly treated as
    // static, so don't use AddMemberCandidate.
    NamedDecl *D = (*FnOvl)->getUnderlyingDecl();

    if (FunctionTemplateDecl *FnTemplate = dyn_cast<FunctionTemplateDecl>(Val: D)) {
      S.AddTemplateOverloadCandidate(FunctionTemplate: FnTemplate, FoundDecl: FnOvl.getPair(),
                                     /*ExplicitTemplateArgs=*/nullptr, Args,
                                     CandidateSet&: Candidates,
                                     /*SuppressUserConversions=*/false);
      continue;
    }

    FunctionDecl *Fn = cast<FunctionDecl>(Val: D);
    S.AddOverloadCandidate(Function: Fn, FoundDecl: FnOvl.getPair(), Args, CandidateSet&: Candidates,
                           /*SuppressUserConversions=*/false);
  }

  SourceRange Range = TheCall->getSourceRange();

  // Do the resolution.
  OverloadCandidateSet::iterator Best;
  switch (Candidates.BestViableFunction(S, Loc: R.getNameLoc(), Best)) {
  case OR_Success: {
    // Got one!
    FunctionDecl *FnDecl = Best->Function;
    assert(R.getNamingClass() == nullptr &&
           "class members should not be considered");

    // The builtin may only bind to a usual, replaceable global allocation
    // function; anything else is an error.
    if (!FnDecl->isReplaceableGlobalAllocationFunction()) {
      S.Diag(Loc: R.getNameLoc(), DiagID: diag::err_builtin_operator_new_delete_not_usual)
          << (IsDelete ? 1 : 0) << Range;
      S.Diag(Loc: FnDecl->getLocation(), DiagID: diag::note_non_usual_function_declared_here)
          << R.getLookupName() << FnDecl->getSourceRange();
      return true;
    }

    Operator = FnDecl;
    return false;
  }

  case OR_No_Viable_Function:
    Candidates.NoteCandidates(
        PA: PartialDiagnosticAt(R.getNameLoc(),
                             S.PDiag(DiagID: diag::err_ovl_no_viable_function_in_call)
                                 << R.getLookupName() << Range),
        S, OCD: OCD_AllCandidates, Args);
    return true;

  case OR_Ambiguous:
    Candidates.NoteCandidates(
        PA: PartialDiagnosticAt(R.getNameLoc(),
                             S.PDiag(DiagID: diag::err_ovl_ambiguous_call)
                                 << R.getLookupName() << Range),
        S, OCD: OCD_AmbiguousCandidates, Args);
    return true;

  case OR_Deleted:
    S.DiagnoseUseOfDeletedFunction(Loc: R.getNameLoc(), Range, Name: R.getLookupName(),
                                   CandidateSet&: Candidates, Fn: Best->Function, Args);
    return true;
  }
  llvm_unreachable("Unreachable, bad result from BestViableFunction");
}
4335
/// Finish semantic analysis of a __builtin_operator_new or
/// __builtin_operator_delete call: resolve the underlying global allocation
/// function, convert the arguments to its parameter types, and patch the
/// call's result and callee types accordingly.
ExprResult Sema::BuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
                                                    bool IsDelete) {
  CallExpr *TheCall = cast<CallExpr>(Val: TheCallResult.get());
  // These builtins are C++-only.
  if (!getLangOpts().CPlusPlus) {
    Diag(Loc: TheCall->getExprLoc(), DiagID: diag::err_builtin_requires_language)
        << (IsDelete ? "__builtin_operator_delete" : "__builtin_operator_new")
        << "C++";
    return ExprError();
  }
  // CodeGen assumes it can find the global new and delete to call,
  // so ensure that they are declared.
  DeclareGlobalNewDelete();

  FunctionDecl *OperatorNewOrDelete = nullptr;
  if (resolveBuiltinNewDeleteOverload(S&: *this, TheCall, IsDelete,
                                      Operator&: OperatorNewOrDelete))
    return ExprError();
  assert(OperatorNewOrDelete && "should be found");

  DiagnoseUseOfDecl(D: OperatorNewOrDelete, Locs: TheCall->getExprLoc());
  MarkFunctionReferenced(Loc: TheCall->getExprLoc(), Func: OperatorNewOrDelete);

  // Give the call the resolved function's return type, and convert each
  // argument as if initializing the corresponding parameter.
  TheCall->setType(OperatorNewOrDelete->getReturnType());
  for (unsigned i = 0; i != TheCall->getNumArgs(); ++i) {
    QualType ParamTy = OperatorNewOrDelete->getParamDecl(i)->getType();
    InitializedEntity Entity =
        InitializedEntity::InitializeParameter(Context, Type: ParamTy, Consumed: false);
    ExprResult Arg = PerformCopyInitialization(
        Entity, EqualLoc: TheCall->getArg(Arg: i)->getBeginLoc(), Init: TheCall->getArg(Arg: i));
    if (Arg.isInvalid())
      return ExprError();
    TheCall->setArg(Arg: i, ArgExpr: Arg.get());
  }
  // The callee was built as a builtin-function-to-pointer cast; retype it
  // to the resolved operator's function type.
  auto Callee = dyn_cast<ImplicitCastExpr>(Val: TheCall->getCallee());
  assert(Callee && Callee->getCastKind() == CK_BuiltinFnToFnPtr &&
         "Callee expected to be implicit cast to a builtin function pointer");
  Callee->setType(OperatorNewOrDelete->getType());

  return TheCallResult;
}
4376
4377void Sema::CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
4378 bool IsDelete, bool CallCanBeVirtual,
4379 bool WarnOnNonAbstractTypes,
4380 SourceLocation DtorLoc) {
4381 if (!dtor || dtor->isVirtual() || !CallCanBeVirtual || isUnevaluatedContext())
4382 return;
4383
4384 // C++ [expr.delete]p3:
4385 // In the first alternative (delete object), if the static type of the
4386 // object to be deleted is different from its dynamic type, the static
4387 // type shall be a base class of the dynamic type of the object to be
4388 // deleted and the static type shall have a virtual destructor or the
4389 // behavior is undefined.
4390 //
4391 const CXXRecordDecl *PointeeRD = dtor->getParent();
4392 // Note: a final class cannot be derived from, no issue there
4393 if (!PointeeRD->isPolymorphic() || PointeeRD->hasAttr<FinalAttr>())
4394 return;
4395
4396 // If the superclass is in a system header, there's nothing that can be done.
4397 // The `delete` (where we emit the warning) can be in a system header,
4398 // what matters for this warning is where the deleted type is defined.
4399 if (getSourceManager().isInSystemHeader(Loc: PointeeRD->getLocation()))
4400 return;
4401
4402 QualType ClassType = dtor->getFunctionObjectParameterType();
4403 if (PointeeRD->isAbstract()) {
4404 // If the class is abstract, we warn by default, because we're
4405 // sure the code has undefined behavior.
4406 Diag(Loc, DiagID: diag::warn_delete_abstract_non_virtual_dtor) << (IsDelete ? 0 : 1)
4407 << ClassType;
4408 } else if (WarnOnNonAbstractTypes) {
4409 // Otherwise, if this is not an array delete, it's a bit suspect,
4410 // but not necessarily wrong.
4411 Diag(Loc, DiagID: diag::warn_delete_non_virtual_dtor) << (IsDelete ? 0 : 1)
4412 << ClassType;
4413 }
4414 if (!IsDelete) {
4415 std::string TypeStr;
4416 ClassType.getAsStringInternal(Str&: TypeStr, Policy: getPrintingPolicy());
4417 Diag(Loc: DtorLoc, DiagID: diag::note_delete_non_virtual)
4418 << FixItHint::CreateInsertion(InsertionLoc: DtorLoc, Code: TypeStr + "::");
4419 }
4420}
4421
4422Sema::ConditionResult Sema::ActOnConditionVariable(Decl *ConditionVar,
4423 SourceLocation StmtLoc,
4424 ConditionKind CK) {
4425 ExprResult E =
4426 CheckConditionVariable(ConditionVar: cast<VarDecl>(Val: ConditionVar), StmtLoc, CK);
4427 if (E.isInvalid())
4428 return ConditionError();
4429 E = ActOnFinishFullExpr(Expr: E.get(), /*DiscardedValue*/ false);
4430 return ConditionResult(*this, ConditionVar, E,
4431 CK == ConditionKind::ConstexprIf);
4432}
4433
4434ExprResult Sema::CheckConditionVariable(VarDecl *ConditionVar,
4435 SourceLocation StmtLoc,
4436 ConditionKind CK) {
4437 if (ConditionVar->isInvalidDecl())
4438 return ExprError();
4439
4440 QualType T = ConditionVar->getType();
4441
4442 // C++ [stmt.select]p2:
4443 // The declarator shall not specify a function or an array.
4444 if (T->isFunctionType())
4445 return ExprError(Diag(Loc: ConditionVar->getLocation(),
4446 DiagID: diag::err_invalid_use_of_function_type)
4447 << ConditionVar->getSourceRange());
4448 else if (T->isArrayType())
4449 return ExprError(Diag(Loc: ConditionVar->getLocation(),
4450 DiagID: diag::err_invalid_use_of_array_type)
4451 << ConditionVar->getSourceRange());
4452
4453 ExprResult Condition = BuildDeclRefExpr(
4454 D: ConditionVar, Ty: ConditionVar->getType().getNonReferenceType(), VK: VK_LValue,
4455 Loc: ConditionVar->getLocation());
4456
4457 switch (CK) {
4458 case ConditionKind::Boolean:
4459 return CheckBooleanCondition(Loc: StmtLoc, E: Condition.get());
4460
4461 case ConditionKind::ConstexprIf:
4462 return CheckBooleanCondition(Loc: StmtLoc, E: Condition.get(), IsConstexpr: true);
4463
4464 case ConditionKind::Switch:
4465 return CheckSwitchCondition(SwitchLoc: StmtLoc, Cond: Condition.get());
4466 }
4467
4468 llvm_unreachable("unexpected condition kind");
4469}
4470
4471ExprResult Sema::CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr) {
4472 // C++11 6.4p4:
4473 // The value of a condition that is an initialized declaration in a statement
4474 // other than a switch statement is the value of the declared variable
4475 // implicitly converted to type bool. If that conversion is ill-formed, the
4476 // program is ill-formed.
4477 // The value of a condition that is an expression is the value of the
4478 // expression, implicitly converted to bool.
4479 //
4480 // C++23 8.5.2p2
4481 // If the if statement is of the form if constexpr, the value of the condition
4482 // is contextually converted to bool and the converted expression shall be
4483 // a constant expression.
4484 //
4485
4486 ExprResult E = PerformContextuallyConvertToBool(From: CondExpr);
4487 if (!IsConstexpr || E.isInvalid() || E.get()->isValueDependent())
4488 return E;
4489
4490 E = ActOnFinishFullExpr(Expr: E.get(), CC: E.get()->getExprLoc(),
4491 /*DiscardedValue*/ false,
4492 /*IsConstexpr*/ true);
4493 if (E.isInvalid())
4494 return E;
4495
4496 // FIXME: Return this value to the caller so they don't need to recompute it.
4497 llvm::APSInt Cond;
4498 E = VerifyIntegerConstantExpression(
4499 E: E.get(), Result: &Cond,
4500 DiagID: diag::err_constexpr_if_condition_expression_is_not_constant);
4501 return E;
4502}
4503
4504bool
4505Sema::IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType) {
4506 // Look inside the implicit cast, if it exists.
4507 if (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(Val: From))
4508 From = Cast->getSubExpr();
4509
4510 // A string literal (2.13.4) that is not a wide string literal can
4511 // be converted to an rvalue of type "pointer to char"; a wide
4512 // string literal can be converted to an rvalue of type "pointer
4513 // to wchar_t" (C++ 4.2p2).
4514 if (StringLiteral *StrLit = dyn_cast<StringLiteral>(Val: From->IgnoreParens()))
4515 if (const PointerType *ToPtrType = ToType->getAs<PointerType>())
4516 if (const BuiltinType *ToPointeeType
4517 = ToPtrType->getPointeeType()->getAs<BuiltinType>()) {
4518 // This conversion is considered only when there is an
4519 // explicit appropriate pointer target type (C++ 4.2p2).
4520 if (!ToPtrType->getPointeeType().hasQualifiers()) {
4521 switch (StrLit->getKind()) {
4522 case StringLiteralKind::UTF8:
4523 case StringLiteralKind::UTF16:
4524 case StringLiteralKind::UTF32:
4525 // We don't allow UTF literals to be implicitly converted
4526 break;
4527 case StringLiteralKind::Ordinary:
4528 case StringLiteralKind::Binary:
4529 return (ToPointeeType->getKind() == BuiltinType::Char_U ||
4530 ToPointeeType->getKind() == BuiltinType::Char_S);
4531 case StringLiteralKind::Wide:
4532 return Context.typesAreCompatible(T1: Context.getWideCharType(),
4533 T2: QualType(ToPointeeType, 0));
4534 case StringLiteralKind::Unevaluated:
4535 assert(false && "Unevaluated string literal in expression");
4536 break;
4537 }
4538 }
4539 }
4540
4541 return false;
4542}
4543
/// Build the expression for a user-defined conversion of \p From to \p Ty,
/// performed either via a converting constructor (CK_ConstructorConversion)
/// or a conversion function (CK_UserDefinedConversion) selected by overload
/// resolution.
///
/// Performs the required access and use checks before building the call,
/// and binds the result to a temporary as needed.
static ExprResult BuildCXXCastArgument(Sema &S,
                                       SourceLocation CastLoc,
                                       QualType Ty,
                                       CastKind Kind,
                                       CXXMethodDecl *Method,
                                       DeclAccessPair FoundDecl,
                                       bool HadMultipleCandidates,
                                       Expr *From) {
  switch (Kind) {
  default: llvm_unreachable("Unhandled cast kind!");
  case CK_ConstructorConversion: {
    CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(Val: Method);
    SmallVector<Expr*, 8> ConstructorArgs;

    // Cannot construct an object of abstract type.
    if (S.RequireNonAbstractType(Loc: CastLoc, T: Ty,
                                 DiagID: diag::err_allocation_of_abstract_type))
      return ExprError();

    // Convert the argument(s) to the constructor's parameter types.
    if (S.CompleteConstructorCall(Constructor, DeclInitType: Ty, ArgsPtr: From, Loc: CastLoc,
                                  ConvertedArgs&: ConstructorArgs))
      return ExprError();

    S.CheckConstructorAccess(Loc: CastLoc, D: Constructor, FoundDecl,
                             Entity: InitializedEntity::InitializeTemporary(Type: Ty));
    if (S.DiagnoseUseOfDecl(D: Method, Locs: CastLoc))
      return ExprError();

    ExprResult Result = S.BuildCXXConstructExpr(
        ConstructLoc: CastLoc, DeclInitType: Ty, FoundDecl, Constructor: cast<CXXConstructorDecl>(Val: Method),
        Exprs: ConstructorArgs, HadMultipleCandidates,
        /*ListInit*/ IsListInitialization: false, /*StdInitListInit*/ IsStdInitListInitialization: false, /*ZeroInit*/ RequiresZeroInit: false,
        ConstructKind: CXXConstructionKind::Complete, ParenRange: SourceRange());
    if (Result.isInvalid())
      return ExprError();

    return S.MaybeBindToTemporary(E: Result.getAs<Expr>());
  }

  case CK_UserDefinedConversion: {
    assert(!From->getType()->isPointerType() && "Arg can't have pointer type!");

    S.CheckMemberOperatorAccess(Loc: CastLoc, ObjectExpr: From, /*arg*/ ArgExpr: nullptr, FoundDecl);
    if (S.DiagnoseUseOfDecl(D: Method, Locs: CastLoc))
      return ExprError();

    // Create an implicit call expr that calls it.
    CXXConversionDecl *Conv = cast<CXXConversionDecl>(Val: Method);
    ExprResult Result = S.BuildCXXMemberCallExpr(Exp: From, FoundDecl, Method: Conv,
                                                 HadMultipleCandidates);
    if (Result.isInvalid())
      return ExprError();
    // Record usage of conversion in an implicit cast.
    Result = ImplicitCastExpr::Create(Context: S.Context, T: Result.get()->getType(),
                                      Kind: CK_UserDefinedConversion, Operand: Result.get(),
                                      BasePath: nullptr, Cat: Result.get()->getValueKind(),
                                      FPO: S.CurFPFeatureOverrides());

    return S.MaybeBindToTemporary(E: Result.get());
  }
  }
}
4605
/// Perform an implicit conversion of \p From to \p ToType, following the
/// already-computed implicit conversion sequence \p ICS.
///
/// \param From the expression being converted; rewritten as conversion
///        steps are applied.
/// \param ToType the destination type of the conversion.
/// \param ICS the implicit conversion sequence (standard, user-defined,
///        ambiguous, or bad) selected for this conversion.
/// \param Action the assignment-like operation that triggered the
///        conversion; used when emitting diagnostics.
/// \param CCK whether this conversion stems from an explicit cast, an
///        implicit conversion, a builtin-operator rewrite, etc.
/// \returns the converted expression, or ExprError() on failure.
ExprResult
Sema::PerformImplicitConversion(Expr *From, QualType ToType,
                                const ImplicitConversionSequence &ICS,
                                AssignmentAction Action,
                                CheckedConversionKind CCK) {
  // C++ [over.match.oper]p7: [...] operands of class type are converted [...]
  if (CCK == CheckedConversionKind::ForBuiltinOverloadedOp &&
      !From->getType()->isRecordType())
    return From;

  switch (ICS.getKind()) {
  case ImplicitConversionSequence::StandardConversion: {
    // Delegate to the StandardConversionSequence overload.
    ExprResult Res = PerformImplicitConversion(From, ToType, SCS: ICS.Standard,
                                               Action, CCK);
    if (Res.isInvalid())
      return ExprError();
    From = Res.get();
    break;
  }

  case ImplicitConversionSequence::UserDefinedConversion: {

    FunctionDecl *FD = ICS.UserDefined.ConversionFunction;
    CastKind CastKind;
    QualType BeforeToType;
    assert(FD && "no conversion function for user-defined conversion seq");
    if (const CXXConversionDecl *Conv = dyn_cast<CXXConversionDecl>(Val: FD)) {
      CastKind = CK_UserDefinedConversion;

      // If the user-defined conversion is specified by a conversion function,
      // the initial standard conversion sequence converts the source type to
      // the implicit object parameter of the conversion function.
      BeforeToType = Context.getCanonicalTagType(TD: Conv->getParent());
    } else {
      const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(Val: FD);
      CastKind = CK_ConstructorConversion;
      // Do no conversion if dealing with ... for the first conversion.
      if (!ICS.UserDefined.EllipsisConversion) {
        // If the user-defined conversion is specified by a constructor, the
        // initial standard conversion sequence converts the source type to
        // the type required by the argument of the constructor
        BeforeToType = Ctor->getParamDecl(i: 0)->getType().getNonReferenceType();
      }
    }
    // Watch out for ellipsis conversion: if the first conversion was an
    // ellipsis match, no initial standard conversion is performed.
    if (!ICS.UserDefined.EllipsisConversion) {
      ExprResult Res = PerformImplicitConversion(
          From, ToType: BeforeToType, SCS: ICS.UserDefined.Before,
          Action: AssignmentAction::Converting, CCK);
      if (Res.isInvalid())
        return ExprError();
      From = Res.get();
    }

    // Build the call to the conversion function / converting constructor.
    ExprResult CastArg = BuildCXXCastArgument(
        S&: *this, CastLoc: From->getBeginLoc(), Ty: ToType.getNonReferenceType(), Kind: CastKind,
        Method: cast<CXXMethodDecl>(Val: FD), FoundDecl: ICS.UserDefined.FoundConversionFunction,
        HadMultipleCandidates: ICS.UserDefined.HadMultipleCandidates, From);

    if (CastArg.isInvalid())
      return ExprError();

    From = CastArg.get();

    // C++ [over.match.oper]p7:
    // [...] the second standard conversion sequence of a user-defined
    // conversion sequence is not applied.
    if (CCK == CheckedConversionKind::ForBuiltinOverloadedOp)
      return From;

    // Apply the second standard conversion sequence.
    return PerformImplicitConversion(From, ToType, SCS: ICS.UserDefined.After,
                                     Action: AssignmentAction::Converting, CCK);
  }

  case ImplicitConversionSequence::AmbiguousConversion:
    // Multiple viable conversions were found; report all candidates.
    ICS.DiagnoseAmbiguousConversion(S&: *this, CaretLoc: From->getExprLoc(),
                                    PDiag: PDiag(DiagID: diag::err_typecheck_ambiguous_condition)
                                        << From->getSourceRange());
    return ExprError();

  case ImplicitConversionSequence::EllipsisConversion:
  case ImplicitConversionSequence::StaticObjectArgumentConversion:
    llvm_unreachable("bad conversion");

  case ImplicitConversionSequence::BadConversion:
    // Reuse the C-compatible assignment-checking machinery to produce a
    // specific diagnostic; a "Compatible" verdict here still means the C++
    // conversion failed, so force an Incompatible diagnostic in that case.
    AssignConvertType ConvTy =
        CheckAssignmentConstraints(Loc: From->getExprLoc(), LHSType: ToType, RHSType: From->getType());
    bool Diagnosed = DiagnoseAssignmentResult(
        ConvTy: ConvTy == AssignConvertType::Compatible
            ? AssignConvertType::Incompatible
            : ConvTy,
        Loc: From->getExprLoc(), DstType: ToType, SrcType: From->getType(), SrcExpr: From, Action);
    assert(Diagnosed && "failed to diagnose bad conversion"); (void)Diagnosed;
    return ExprError();
  }

  // Everything went well.
  return From;
}
4705
4706// adjustVectorOrConstantMatrixType - Compute the intermediate cast type casting
4707// elements of the from type to the elements of the to type without resizing the
4708// vector or matrix.
4709static QualType adjustVectorOrConstantMatrixType(ASTContext &Context,
4710 QualType FromTy,
4711 QualType ToType,
4712 QualType *ElTy = nullptr) {
4713 QualType ElType = ToType;
4714 if (auto *ToVec = ToType->getAs<VectorType>())
4715 ElType = ToVec->getElementType();
4716 else if (auto *ToMat = ToType->getAs<ConstantMatrixType>())
4717 ElType = ToMat->getElementType();
4718
4719 if (ElTy)
4720 *ElTy = ElType;
4721 if (FromTy->isVectorType()) {
4722 auto *FromVec = FromTy->castAs<VectorType>();
4723 return Context.getExtVectorType(VectorType: ElType, NumElts: FromVec->getNumElements());
4724 }
4725 if (FromTy->isConstantMatrixType()) {
4726 auto *FromMat = FromTy->castAs<ConstantMatrixType>();
4727 return Context.getConstantMatrixType(ElementType: ElType, NumRows: FromMat->getNumRows(),
4728 NumColumns: FromMat->getNumColumns());
4729 }
4730 return ElType;
4731}
4732
4733/// Check if an integral conversion involves incompatible overflow behavior
4734/// types. Returns true if the conversion is invalid.
4735static bool checkIncompatibleOBTConversion(Sema &S, QualType FromType,
4736 QualType ToType, Expr *From) {
4737 const auto *FromOBT = FromType->getAs<OverflowBehaviorType>();
4738 const auto *ToOBT = ToType->getAs<OverflowBehaviorType>();
4739
4740 if (FromOBT && ToOBT &&
4741 FromOBT->getBehaviorKind() != ToOBT->getBehaviorKind()) {
4742 S.Diag(Loc: From->getExprLoc(), DiagID: diag::err_incompatible_obt_kinds_assignment)
4743 << ToType << FromType
4744 << (ToOBT->getBehaviorKind() ==
4745 OverflowBehaviorType::OverflowBehaviorKind::Trap
4746 ? "__ob_trap"
4747 : "__ob_wrap")
4748 << (FromOBT->getBehaviorKind() ==
4749 OverflowBehaviorType::OverflowBehaviorKind::Trap
4750 ? "__ob_trap"
4751 : "__ob_wrap");
4752 return true;
4753 }
4754 return false;
4755}
4756
/// Perform the implicit conversion of \p From to \p ToType described by the
/// standard conversion sequence \p SCS, materializing each step (First,
/// Second, Dimension, Third) as an implicit cast — or, for copy-constructor
/// mediated conversions, as a constructor call — in the AST.
///
/// \param From the expression to convert; rewritten as conversions apply.
/// \param ToType the destination type.
/// \param SCS the standard conversion sequence to apply.
/// \param Action the assignment-like operation that triggered the
///        conversion; used to select diagnostics.
/// \param CCK whether the conversion stems from a cast, an implicit
///        conversion, etc.
/// \returns the converted expression, or ExprError() on failure.
ExprResult
Sema::PerformImplicitConversion(Expr *From, QualType ToType,
                                const StandardConversionSequence& SCS,
                                AssignmentAction Action,
                                CheckedConversionKind CCK) {
  bool CStyle = (CCK == CheckedConversionKind::CStyleCast ||
                 CCK == CheckedConversionKind::FunctionalCast);

  // Overall FIXME: we are recomputing too many types here and doing far too
  // much extra work. What this means is that we need to keep track of more
  // information that is computed when we try the implicit conversion initially,
  // so that we don't need to recompute anything here.
  QualType FromType = From->getType();

  // A conversion realized by a copy constructor is modeled as a
  // CXXConstructExpr rather than a cast.
  if (SCS.CopyConstructor) {
    // FIXME: When can ToType be a reference type?
    assert(!ToType->isReferenceType());
    if (SCS.Second == ICK_Derived_To_Base) {
      SmallVector<Expr*, 8> ConstructorArgs;
      if (CompleteConstructorCall(
              Constructor: cast<CXXConstructorDecl>(Val: SCS.CopyConstructor), DeclInitType: ToType, ArgsPtr: From,
              /*FIXME:ConstructLoc*/ Loc: SourceLocation(), ConvertedArgs&: ConstructorArgs))
        return ExprError();
      return BuildCXXConstructExpr(
          /*FIXME:ConstructLoc*/ ConstructLoc: SourceLocation(), DeclInitType: ToType,
          FoundDecl: SCS.FoundCopyConstructor, Constructor: SCS.CopyConstructor, Exprs: ConstructorArgs,
          /*HadMultipleCandidates*/ false,
          /*ListInit*/ IsListInitialization: false, /*StdInitListInit*/ IsStdInitListInitialization: false, /*ZeroInit*/ RequiresZeroInit: false,
          ConstructKind: CXXConstructionKind::Complete, ParenRange: SourceRange());
    }
    return BuildCXXConstructExpr(
        /*FIXME:ConstructLoc*/ ConstructLoc: SourceLocation(), DeclInitType: ToType,
        FoundDecl: SCS.FoundCopyConstructor, Constructor: SCS.CopyConstructor, Exprs: From,
        /*HadMultipleCandidates*/ false,
        /*ListInit*/ IsListInitialization: false, /*StdInitListInit*/ IsStdInitListInitialization: false, /*ZeroInit*/ RequiresZeroInit: false,
        ConstructKind: CXXConstructionKind::Complete, ParenRange: SourceRange());
  }

  // Resolve overloaded function references.
  if (Context.hasSameType(T1: FromType, T2: Context.OverloadTy)) {
    DeclAccessPair Found;
    FunctionDecl *Fn = ResolveAddressOfOverloadedFunction(AddressOfExpr: From, TargetType: ToType,
                                                          Complain: true, Found);
    if (!Fn)
      return ExprError();

    if (DiagnoseUseOfDecl(D: Fn, Locs: From->getBeginLoc()))
      return ExprError();

    ExprResult Res = FixOverloadedFunctionReference(E: From, FoundDecl: Found, Fn);
    if (Res.isInvalid())
      return ExprError();

    // We might get back another placeholder expression if we resolved to a
    // builtin.
    Res = CheckPlaceholderExpr(E: Res.get());
    if (Res.isInvalid())
      return ExprError();

    From = Res.get();
    FromType = From->getType();
  }

  // If we're converting to an atomic type, first convert to the corresponding
  // non-atomic type. The scalar -> atomic step is re-applied at the end.
  QualType ToAtomicType;
  if (const AtomicType *ToAtomic = ToType->getAs<AtomicType>()) {
    ToAtomicType = ToType;
    ToType = ToAtomic->getValueType();
  }

  // Remember the original source type for end-of-function diagnostics.
  QualType InitialFromType = FromType;
  // Perform the first implicit conversion.
  switch (SCS.First) {
  case ICK_Identity:
    // Identity needs no cast, except that an atomic operand is implicitly
    // unwrapped to its underlying value type.
    if (const AtomicType *FromAtomic = FromType->getAs<AtomicType>()) {
      FromType = FromAtomic->getValueType().getUnqualifiedType();
      From = ImplicitCastExpr::Create(Context, T: FromType, Kind: CK_AtomicToNonAtomic,
                                      Operand: From, /*BasePath=*/nullptr, Cat: VK_PRValue,
                                      FPO: FPOptionsOverride());
    }
    break;

  case ICK_Lvalue_To_Rvalue: {
    assert(From->getObjectKind() != OK_ObjCProperty);
    ExprResult FromRes = DefaultLvalueConversion(E: From);
    if (FromRes.isInvalid())
      return ExprError();

    From = FromRes.get();
    FromType = From->getType();
    break;
  }

  case ICK_Array_To_Pointer:
    FromType = Context.getArrayDecayedType(T: FromType);
    From = ImpCastExprToType(E: From, Type: FromType, CK: CK_ArrayToPointerDecay, VK: VK_PRValue,
                             /*BasePath=*/nullptr, CCK)
               .get();
    break;

  case ICK_HLSL_Array_RValue:
    // Convert between the ConstantArrayType and ArrayParameterType
    // representations, then materialize the array rvalue.
    if (ToType->isArrayParameterType()) {
      FromType = Context.getArrayParameterType(Ty: FromType);
    } else if (FromType->isArrayParameterType()) {
      const ArrayParameterType *APT = cast<ArrayParameterType>(Val&: FromType);
      FromType = APT->getConstantArrayType(Ctx: Context);
    }
    From = ImpCastExprToType(E: From, Type: FromType, CK: CK_HLSLArrayRValue, VK: VK_PRValue,
                             /*BasePath=*/nullptr, CCK)
               .get();
    break;

  case ICK_Function_To_Pointer:
    FromType = Context.getPointerType(T: FromType);
    From = ImpCastExprToType(E: From, Type: FromType, CK: CK_FunctionToPointerDecay,
                             VK: VK_PRValue, /*BasePath=*/nullptr, CCK)
               .get();
    break;

  default:
    llvm_unreachable("Improper first standard conversion");
  }

  // Perform the second implicit conversion
  switch (SCS.Second) {
  case ICK_Identity:
    // C++ [except.spec]p5:
    //   [For] assignment to and initialization of pointers to functions,
    //   pointers to member functions, and references to functions: the
    //   target entity shall allow at least the exceptions allowed by the
    //   source value in the assignment or initialization.
    switch (Action) {
    case AssignmentAction::Assigning:
    case AssignmentAction::Initializing:
      // Note, function argument passing and returning are initialization.
    case AssignmentAction::Passing:
    case AssignmentAction::Returning:
    case AssignmentAction::Sending:
    case AssignmentAction::Passing_CFAudited:
      if (CheckExceptionSpecCompatibility(From, ToType))
        return ExprError();
      break;

    case AssignmentAction::Casting:
    case AssignmentAction::Converting:
      // Casts and implicit conversions are not initialization, so are not
      // checked for exception specification mismatches.
      break;
    }
    // Nothing else to do.
    break;

  case ICK_Integral_Promotion:
  case ICK_Integral_Conversion: {
    // For vector/matrix operands, convert element-wise without changing the
    // source's shape (the Dimension step handles any resizing).
    QualType ElTy = ToType;
    QualType StepTy = ToType;
    if (FromType->isVectorType() || ToType->isVectorType() ||
        FromType->isConstantMatrixType() || ToType->isConstantMatrixType())
      StepTy =
          adjustVectorOrConstantMatrixType(Context, FromTy: FromType, ToType, ElTy: &ElTy);

    // Check for incompatible OBT kinds before converting
    if (checkIncompatibleOBTConversion(S&: *this, FromType, ToType: StepTy, From))
      return ExprError();

    if (ElTy->isBooleanType()) {
      assert(FromType->castAsEnumDecl()->isFixed() &&
             SCS.Second == ICK_Integral_Promotion &&
             "only enums with fixed underlying type can promote to bool");
      From = ImpCastExprToType(E: From, Type: StepTy, CK: CK_IntegralToBoolean, VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK)
                 .get();
    } else {
      From = ImpCastExprToType(E: From, Type: StepTy, CK: CK_IntegralCast, VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK)
                 .get();
    }
    break;
  }

  case ICK_Floating_Promotion:
  case ICK_Floating_Conversion: {
    QualType StepTy = ToType;
    if (FromType->isVectorType() || ToType->isVectorType() ||
        FromType->isConstantMatrixType() || ToType->isConstantMatrixType())
      StepTy = adjustVectorOrConstantMatrixType(Context, FromTy: FromType, ToType);
    From = ImpCastExprToType(E: From, Type: StepTy, CK: CK_FloatingCast, VK: VK_PRValue,
                             /*BasePath=*/nullptr, CCK)
               .get();
    break;
  }

  case ICK_Complex_Promotion:
  case ICK_Complex_Conversion: {
    // Pick the complex cast kind from the element types on each side.
    QualType FromEl = From->getType()->castAs<ComplexType>()->getElementType();
    QualType ToEl = ToType->castAs<ComplexType>()->getElementType();
    CastKind CK;
    if (FromEl->isRealFloatingType()) {
      if (ToEl->isRealFloatingType())
        CK = CK_FloatingComplexCast;
      else
        CK = CK_FloatingComplexToIntegralComplex;
    } else if (ToEl->isRealFloatingType()) {
      CK = CK_IntegralComplexToFloatingComplex;
    } else {
      CK = CK_IntegralComplexCast;
    }
    From = ImpCastExprToType(E: From, Type: ToType, CK, VK: VK_PRValue, /*BasePath=*/nullptr,
                             CCK)
               .get();
    break;
  }

  case ICK_Floating_Integral: {
    // Integral <-> floating conversion; the direction is determined by the
    // destination element type.
    QualType ElTy = ToType;
    QualType StepTy = ToType;
    if (FromType->isVectorType() || ToType->isVectorType() ||
        FromType->isConstantMatrixType() || ToType->isConstantMatrixType())
      StepTy =
          adjustVectorOrConstantMatrixType(Context, FromTy: FromType, ToType, ElTy: &ElTy);
    if (ElTy->isRealFloatingType())
      From = ImpCastExprToType(E: From, Type: StepTy, CK: CK_IntegralToFloating, VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK)
                 .get();
    else
      From = ImpCastExprToType(E: From, Type: StepTy, CK: CK_FloatingToIntegral, VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK)
                 .get();
    break;
  }

  case ICK_Fixed_Point_Conversion:
    assert((FromType->isFixedPointType() || ToType->isFixedPointType()) &&
           "Attempting implicit fixed point conversion without a fixed "
           "point operand");
    if (FromType->isFloatingType())
      From = ImpCastExprToType(E: From, Type: ToType, CK: CK_FloatingToFixedPoint,
                               VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK).get();
    else if (ToType->isFloatingType())
      From = ImpCastExprToType(E: From, Type: ToType, CK: CK_FixedPointToFloating,
                               VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK).get();
    else if (FromType->isIntegralType(Ctx: Context))
      From = ImpCastExprToType(E: From, Type: ToType, CK: CK_IntegralToFixedPoint,
                               VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK).get();
    else if (ToType->isIntegralType(Ctx: Context))
      From = ImpCastExprToType(E: From, Type: ToType, CK: CK_FixedPointToIntegral,
                               VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK).get();
    else if (ToType->isBooleanType())
      From = ImpCastExprToType(E: From, Type: ToType, CK: CK_FixedPointToBoolean,
                               VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK).get();
    else
      From = ImpCastExprToType(E: From, Type: ToType, CK: CK_FixedPointCast,
                               VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK).get();
    break;

  case ICK_Compatible_Conversion:
    // Types are compatible; record the conversion as a no-op cast.
    From = ImpCastExprToType(E: From, Type: ToType, CK: CK_NoOp, VK: From->getValueKind(),
                             /*BasePath=*/nullptr, CCK).get();
    break;

  case ICK_Writeback_Conversion:
  case ICK_Pointer_Conversion: {
    if (SCS.IncompatibleObjC && Action != AssignmentAction::Casting) {
      // Diagnose incompatible Objective-C conversions
      if (Action == AssignmentAction::Initializing ||
          Action == AssignmentAction::Assigning)
        Diag(Loc: From->getBeginLoc(),
             DiagID: diag::ext_typecheck_convert_incompatible_pointer)
            << ToType << From->getType() << Action << From->getSourceRange()
            << 0;
      else
        Diag(Loc: From->getBeginLoc(),
             DiagID: diag::ext_typecheck_convert_incompatible_pointer)
            << From->getType() << ToType << Action << From->getSourceRange()
            << 0;

      if (From->getType()->isObjCObjectPointerType() &&
          ToType->isObjCObjectPointerType())
        ObjC().EmitRelatedResultTypeNote(E: From);
    } else if (getLangOpts().allowsNonTrivialObjCLifetimeQualifiers() &&
               !ObjC().CheckObjCARCUnavailableWeakConversion(castType: ToType,
                                                             ExprType: From->getType())) {
      if (Action == AssignmentAction::Initializing)
        Diag(Loc: From->getBeginLoc(), DiagID: diag::err_arc_weak_unavailable_assign);
      else
        Diag(Loc: From->getBeginLoc(), DiagID: diag::err_arc_convesion_of_weak_unavailable)
            << (Action == AssignmentAction::Casting) << From->getType()
            << ToType << From->getSourceRange();
    }

    // Defer address space conversion to the third conversion.
    QualType FromPteeType = From->getType()->getPointeeType();
    QualType ToPteeType = ToType->getPointeeType();
    QualType NewToType = ToType;
    if (!FromPteeType.isNull() && !ToPteeType.isNull() &&
        FromPteeType.getAddressSpace() != ToPteeType.getAddressSpace()) {
      NewToType = Context.removeAddrSpaceQualType(T: ToPteeType);
      NewToType = Context.getAddrSpaceQualType(T: NewToType,
                                               AddressSpace: FromPteeType.getAddressSpace());
      if (ToType->isObjCObjectPointerType())
        NewToType = Context.getObjCObjectPointerType(OIT: NewToType);
      else if (ToType->isBlockPointerType())
        NewToType = Context.getBlockPointerType(T: NewToType);
      else
        NewToType = Context.getPointerType(T: NewToType);
    }

    CastKind Kind;
    CXXCastPath BasePath;
    if (CheckPointerConversion(From, ToType: NewToType, Kind, BasePath, IgnoreBaseAccess: CStyle))
      return ExprError();

    // Make sure we extend blocks if necessary.
    // FIXME: doing this here is really ugly.
    if (Kind == CK_BlockPointerToObjCPointerCast) {
      ExprResult E = From;
      (void)ObjC().PrepareCastToObjCObjectPointer(E);
      From = E.get();
    }
    if (getLangOpts().allowsNonTrivialObjCLifetimeQualifiers())
      ObjC().CheckObjCConversion(castRange: SourceRange(), castType: NewToType, op&: From, CCK);
    From = ImpCastExprToType(E: From, Type: NewToType, CK: Kind, VK: VK_PRValue, BasePath: &BasePath, CCK)
               .get();
    break;
  }

  case ICK_Pointer_Member: {
    CastKind Kind;
    CXXCastPath BasePath;
    switch (CheckMemberPointerConversion(
        FromType: From->getType(), ToPtrType: ToType->castAs<MemberPointerType>(), Kind, BasePath,
        CheckLoc: From->getExprLoc(), OpRange: From->getSourceRange(), IgnoreBaseAccess: CStyle,
        Direction: MemberPointerConversionDirection::Downcast)) {
    case MemberPointerConversionResult::Success:
      assert((Kind != CK_NullToMemberPointer ||
              From->isNullPointerConstant(Context,
                                          Expr::NPC_ValueDependentIsNull)) &&
             "Expr must be null pointer constant!");
      break;
    case MemberPointerConversionResult::Inaccessible:
      break;
    case MemberPointerConversionResult::DifferentPointee:
      llvm_unreachable("unexpected result");
    case MemberPointerConversionResult::NotDerived:
      llvm_unreachable("Should not have been called if derivation isn't OK.");
    case MemberPointerConversionResult::Ambiguous:
    case MemberPointerConversionResult::Virtual:
      return ExprError();
    }
    if (CheckExceptionSpecCompatibility(From, ToType))
      return ExprError();

    From =
        ImpCastExprToType(E: From, Type: ToType, CK: Kind, VK: VK_PRValue, BasePath: &BasePath, CCK).get();
    break;
  }

  case ICK_Boolean_Conversion: {
    // Perform half-to-boolean conversion via float.
    if (From->getType()->isHalfType()) {
      From = ImpCastExprToType(E: From, Type: Context.FloatTy, CK: CK_FloatingCast).get();
      FromType = Context.FloatTy;
    }
    QualType ElTy = FromType;
    QualType StepTy = ToType;
    if (FromType->isVectorType())
      ElTy = FromType->castAs<VectorType>()->getElementType();
    else if (FromType->isConstantMatrixType())
      ElTy = FromType->castAs<ConstantMatrixType>()->getElementType();
    if (getLangOpts().HLSL) {
      // HLSL allows element-wise boolean conversion of vectors/matrices.
      if (FromType->isVectorType() || ToType->isVectorType() ||
          FromType->isConstantMatrixType() || ToType->isConstantMatrixType())
        StepTy = adjustVectorOrConstantMatrixType(Context, FromTy: FromType, ToType);
    }

    From = ImpCastExprToType(E: From, Type: StepTy, CK: ScalarTypeToBooleanCastKind(ScalarTy: ElTy),
                             VK: VK_PRValue,
                             /*BasePath=*/nullptr, CCK)
               .get();
    break;
  }

  case ICK_Derived_To_Base: {
    CXXCastPath BasePath;
    if (CheckDerivedToBaseConversion(
            Derived: From->getType(), Base: ToType.getNonReferenceType(), Loc: From->getBeginLoc(),
            Range: From->getSourceRange(), BasePath: &BasePath, IgnoreAccess: CStyle))
      return ExprError();

    From = ImpCastExprToType(E: From, Type: ToType.getNonReferenceType(),
                             CK: CK_DerivedToBase, VK: From->getValueKind(),
                             BasePath: &BasePath, CCK).get();
    break;
  }

  case ICK_Vector_Conversion:
    From = ImpCastExprToType(E: From, Type: ToType, CK: CK_BitCast, VK: VK_PRValue,
                             /*BasePath=*/nullptr, CCK)
               .get();
    break;

  case ICK_SVE_Vector_Conversion:
  case ICK_RVV_Vector_Conversion:
    From = ImpCastExprToType(E: From, Type: ToType, CK: CK_BitCast, VK: VK_PRValue,
                             /*BasePath=*/nullptr, CCK)
               .get();
    break;

  case ICK_Vector_Splat: {
    // Vector splat from any arithmetic type to a vector.
    Expr *Elem = prepareVectorSplat(VectorTy: ToType, SplattedExpr: From).get();
    From = ImpCastExprToType(E: Elem, Type: ToType, CK: CK_VectorSplat, VK: VK_PRValue,
                             /*BasePath=*/nullptr, CCK)
               .get();
    break;
  }

  case ICK_Complex_Real:
    // Case 1. x -> _Complex y
    if (const ComplexType *ToComplex = ToType->getAs<ComplexType>()) {
      QualType ElType = ToComplex->getElementType();
      bool isFloatingComplex = ElType->isRealFloatingType();

      // x -> y
      if (Context.hasSameUnqualifiedType(T1: ElType, T2: From->getType())) {
        // do nothing
      } else if (From->getType()->isRealFloatingType()) {
        From = ImpCastExprToType(E: From, Type: ElType,
                CK: isFloatingComplex ? CK_FloatingCast : CK_FloatingToIntegral).get();
      } else {
        assert(From->getType()->isIntegerType());
        From = ImpCastExprToType(E: From, Type: ElType,
                CK: isFloatingComplex ? CK_IntegralToFloating : CK_IntegralCast).get();
      }
      // y -> _Complex y
      From = ImpCastExprToType(E: From, Type: ToType,
                   CK: isFloatingComplex ? CK_FloatingRealToComplex
                                     : CK_IntegralRealToComplex).get();

      // Case 2. _Complex x -> y
    } else {
      auto *FromComplex = From->getType()->castAs<ComplexType>();
      QualType ElType = FromComplex->getElementType();
      bool isFloatingComplex = ElType->isRealFloatingType();

      // _Complex x -> x
      From = ImpCastExprToType(E: From, Type: ElType,
                               CK: isFloatingComplex ? CK_FloatingComplexToReal
                                                 : CK_IntegralComplexToReal,
                               VK: VK_PRValue, /*BasePath=*/nullptr, CCK)
                 .get();

      // x -> y
      if (Context.hasSameUnqualifiedType(T1: ElType, T2: ToType)) {
        // do nothing
      } else if (ToType->isRealFloatingType()) {
        From = ImpCastExprToType(E: From, Type: ToType,
                                 CK: isFloatingComplex ? CK_FloatingCast
                                                   : CK_IntegralToFloating,
                                 VK: VK_PRValue, /*BasePath=*/nullptr, CCK)
                   .get();
      } else {
        assert(ToType->isIntegerType());
        From = ImpCastExprToType(E: From, Type: ToType,
                                 CK: isFloatingComplex ? CK_FloatingToIntegral
                                                   : CK_IntegralCast,
                                 VK: VK_PRValue, /*BasePath=*/nullptr, CCK)
                   .get();
      }
    }
    break;

  case ICK_Block_Pointer_Conversion: {
    // Block pointer conversions may change the pointee's address space.
    LangAS AddrSpaceL =
        ToType->castAs<BlockPointerType>()->getPointeeType().getAddressSpace();
    LangAS AddrSpaceR =
        FromType->castAs<BlockPointerType>()->getPointeeType().getAddressSpace();
    assert(Qualifiers::isAddressSpaceSupersetOf(AddrSpaceL, AddrSpaceR,
                                                getASTContext()) &&
           "Invalid cast");
    CastKind Kind =
        AddrSpaceL != AddrSpaceR ? CK_AddressSpaceConversion : CK_BitCast;
    From = ImpCastExprToType(E: From, Type: ToType.getUnqualifiedType(), CK: Kind,
                             VK: VK_PRValue, /*BasePath=*/nullptr, CCK)
               .get();
    break;
  }

  case ICK_TransparentUnionConversion: {
    ExprResult FromRes = From;
    AssignConvertType ConvTy =
        CheckTransparentUnionArgumentConstraints(ArgType: ToType, RHS&: FromRes);
    if (FromRes.isInvalid())
      return ExprError();
    From = FromRes.get();
    assert((ConvTy == AssignConvertType::Compatible) &&
           "Improper transparent union conversion");
    (void)ConvTy;
    break;
  }

  case ICK_Zero_Event_Conversion:
  case ICK_Zero_Queue_Conversion:
    From = ImpCastExprToType(E: From, Type: ToType,
                             CK: CK_ZeroToOCLOpaqueType,
                             VK: From->getValueKind()).get();
    break;

  case ICK_Lvalue_To_Rvalue:
  case ICK_Array_To_Pointer:
  case ICK_Function_To_Pointer:
  case ICK_Function_Conversion:
  case ICK_Qualification:
  case ICK_Num_Conversion_Kinds:
  case ICK_C_Only_Conversion:
  case ICK_Incompatible_Pointer_Conversion:
  case ICK_HLSL_Array_RValue:
  case ICK_HLSL_Vector_Truncation:
  case ICK_HLSL_Matrix_Truncation:
  case ICK_HLSL_Vector_Splat:
  case ICK_HLSL_Matrix_Splat:
    llvm_unreachable("Improper second standard conversion");
  }

  // Perform the HLSL dimension (splat/truncation) adjustment, if any.
  if (SCS.Dimension != ICK_Identity) {
    // If SCS.Element is not ICK_Identity the To and From types must be HLSL
    // vectors or matrices.
    assert(
        (ToType->isVectorType() || ToType->isConstantMatrixType() ||
         ToType->isBuiltinType()) &&
        "Dimension conversion output must be vector, matrix, or scalar type.");
    switch (SCS.Dimension) {
    case ICK_HLSL_Vector_Splat: {
      // Vector splat from any arithmetic type to a vector.
      Expr *Elem = prepareVectorSplat(VectorTy: ToType, SplattedExpr: From).get();
      From = ImpCastExprToType(E: Elem, Type: ToType, CK: CK_VectorSplat, VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK)
                 .get();
      break;
    }
    case ICK_HLSL_Matrix_Splat: {
      // Matrix splat from any arithmetic type to a matrix.
      Expr *Elem = prepareMatrixSplat(MatrixTy: ToType, SplattedExpr: From).get();
      From =
          ImpCastExprToType(E: Elem, Type: ToType, CK: CK_HLSLAggregateSplatCast, VK: VK_PRValue,
                            /*BasePath=*/nullptr, CCK)
              .get();
      break;
    }
    case ICK_HLSL_Vector_Truncation: {
      // Note: HLSL built-in vectors are ExtVectors. Since this truncates a
      // vector to a smaller vector or to a scalar, this can only operate on
      // arguments where the source type is an ExtVector and the destination
      // type is either an ExtVectorType or a builtin scalar type.
      auto *FromVec = From->getType()->castAs<VectorType>();
      QualType TruncTy = FromVec->getElementType();
      if (auto *ToVec = ToType->getAs<VectorType>())
        TruncTy = Context.getExtVectorType(VectorType: TruncTy, NumElts: ToVec->getNumElements());
      From = ImpCastExprToType(E: From, Type: TruncTy, CK: CK_HLSLVectorTruncation,
                               VK: From->getValueKind())
                 .get();

      break;
    }
    case ICK_HLSL_Matrix_Truncation: {
      // Truncate a matrix to the destination's (smaller) dimensions,
      // keeping the source element type.
      auto *FromMat = From->getType()->castAs<ConstantMatrixType>();
      QualType TruncTy = FromMat->getElementType();
      if (auto *ToMat = ToType->getAs<ConstantMatrixType>())
        TruncTy = Context.getConstantMatrixType(ElementType: TruncTy, NumRows: ToMat->getNumRows(),
                                                NumColumns: ToMat->getNumColumns());
      From = ImpCastExprToType(E: From, Type: TruncTy, CK: CK_HLSLMatrixTruncation,
                               VK: From->getValueKind())
                 .get();
      break;
    }
    case ICK_Identity:
    default:
      llvm_unreachable("Improper element standard conversion");
    }
  }

  // Perform the third implicit conversion.
  switch (SCS.Third) {
  case ICK_Identity:
    // Nothing to do.
    break;

  case ICK_Function_Conversion:
    // If both sides are functions (or pointers/references to them), there could
    // be incompatible exception declarations.
    if (CheckExceptionSpecCompatibility(From, ToType))
      return ExprError();

    From = ImpCastExprToType(E: From, Type: ToType, CK: CK_NoOp, VK: VK_PRValue,
                             /*BasePath=*/nullptr, CCK)
               .get();
    break;

  case ICK_Qualification: {
    // Qualification conversion: usually a no-op cast, but a pointee address
    // space change (deferred from the pointer conversion above) is recorded
    // as an address space conversion.
    ExprValueKind VK = From->getValueKind();
    CastKind CK = CK_NoOp;

    if (ToType->isReferenceType() &&
        ToType->getPointeeType().getAddressSpace() !=
            From->getType().getAddressSpace())
      CK = CK_AddressSpaceConversion;

    if (ToType->isPointerType() &&
        ToType->getPointeeType().getAddressSpace() !=
            From->getType()->getPointeeType().getAddressSpace())
      CK = CK_AddressSpaceConversion;

    if (!isCast(CCK) &&
        !ToType->getPointeeType().getQualifiers().hasUnaligned() &&
        From->getType()->getPointeeType().getQualifiers().hasUnaligned()) {
      Diag(Loc: From->getBeginLoc(), DiagID: diag::warn_imp_cast_drops_unaligned)
          << InitialFromType << ToType;
    }

    From = ImpCastExprToType(E: From, Type: ToType.getNonLValueExprType(Context), CK, VK,
                             /*BasePath=*/nullptr, CCK)
               .get();

    if (SCS.DeprecatedStringLiteralToCharPtr &&
        !getLangOpts().WritableStrings) {
      Diag(Loc: From->getBeginLoc(),
           DiagID: getLangOpts().CPlusPlus11
               ? diag::ext_deprecated_string_literal_conversion
               : diag::warn_deprecated_string_literal_conversion)
          << ToType.getNonReferenceType();
    }

    break;
  }

  default:
    llvm_unreachable("Improper third standard conversion");
  }

  // If this conversion sequence involved a scalar -> atomic conversion, perform
  // that conversion now.
  if (!ToAtomicType.isNull()) {
    assert(Context.hasSameType(
        ToAtomicType->castAs<AtomicType>()->getValueType(), From->getType()));
    From = ImpCastExprToType(E: From, Type: ToAtomicType, CK: CK_NonAtomicToAtomic,
                             VK: VK_PRValue, BasePath: nullptr, CCK)
               .get();
  }

  // Materialize a temporary if we're implicitly converting to a reference
  // type. This is not required by the C++ rules but is necessary to maintain
  // AST invariants.
  if (ToType->isReferenceType() && From->isPRValue()) {
    ExprResult Res = TemporaryMaterializationConversion(E: From);
    if (Res.isInvalid())
      return ExprError();
    From = Res.get();
  }

  // If this conversion sequence succeeded and involved implicitly converting a
  // _Nullable type to a _Nonnull one, complain.
  if (!isCast(CCK))
    diagnoseNullableToNonnullConversion(DstType: ToType, SrcType: InitialFromType,
                                        Loc: From->getBeginLoc());

  return From;
}
5431
/// Check the operands of a pointer-to-member operator (.* or ->*), per
/// C++ [expr.mptr.oper].
///
/// Performs the required operand conversions, verifies that the RHS is a
/// pointer to member of a class that is the same as, or a base of, the
/// (possibly dereferenced) LHS class, applies any needed derived-to-base
/// cast to the LHS, and computes the type and value category of the result.
///
/// \param LHS the object expression (or pointer to object, for ->*);
///        rewritten in place as conversions are applied.
/// \param RHS the pointer-to-member expression; rewritten in place.
/// \param VK [out] receives the value category of the result.
/// \param Loc the location of the operator, for diagnostics.
/// \param isIndirect true for ->*, false for .*.
/// \returns the result type (Context.BoundMemberTy when the pointee is a
///          function type), or a null QualType after diagnosing an error.
QualType Sema::CheckPointerToMemberOperands(ExprResult &LHS, ExprResult &RHS,
                                            ExprValueKind &VK,
                                            SourceLocation Loc,
                                            bool isIndirect) {
  assert(!LHS.get()->hasPlaceholderType() && !RHS.get()->hasPlaceholderType() &&
         "placeholders should have been weeded out by now");

  // The LHS undergoes lvalue conversions if this is ->*, and undergoes the
  // temporary materialization conversion otherwise.
  if (isIndirect)
    LHS = DefaultLvalueConversion(E: LHS.get());
  else if (LHS.get()->isPRValue())
    LHS = TemporaryMaterializationConversion(E: LHS.get());
  if (LHS.isInvalid())
    return QualType();

  // The RHS always undergoes lvalue conversions.
  RHS = DefaultLvalueConversion(E: RHS.get());
  if (RHS.isInvalid()) return QualType();

  const char *OpSpelling = isIndirect ? "->*" : ".*";
  // C++ 5.5p2
  //   The binary operator .* [p3: ->*] binds its second operand, which shall
  //   be of type "pointer to member of T" (where T is a completely-defined
  //   class type) [...]
  QualType RHSType = RHS.get()->getType();
  const MemberPointerType *MemPtr = RHSType->getAs<MemberPointerType>();
  if (!MemPtr) {
    Diag(Loc, DiagID: diag::err_bad_memptr_rhs)
        << OpSpelling << RHSType << RHS.get()->getSourceRange();
    return QualType();
  }

  CXXRecordDecl *RHSClass = MemPtr->getMostRecentCXXRecordDecl();

  // Note: C++ [expr.mptr.oper]p2-3 says that the class type into which the
  // member pointer points must be completely-defined. However, there is no
  // reason for this semantic distinction, and the rule is not enforced by
  // other compilers. Therefore, we do not check this property, as it is
  // likely to be considered a defect.

  // C++ 5.5p2
  //   [...] to its first operand, which shall be of class T or of a class of
  //   which T is an unambiguous and accessible base class. [p3: a pointer to
  //   such a class]
  QualType LHSType = LHS.get()->getType();
  if (isIndirect) {
    // For ->*, the first operand must have pointer type; check the pointee
    // class below. Suggest .* as a fix-it when it doesn't.
    if (const PointerType *Ptr = LHSType->getAs<PointerType>())
      LHSType = Ptr->getPointeeType();
    else {
      Diag(Loc, DiagID: diag::err_bad_memptr_lhs)
          << OpSpelling << 1 << LHSType
          << FixItHint::CreateReplacement(RemoveRange: SourceRange(Loc), Code: ".*");
      return QualType();
    }
  }
  CXXRecordDecl *LHSClass = LHSType->getAsCXXRecordDecl();

  if (!declaresSameEntity(D1: LHSClass, D2: RHSClass)) {
    // If we want to check the hierarchy, we need a complete type.
    if (RequireCompleteType(Loc, T: LHSType, DiagID: diag::err_bad_memptr_lhs,
                            Args: OpSpelling, Args: (int)isIndirect)) {
      return QualType();
    }

    if (!IsDerivedFrom(Loc, Derived: LHSClass, Base: RHSClass)) {
      Diag(Loc, DiagID: diag::err_bad_memptr_lhs) << OpSpelling
        << (int)isIndirect << LHS.get()->getType();
      return QualType();
    }

    // The member pointer's class is a proper base of the LHS class; check
    // that the base is unambiguous and accessible, collecting the cast path.
    // FIXME: use sugared type from member pointer.
    CanQualType RHSClassType = Context.getCanonicalTagType(TD: RHSClass);
    CXXCastPath BasePath;
    if (CheckDerivedToBaseConversion(
            Derived: LHSType, Base: RHSClassType, Loc,
            Range: SourceRange(LHS.get()->getBeginLoc(), RHS.get()->getEndLoc()),
            BasePath: &BasePath))
      return QualType();

    // Cast LHS to type of use.
    QualType UseType =
        Context.getQualifiedType(T: RHSClassType, Qs: LHSType.getQualifiers());
    if (isIndirect)
      UseType = Context.getPointerType(T: UseType);
    // NOTE: this local VK shadows the output parameter; it only describes
    // the value category of the derived-to-base-converted LHS expression.
    ExprValueKind VK = isIndirect ? VK_PRValue : LHS.get()->getValueKind();
    LHS = ImpCastExprToType(E: LHS.get(), Type: UseType, CK: CK_DerivedToBase, VK,
                            BasePath: &BasePath);
  }

  if (isa<CXXScalarValueInitExpr>(Val: RHS.get()->IgnoreParens())) {
    // Diagnose use of pointer-to-member type which when used as
    // the functional cast in a pointer-to-member expression.
    Diag(Loc, DiagID: diag::err_pointer_to_member_type) << isIndirect;
    return QualType();
  }

  // C++ 5.5p2
  //   The result is an object or a function of the type specified by the
  //   second operand.
  // The cv qualifiers are the union of those in the pointer and the left side,
  // in accordance with 5.5p5 and 5.2.5.
  QualType Result = MemPtr->getPointeeType();
  Result = Context.getCVRQualifiedType(T: Result, CVR: LHSType.getCVRQualifiers());

  // C++0x [expr.mptr.oper]p6:
  //   In a .* expression whose object expression is an rvalue, the program is
  //   ill-formed if the second operand is a pointer to member function with
  //   ref-qualifier &. In a ->* expression or in a .* expression whose object
  //   expression is an lvalue, the program is ill-formed if the second operand
  //   is a pointer to member function with ref-qualifier &&.
  if (const FunctionProtoType *Proto = Result->getAs<FunctionProtoType>()) {
    switch (Proto->getRefQualifier()) {
    case RQ_None:
      // Do nothing
      break;

    case RQ_LValue:
      if (!isIndirect && !LHS.get()->Classify(Ctx&: Context).isLValue()) {
        // C++2a allows functions with ref-qualifier & if their cv-qualifier-seq
        // is (exactly) 'const'.
        if (Proto->isConst() && !Proto->isVolatile())
          Diag(Loc, DiagID: getLangOpts().CPlusPlus20
                        ? diag::warn_cxx17_compat_pointer_to_const_ref_member_on_rvalue
                        : diag::ext_pointer_to_const_ref_member_on_rvalue);
        else
          Diag(Loc, DiagID: diag::err_pointer_to_member_oper_value_classify)
              << RHSType << 1 << LHS.get()->getSourceRange();
      }
      break;

    case RQ_RValue:
      if (isIndirect || !LHS.get()->Classify(Ctx&: Context).isRValue())
        Diag(Loc, DiagID: diag::err_pointer_to_member_oper_value_classify)
            << RHSType << 0 << LHS.get()->getSourceRange();
      break;
    }
  }

  // C++ [expr.mptr.oper]p6:
  //   The result of a .* expression whose second operand is a pointer
  //   to a data member is of the same value category as its
  //   first operand. The result of a .* expression whose second
  //   operand is a pointer to a member function is a prvalue. The
  //   result of an ->* expression is an lvalue if its second operand
  //   is a pointer to data member and a prvalue otherwise.
  if (Result->isFunctionType()) {
    VK = VK_PRValue;
    return Context.BoundMemberTy;
  } else if (isIndirect) {
    VK = VK_LValue;
  } else {
    VK = LHS.get()->getValueKind();
  }

  return Result;
}
5589
/// Try to convert a type to another according to C++11 5.16p3.
///
/// This is part of the parameter validation for the ? operator. If either
/// value operand is a class type, the two operands are attempted to be
/// converted to each other. This function does the conversion in one
/// direction only; callers invoke it twice (once per direction).
///
/// \param Self the semantic-analysis instance.
/// \param From the operand being converted.
/// \param To the operand whose type is the conversion target.
/// \param QuestionLoc the location of the '?' token, for diagnostics.
/// \param HaveConversion [out] true if a usable conversion was found.
/// \param ToType [out] the type From should be converted to; meaningful
///        only when HaveConversion is true.
/// \returns true if the program is ill-formed and has already been
///          diagnosed as such (i.e. an ambiguous initialization sequence).
static bool TryClassUnification(Sema &Self, Expr *From, Expr *To,
                                SourceLocation QuestionLoc,
                                bool &HaveConversion,
                                QualType &ToType) {
  HaveConversion = false;
  ToType = To->getType();

  InitializationKind Kind =
      InitializationKind::CreateCopy(InitLoc: To->getBeginLoc(), EqualLoc: SourceLocation());
  // C++11 5.16p3
  //   The process for determining whether an operand expression E1 of type T1
  //   can be converted to match an operand expression E2 of type T2 is defined
  //   as follows:
  //   -- If E2 is an lvalue: E1 can be converted to match E2 if E1 can be
  //      implicitly converted to type "lvalue reference to T2", subject to the
  //      constraint that in the conversion the reference must bind directly to
  //      an lvalue.
  //   -- If E2 is an xvalue: E1 can be converted to match E2 if E1 can be
  //      implicitly converted to the type "rvalue reference to R2", subject to
  //      the constraint that the reference must bind directly.
  if (To->isGLValue()) {
    QualType T = Self.Context.getReferenceQualifiedType(e: To);
    InitializedEntity Entity = InitializedEntity::InitializeTemporary(Type: T);

    InitializationSequence InitSeq(Self, Entity, Kind, From);
    if (InitSeq.isDirectReferenceBinding()) {
      ToType = T;
      HaveConversion = true;
      return false;
    }

    if (InitSeq.isAmbiguous())
      return InitSeq.Diagnose(S&: Self, Entity, Kind, Args: From);
  }

  //   -- If E2 is an rvalue, or if the conversion above cannot be done:
  //      -- if E1 and E2 have class type, and the underlying class types are
  //         the same or one is a base class of the other:
  QualType FTy = From->getType();
  QualType TTy = To->getType();
  const RecordType *FRec = FTy->getAsCanonical<RecordType>();
  const RecordType *TRec = TTy->getAsCanonical<RecordType>();
  bool FDerivedFromT = FRec && TRec && FRec != TRec &&
                       Self.IsDerivedFrom(Loc: QuestionLoc, Derived: FTy, Base: TTy);
  if (FRec && TRec && (FRec == TRec || FDerivedFromT ||
                       Self.IsDerivedFrom(Loc: QuestionLoc, Derived: TTy, Base: FTy))) {
    //         E1 can be converted to match E2 if the class of T2 is the
    //         same type as, or a base class of, the class of T1, and
    //         [cv2 > cv1].
    if (FRec == TRec || FDerivedFromT) {
      if (TTy.isAtLeastAsQualifiedAs(other: FTy, Ctx: Self.getASTContext())) {
        InitializedEntity Entity = InitializedEntity::InitializeTemporary(Type: TTy);
        InitializationSequence InitSeq(Self, Entity, Kind, From);
        if (InitSeq) {
          HaveConversion = true;
          return false;
        }

        if (InitSeq.isAmbiguous())
          return InitSeq.Diagnose(S&: Self, Entity, Kind, Args: From);
      }
    }

    // Related class types, but no usable conversion in this direction;
    // this is not an error by itself.
    return false;
  }

  //     -- Otherwise: E1 can be converted to match E2 if E1 can be
  //        implicitly converted to the type that expression E2 would have
  //        if E2 were converted to an rvalue (or the type it has, if E2 is
  //        an rvalue).
  //
  // This actually refers very narrowly to the lvalue-to-rvalue conversion, not
  // to the array-to-pointer or function-to-pointer conversions.
  TTy = TTy.getNonLValueExprType(Context: Self.Context);

  InitializedEntity Entity = InitializedEntity::InitializeTemporary(Type: TTy);
  InitializationSequence InitSeq(Self, Entity, Kind, From);
  HaveConversion = !InitSeq.Failed();
  ToType = TTy;
  if (InitSeq.isAmbiguous())
    return InitSeq.Diagnose(S&: Self, Entity, Kind, Args: From);

  return false;
}
5681
5682/// Try to find a common type for two according to C++0x 5.16p5.
5683///
5684/// This is part of the parameter validation for the ? operator. If either
5685/// value operand is a class type, overload resolution is used to find a
5686/// conversion to a common type.
5687static bool FindConditionalOverload(Sema &Self, ExprResult &LHS, ExprResult &RHS,
5688 SourceLocation QuestionLoc) {
5689 Expr *Args[2] = { LHS.get(), RHS.get() };
5690 OverloadCandidateSet CandidateSet(QuestionLoc,
5691 OverloadCandidateSet::CSK_Operator);
5692 Self.AddBuiltinOperatorCandidates(Op: OO_Conditional, OpLoc: QuestionLoc, Args,
5693 CandidateSet);
5694
5695 OverloadCandidateSet::iterator Best;
5696 switch (CandidateSet.BestViableFunction(S&: Self, Loc: QuestionLoc, Best)) {
5697 case OR_Success: {
5698 // We found a match. Perform the conversions on the arguments and move on.
5699 ExprResult LHSRes = Self.PerformImplicitConversion(
5700 From: LHS.get(), ToType: Best->BuiltinParamTypes[0], ICS: Best->Conversions[0],
5701 Action: AssignmentAction::Converting);
5702 if (LHSRes.isInvalid())
5703 break;
5704 LHS = LHSRes;
5705
5706 ExprResult RHSRes = Self.PerformImplicitConversion(
5707 From: RHS.get(), ToType: Best->BuiltinParamTypes[1], ICS: Best->Conversions[1],
5708 Action: AssignmentAction::Converting);
5709 if (RHSRes.isInvalid())
5710 break;
5711 RHS = RHSRes;
5712 if (Best->Function)
5713 Self.MarkFunctionReferenced(Loc: QuestionLoc, Func: Best->Function);
5714 return false;
5715 }
5716
5717 case OR_No_Viable_Function:
5718
5719 // Emit a better diagnostic if one of the expressions is a null pointer
5720 // constant and the other is a pointer type. In this case, the user most
5721 // likely forgot to take the address of the other expression.
5722 if (Self.DiagnoseConditionalForNull(LHSExpr: LHS.get(), RHSExpr: RHS.get(), QuestionLoc))
5723 return true;
5724
5725 Self.Diag(Loc: QuestionLoc, DiagID: diag::err_typecheck_cond_incompatible_operands)
5726 << LHS.get()->getType() << RHS.get()->getType()
5727 << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
5728 return true;
5729
5730 case OR_Ambiguous:
5731 Self.Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_ambiguous_ovl)
5732 << LHS.get()->getType() << RHS.get()->getType()
5733 << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
5734 // FIXME: Print the possible common types by printing the return types of
5735 // the viable candidates.
5736 break;
5737
5738 case OR_Deleted:
5739 llvm_unreachable("Conditional operator has only built-in overloads");
5740 }
5741 return true;
5742}
5743
5744/// Perform an "extended" implicit conversion as returned by
5745/// TryClassUnification.
5746static bool ConvertForConditional(Sema &Self, ExprResult &E, QualType T) {
5747 InitializedEntity Entity = InitializedEntity::InitializeTemporary(Type: T);
5748 InitializationKind Kind =
5749 InitializationKind::CreateCopy(InitLoc: E.get()->getBeginLoc(), EqualLoc: SourceLocation());
5750 Expr *Arg = E.get();
5751 InitializationSequence InitSeq(Self, Entity, Kind, Arg);
5752 ExprResult Result = InitSeq.Perform(S&: Self, Entity, Kind, Args: Arg);
5753 if (Result.isInvalid())
5754 return true;
5755
5756 E = Result;
5757 return false;
5758}
5759
5760// Check the condition operand of ?: to see if it is valid for the GCC
5761// extension.
5762static bool isValidVectorForConditionalCondition(ASTContext &Ctx,
5763 QualType CondTy) {
5764 bool IsSVEVectorType = CondTy->isSveVLSBuiltinType();
5765 if (!CondTy->isVectorType() && !CondTy->isExtVectorType() && !IsSVEVectorType)
5766 return false;
5767 const QualType EltTy =
5768 IsSVEVectorType
5769 ? cast<BuiltinType>(Val: CondTy.getCanonicalType())->getSveEltType(Ctx)
5770 : cast<VectorType>(Val: CondTy.getCanonicalType())->getElementType();
5771 assert(!EltTy->isEnumeralType() && "Vectors cant be enum types");
5772 return EltTy->isIntegralType(Ctx);
5773}
5774
/// Check the value operands of a vector conditional expression, the GCC
/// `vec ? a : b` extension, whose condition has already been validated by
/// isValidVectorForConditionalCondition.
///
/// LHS and RHS may each be a scalar, a fixed-width vector, or a sizeless
/// (builtin/SVE) vector; scalars are splatted to a vector matching the
/// condition's element count. The vector "flavor" (ext_vector vs. sizeless
/// vs. generic) and the element count of the result must agree with the
/// condition.
///
/// \param Cond the (already converted) condition expression.
/// \param LHS,RHS the value operands; rewritten in place by conversions.
/// \param QuestionLoc the location of the '?' token, for diagnostics.
/// \returns the vector result type, or a null QualType after diagnosing.
QualType Sema::CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
                                           ExprResult &RHS,
                                           SourceLocation QuestionLoc) {
  LHS = DefaultFunctionArrayLvalueConversion(E: LHS.get());
  RHS = DefaultFunctionArrayLvalueConversion(E: RHS.get());

  QualType CondType = Cond.get()->getType();
  QualType LHSType = LHS.get()->getType();
  QualType RHSType = RHS.get()->getType();

  bool LHSSizelessVector = LHSType->isSizelessVectorType();
  bool RHSSizelessVector = RHSType->isSizelessVectorType();
  bool LHSIsVector = LHSType->isVectorType() || LHSSizelessVector;
  bool RHSIsVector = RHSType->isVectorType() || RHSSizelessVector;

  // Returns the element type and element count of Type, which must be
  // either a fixed-width VectorType or a builtin sizeless vector type.
  auto GetVectorInfo =
      [&](QualType Type) -> std::pair<QualType, llvm::ElementCount> {
    if (const auto *VT = Type->getAs<VectorType>())
      return std::make_pair(x: VT->getElementType(),
                            y: llvm::ElementCount::getFixed(MinVal: VT->getNumElements()));
    ASTContext::BuiltinVectorTypeInfo VectorInfo =
        Context.getBuiltinVectorTypeInfo(VecTy: Type->castAs<BuiltinType>());
    return std::make_pair(x&: VectorInfo.ElementType, y&: VectorInfo.EC);
  };

  auto [CondElementTy, CondElementCount] = GetVectorInfo(CondType);

  QualType ResultType;
  if (LHSIsVector && RHSIsVector) {
    // Both value operands are vectors: the ext_vector-ness must match the
    // condition, and the two operand types must be identical.
    if (CondType->isExtVectorType() != LHSType->isExtVectorType()) {
      Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_vector_cond_result_mismatch)
          << /*isExtVectorNotSizeless=*/1;
      return {};
    }

    // If both are vector types, they must be the same type.
    if (!Context.hasSameType(T1: LHSType, T2: RHSType)) {
      Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_vector_mismatched)
          << LHSType << RHSType;
      return {};
    }
    ResultType = Context.getCommonSugaredType(X: LHSType, Y: RHSType);
  } else if (LHSIsVector || RHSIsVector) {
    // Exactly one operand is a vector: reuse the binary-operator vector
    // checking, which splats the scalar side and picks the result type.
    bool ResultSizeless = LHSSizelessVector || RHSSizelessVector;
    if (ResultSizeless != CondType->isSizelessVectorType()) {
      Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_vector_cond_result_mismatch)
          << /*isExtVectorNotSizeless=*/0;
      return {};
    }
    if (ResultSizeless)
      ResultType = CheckSizelessVectorOperands(LHS, RHS, Loc: QuestionLoc,
                                               /*IsCompAssign*/ false,
                                               OperationKind: ArithConvKind::Conditional);
    else
      ResultType = CheckVectorOperands(
          LHS, RHS, Loc: QuestionLoc, /*isCompAssign*/ IsCompAssign: false, /*AllowBothBool*/ true,
          /*AllowBoolConversions*/ AllowBoolConversion: false,
          /*AllowBoolOperation*/ true,
          /*ReportInvalid*/ true);
    if (ResultType.isNull())
      return {};
  } else {
    // Both are scalar.
    LHSType = LHSType.getUnqualifiedType();
    RHSType = RHSType.getUnqualifiedType();
    QualType ResultElementTy =
        Context.hasSameType(T1: LHSType, T2: RHSType)
            ? Context.getCommonSugaredType(X: LHSType, Y: RHSType)
            : UsualArithmeticConversions(LHS, RHS, Loc: QuestionLoc,
                                         ACK: ArithConvKind::Conditional);

    if (ResultElementTy->isEnumeralType()) {
      Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_vector_operand_type)
          << ResultElementTy;
      return {};
    }
    // Build a vector of the common element type matching the condition's
    // flavor and element count, then splat both scalar operands to it.
    if (CondType->isExtVectorType()) {
      ResultType = Context.getExtVectorType(VectorType: ResultElementTy,
                                            NumElts: CondElementCount.getFixedValue());
    } else if (CondType->isSizelessVectorType()) {
      ResultType = Context.getScalableVectorType(
          EltTy: ResultElementTy, NumElts: CondElementCount.getKnownMinValue());
      // There are not scalable vector type mappings for all element counts.
      if (ResultType.isNull()) {
        Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_vector_scalar_type_unsupported)
            << ResultElementTy << CondType;
        return {};
      }
    } else {
      ResultType = Context.getVectorType(VectorType: ResultElementTy,
                                         NumElts: CondElementCount.getFixedValue(),
                                         VecKind: VectorKind::Generic);
    }
    LHS = ImpCastExprToType(E: LHS.get(), Type: ResultType, CK: CK_VectorSplat);
    RHS = ImpCastExprToType(E: RHS.get(), Type: ResultType, CK: CK_VectorSplat);
  }

  assert(!ResultType.isNull() &&
         (ResultType->isVectorType() || ResultType->isSizelessVectorType()) &&
         (!CondType->isExtVectorType() || ResultType->isExtVectorType()) &&
         "Result should have been a vector type");

  // The result must have exactly as many elements as the condition.
  auto [ResultElementTy, ResultElementCount] = GetVectorInfo(ResultType);
  if (ResultElementCount != CondElementCount) {
    Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_vector_size) << CondType
                                                          << ResultType;
    return {};
  }

  // Boolean vectors are permitted outside of OpenCL mode.
  if (Context.getTypeSize(T: ResultElementTy) !=
          Context.getTypeSize(T: CondElementTy) &&
      (!CondElementTy->isBooleanType() || LangOpts.OpenCL)) {
    Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_vector_element_size)
        << CondType << ResultType;
    return {};
  }

  return ResultType;
}
5895
/// Check the operands of a C++ conditional expression (?:), following the
/// sequence of rules in C++11 [expr.cond].
///
/// \param Cond the condition; contextually converted to bool here (or only
///        lvalue-converted, for the GCC vector-conditional extension).
/// \param LHS,RHS the second and third operands; rewritten in place as
///        conversions are applied.
/// \param VK [out] the value category of the result (defaults to prvalue).
/// \param OK [out] the object kind of the result (e.g. bit-field).
/// \param QuestionLoc the location of the '?' token, for diagnostics.
/// \returns the result type (DependentTy if any operand is type-dependent),
///          or a null QualType after diagnosing an error.
QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
                                           ExprResult &RHS, ExprValueKind &VK,
                                           ExprObjectKind &OK,
                                           SourceLocation QuestionLoc) {
  // FIXME: Handle C99's complex types, block pointers and Obj-C++ interface
  // pointers.

  // Assume r-value.
  VK = VK_PRValue;
  OK = OK_Ordinary;
  bool IsVectorConditional =
      isValidVectorForConditionalCondition(Ctx&: Context, CondTy: Cond.get()->getType());

  // C++11 [expr.cond]p1
  //   The first expression is contextually converted to bool.
  if (!Cond.get()->isTypeDependent()) {
    ExprResult CondRes = IsVectorConditional
                             ? DefaultFunctionArrayLvalueConversion(E: Cond.get())
                             : CheckCXXBooleanCondition(CondExpr: Cond.get());
    if (CondRes.isInvalid())
      return QualType();
    Cond = CondRes;
  } else {
    // To implement C++, the first expression typically doesn't alter the result
    // type of the conditional, however the GCC compatible vector extension
    // changes the result type to be that of the conditional. Since we cannot
    // know if this is a vector extension here, delay the conversion of the
    // LHS/RHS below until later.
    return Context.DependentTy;
  }


  // Either of the arguments dependent?
  if (LHS.get()->isTypeDependent() || RHS.get()->isTypeDependent())
    return Context.DependentTy;

  // C++11 [expr.cond]p2
  //   If either the second or the third operand has type (cv) void, ...
  QualType LTy = LHS.get()->getType();
  QualType RTy = RHS.get()->getType();
  bool LVoid = LTy->isVoidType();
  bool RVoid = RTy->isVoidType();
  if (LVoid || RVoid) {
    //   ... one of the following shall hold:
    //   -- The second or the third operand (but not both) is a (possibly
    //      parenthesized) throw-expression; the result is of the type
    //      and value category of the other.
    bool LThrow = isa<CXXThrowExpr>(Val: LHS.get()->IgnoreParenImpCasts());
    bool RThrow = isa<CXXThrowExpr>(Val: RHS.get()->IgnoreParenImpCasts());

    // Void expressions aren't legal in the vector-conditional expressions.
    if (IsVectorConditional) {
      SourceRange DiagLoc =
          LVoid ? LHS.get()->getSourceRange() : RHS.get()->getSourceRange();
      bool IsThrow = LVoid ? LThrow : RThrow;
      Diag(Loc: DiagLoc.getBegin(), DiagID: diag::err_conditional_vector_has_void)
          << DiagLoc << IsThrow;
      return QualType();
    }

    if (LThrow != RThrow) {
      Expr *NonThrow = LThrow ? RHS.get() : LHS.get();
      VK = NonThrow->getValueKind();
      // DR (no number yet): the result is a bit-field if the
      // non-throw-expression operand is a bit-field.
      OK = NonThrow->getObjectKind();
      return NonThrow->getType();
    }

    //   -- Both the second and third operands have type void; the result is of
    //      type void and is a prvalue.
    if (LVoid && RVoid)
      return Context.getCommonSugaredType(X: LTy, Y: RTy);

    // Neither holds, error.
    Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_void_nonvoid)
      << (LVoid ? RTy : LTy) << (LVoid ? 0 : 1)
      << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
    return QualType();
  }

  // Neither is void.
  if (IsVectorConditional)
    return CheckVectorConditionalTypes(Cond, LHS, RHS, QuestionLoc);

  // WebAssembly tables are not allowed as conditional LHS or RHS.
  if (LTy->isWebAssemblyTableType() || RTy->isWebAssemblyTableType()) {
    Diag(Loc: QuestionLoc, DiagID: diag::err_wasm_table_conditional_expression)
        << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
    return QualType();
  }

  // C++11 [expr.cond]p3
  //   Otherwise, if the second and third operand have different types, and
  //   either has (cv) class type [...] an attempt is made to convert each of
  //   those operands to the type of the other.
  if (!Context.hasSameType(T1: LTy, T2: RTy) &&
      (LTy->isRecordType() || RTy->isRecordType())) {
    // These return true if a single direction is already ambiguous.
    QualType L2RType, R2LType;
    bool HaveL2R, HaveR2L;
    if (TryClassUnification(Self&: *this, From: LHS.get(), To: RHS.get(), QuestionLoc, HaveConversion&: HaveL2R, ToType&: L2RType))
      return QualType();
    if (TryClassUnification(Self&: *this, From: RHS.get(), To: LHS.get(), QuestionLoc, HaveConversion&: HaveR2L, ToType&: R2LType))
      return QualType();

    //   If both can be converted, [...] the program is ill-formed.
    if (HaveL2R && HaveR2L) {
      Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_ambiguous)
        << LTy << RTy << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
      return QualType();
    }

    //   If exactly one conversion is possible, that conversion is applied to
    //   the chosen operand and the converted operands are used in place of the
    //   original operands for the remainder of this section.
    if (HaveL2R) {
      if (ConvertForConditional(Self&: *this, E&: LHS, T: L2RType) || LHS.isInvalid())
        return QualType();
      LTy = LHS.get()->getType();
    } else if (HaveR2L) {
      if (ConvertForConditional(Self&: *this, E&: RHS, T: R2LType) || RHS.isInvalid())
        return QualType();
      RTy = RHS.get()->getType();
    }
  }

  // C++11 [expr.cond]p3
  //   if both are glvalues of the same value category and the same type except
  //   for cv-qualification, an attempt is made to convert each of those
  //   operands to the type of the other.
  // FIXME:
  //   Resolving a defect in P0012R1: we extend this to cover all cases where
  //   one of the operands is reference-compatible with the other, in order
  //   to support conditionals between functions differing in noexcept. This
  //   will similarly cover difference in array bounds after P0388R4.
  // FIXME: If LTy and RTy have a composite pointer type, should we convert to
  // that instead?
  ExprValueKind LVK = LHS.get()->getValueKind();
  ExprValueKind RVK = RHS.get()->getValueKind();
  if (!Context.hasSameType(T1: LTy, T2: RTy) && LVK == RVK && LVK != VK_PRValue) {
    // DerivedToBase was already handled by the class-specific case above.
    // FIXME: Should we allow ObjC conversions here?
    const ReferenceConversions AllowedConversions =
        ReferenceConversions::Qualification |
        ReferenceConversions::NestedQualification |
        ReferenceConversions::Function;

    ReferenceConversions RefConv;
    if (CompareReferenceRelationship(Loc: QuestionLoc, T1: LTy, T2: RTy, Conv: &RefConv) ==
            Ref_Compatible &&
        !(RefConv & ~AllowedConversions) &&
        // [...] subject to the constraint that the reference must bind
        // directly [...]
        !RHS.get()->refersToBitField() && !RHS.get()->refersToVectorElement()) {
      RHS = ImpCastExprToType(E: RHS.get(), Type: LTy, CK: CK_NoOp, VK: RVK);
      RTy = RHS.get()->getType();
    } else if (CompareReferenceRelationship(Loc: QuestionLoc, T1: RTy, T2: LTy, Conv: &RefConv) ==
                   Ref_Compatible &&
               !(RefConv & ~AllowedConversions) &&
               !LHS.get()->refersToBitField() &&
               !LHS.get()->refersToVectorElement()) {
      LHS = ImpCastExprToType(E: LHS.get(), Type: RTy, CK: CK_NoOp, VK: LVK);
      LTy = LHS.get()->getType();
    }
  }

  // C++11 [expr.cond]p4
  //   If the second and third operands are glvalues of the same value
  //   category and have the same type, the result is of that type and
  //   value category and it is a bit-field if the second or the third
  //   operand is a bit-field, or if both are bit-fields.
  // We only extend this to bitfields, not to the crazy other kinds of
  // l-values.
  bool Same = Context.hasSameType(T1: LTy, T2: RTy);
  if (Same && LVK == RVK && LVK != VK_PRValue &&
      LHS.get()->isOrdinaryOrBitFieldObject() &&
      RHS.get()->isOrdinaryOrBitFieldObject()) {
    VK = LHS.get()->getValueKind();
    if (LHS.get()->getObjectKind() == OK_BitField ||
        RHS.get()->getObjectKind() == OK_BitField)
      OK = OK_BitField;
    return Context.getCommonSugaredType(X: LTy, Y: RTy);
  }

  // C++11 [expr.cond]p5
  //   Otherwise, the result is a prvalue. If the second and third operands
  //   do not have the same type, and either has (cv) class type, ...
  if (!Same && (LTy->isRecordType() || RTy->isRecordType())) {
    //   ... overload resolution is used to determine the conversions (if any)
    //   to be applied to the operands. If the overload resolution fails, the
    //   program is ill-formed.
    if (FindConditionalOverload(Self&: *this, LHS, RHS, QuestionLoc))
      return QualType();
  }

  // C++11 [expr.cond]p6
  //   Lvalue-to-rvalue, array-to-pointer, and function-to-pointer standard
  //   conversions are performed on the second and third operands.
  LHS = DefaultFunctionArrayLvalueConversion(E: LHS.get());
  RHS = DefaultFunctionArrayLvalueConversion(E: RHS.get());
  if (LHS.isInvalid() || RHS.isInvalid())
    return QualType();
  LTy = LHS.get()->getType();
  RTy = RHS.get()->getType();

  //   After those conversions, one of the following shall hold:
  //   -- The second and third operands have the same type; the result
  //      is of that type. If the operands have class type, the result
  //      is a prvalue temporary of the result type, which is
  //      copy-initialized from either the second operand or the third
  //      operand depending on the value of the first operand.
  if (Context.hasSameType(T1: LTy, T2: RTy)) {
    if (LTy->isRecordType()) {
      // The operands have class type. Make a temporary copy.
      ExprResult LHSCopy = PerformCopyInitialization(
          Entity: InitializedEntity::InitializeTemporary(Type: LTy), EqualLoc: SourceLocation(), Init: LHS);
      if (LHSCopy.isInvalid())
        return QualType();

      ExprResult RHSCopy = PerformCopyInitialization(
          Entity: InitializedEntity::InitializeTemporary(Type: RTy), EqualLoc: SourceLocation(), Init: RHS);
      if (RHSCopy.isInvalid())
        return QualType();

      LHS = LHSCopy;
      RHS = RHSCopy;
    }
    return Context.getCommonSugaredType(X: LTy, Y: RTy);
  }

  // Extension: conditional operator involving vector types.
  if (LTy->isVectorType() || RTy->isVectorType())
    return CheckVectorOperands(LHS, RHS, Loc: QuestionLoc, /*isCompAssign*/ IsCompAssign: false,
                               /*AllowBothBool*/ true,
                               /*AllowBoolConversions*/ AllowBoolConversion: false,
                               /*AllowBoolOperation*/ false,
                               /*ReportInvalid*/ true);

  //   -- The second and third operands have arithmetic or enumeration type;
  //      the usual arithmetic conversions are performed to bring them to a
  //      common type, and the result is of that type.
  if (LTy->isArithmeticType() && RTy->isArithmeticType()) {
    QualType ResTy = UsualArithmeticConversions(LHS, RHS, Loc: QuestionLoc,
                                                ACK: ArithConvKind::Conditional);
    if (LHS.isInvalid() || RHS.isInvalid())
      return QualType();
    if (ResTy.isNull()) {
      Diag(Loc: QuestionLoc,
           DiagID: diag::err_typecheck_cond_incompatible_operands) << LTy << RTy
        << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
      return QualType();
    }

    LHS = ImpCastExprToType(E: LHS.get(), Type: ResTy, CK: PrepareScalarCast(src&: LHS, destType: ResTy));
    RHS = ImpCastExprToType(E: RHS.get(), Type: ResTy, CK: PrepareScalarCast(src&: RHS, destType: ResTy));

    return ResTy;
  }

  //   -- The second and third operands have pointer type, or one has pointer
  //      type and the other is a null pointer constant, or both are null
  //      pointer constants, at least one of which is non-integral; pointer
  //      conversions and qualification conversions are performed to bring them
  //      to their composite pointer type. The result is of the composite
  //      pointer type.
  //   -- The second and third operands have pointer to member type, or one has
  //      pointer to member type and the other is a null pointer constant;
  //      pointer to member conversions and qualification conversions are
  //      performed to bring them to a common type, whose cv-qualification
  //      shall match the cv-qualification of either the second or the third
  //      operand. The result is of the common type.
  QualType Composite = FindCompositePointerType(Loc: QuestionLoc, E1&: LHS, E2&: RHS);
  if (!Composite.isNull())
    return Composite;

  // Similarly, attempt to find composite type of two objective-c pointers.
  Composite = ObjC().FindCompositeObjCPointerType(LHS, RHS, QuestionLoc);
  if (LHS.isInvalid() || RHS.isInvalid())
    return QualType();
  if (!Composite.isNull())
    return Composite;

  // Check if we are using a null with a non-pointer type.
  if (DiagnoseConditionalForNull(LHSExpr: LHS.get(), RHSExpr: RHS.get(), QuestionLoc))
    return QualType();

  // No rule matched: the operands are incompatible.
  Diag(Loc: QuestionLoc, DiagID: diag::err_typecheck_cond_incompatible_operands)
    << LHS.get()->getType() << RHS.get()->getType()
    << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
  return QualType();
}
6188
/// Find the composite pointer type of two expressions, per C++1z [expr]p14:
/// the common type to which both operands can be brought by pointer,
/// pointer-to-member, and qualification conversions.
///
/// \param Loc  Location at which the composite type is formed; used for
///             derived-to-base checks and for diagnostics during conversion.
/// \param E1, E2  The operand expressions. If \p ConvertArgs is true and a
///             composite type is found, both are replaced in place with
///             expressions converted to that type.
/// \param ConvertArgs  Whether to actually convert the operands.
/// \returns the composite pointer type, or a null QualType if none exists.
QualType Sema::FindCompositePointerType(SourceLocation Loc,
                                        Expr *&E1, Expr *&E2,
                                        bool ConvertArgs) {
  assert(getLangOpts().CPlusPlus && "This function assumes C++");

  // C++1z [expr]p14:
  //   The composite pointer type of two operands p1 and p2 having types T1
  //   and T2
  QualType T1 = E1->getType(), T2 = E2->getType();

  //   where at least one is a pointer or pointer to member type or
  //   std::nullptr_t is:
  bool T1IsPointerLike = T1->isAnyPointerType() || T1->isMemberPointerType() ||
                         T1->isNullPtrType();
  bool T2IsPointerLike = T2->isAnyPointerType() || T2->isMemberPointerType() ||
                         T2->isNullPtrType();
  if (!T1IsPointerLike && !T2IsPointerLike)
    return QualType();

  //   - if both p1 and p2 are null pointer constants, std::nullptr_t;
  // This can't actually happen, following the standard, but we also use this
  // to implement the end of [expr.conv], which hits this case.
  //
  //   - if either p1 or p2 is a null pointer constant, T2 or T1, respectively;
  if (T1IsPointerLike &&
      E2->isNullPointerConstant(Ctx&: Context, NPC: Expr::NPC_ValueDependentIsNull)) {
    if (ConvertArgs)
      E2 = ImpCastExprToType(E: E2, Type: T1, CK: T1->isMemberPointerType()
                                     ? CK_NullToMemberPointer
                                     : CK_NullToPointer).get();
    return T1;
  }
  if (T2IsPointerLike &&
      E1->isNullPointerConstant(Ctx&: Context, NPC: Expr::NPC_ValueDependentIsNull)) {
    if (ConvertArgs)
      E1 = ImpCastExprToType(E: E1, Type: T2, CK: T2->isMemberPointerType()
                                     ? CK_NullToMemberPointer
                                     : CK_NullToPointer).get();
    return T2;
  }

  // Now both have to be pointers or member pointers.
  if (!T1IsPointerLike || !T2IsPointerLike)
    return QualType();
  assert(!T1->isNullPtrType() && !T2->isNullPtrType() &&
         "nullptr_t should be a null pointer constant");

  /// One level of pointer / ObjC-pointer / member-pointer / array structure
  /// peeled off both operand types; remembered so the composite type can be
  /// rebuilt from the inside out once the innermost types have been unified.
  struct Step {
    enum Kind { Pointer, ObjCPointer, MemberPointer, Array } K;
    // Qualifiers to apply under the step kind.
    Qualifiers Quals;
    /// The class for a pointer-to-member; a constant array type with a bound
    /// (if any) for an array.
    /// FIXME: Store Qualifier for pointer-to-member.
    const Type *ClassOrBound;

    Step(Kind K, const Type *ClassOrBound = nullptr)
        : K(K), ClassOrBound(ClassOrBound) {}
    /// Re-wrap \p T in this step's type structure, applying the merged
    /// qualifiers beneath the rebuilt level.
    QualType rebuild(ASTContext &Ctx, QualType T) const {
      T = Ctx.getQualifiedType(T, Qs: Quals);
      switch (K) {
      case Pointer:
        return Ctx.getPointerType(T);
      case MemberPointer:
        return Ctx.getMemberPointerType(T, /*Qualifier=*/std::nullopt,
                                        Cls: ClassOrBound->getAsCXXRecordDecl());
      case ObjCPointer:
        return Ctx.getObjCObjectPointerType(OIT: T);
      case Array:
        // A null ClassOrBound means an array of unknown bound.
        if (auto *CAT = cast_or_null<ConstantArrayType>(Val: ClassOrBound))
          return Ctx.getConstantArrayType(EltTy: T, ArySize: CAT->getSize(), SizeExpr: nullptr,
                                          ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
        else
          return Ctx.getIncompleteArrayType(EltTy: T, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
      }
      llvm_unreachable("unknown step kind");
    }
  };

  SmallVector<Step, 8> Steps;

  //   - if T1 is "pointer to cv1 C1" and T2 is "pointer to cv2 C2", where C1
  //     is reference-related to C2 or C2 is reference-related to C1 (8.6.3),
  //     the cv-combined type of T1 and T2 or the cv-combined type of T2 and T1,
  //     respectively;
  //   - if T1 is "pointer to member of C1 of type cv1 U1" and T2 is "pointer
  //     to member of C2 of type cv2 U2" for some non-function type U, where
  //     C1 is reference-related to C2 or C2 is reference-related to C1, the
  //     cv-combined type of T2 and T1 or the cv-combined type of T1 and T2,
  //     respectively;
  //   - if T1 and T2 are similar types (4.5), the cv-combined type of T1 and
  //     T2;
  //
  // Dismantle T1 and T2 to simultaneously determine whether they are similar
  // and to prepare to form the cv-combined type if so.
  QualType Composite1 = T1;
  QualType Composite2 = T2;
  // Number of initial steps that will need 'const' added when rebuilding,
  // because qualifiers differed at some deeper level (C++ [conv.qual]p3).
  unsigned NeedConstBefore = 0;
  while (true) {
    assert(!Composite1.isNull() && !Composite2.isNull());

    Qualifiers Q1, Q2;
    Composite1 = Context.getUnqualifiedArrayType(T: Composite1, Quals&: Q1);
    Composite2 = Context.getUnqualifiedArrayType(T: Composite2, Quals&: Q2);

    // Top-level qualifiers are ignored. Merge at all lower levels.
    if (!Steps.empty()) {
      // Find the qualifier union: (approximately) the unique minimal set of
      // qualifiers that is compatible with both types.
      Qualifiers Quals = Qualifiers::fromCVRUMask(CVRU: Q1.getCVRUQualifiers() |
                                                  Q2.getCVRUQualifiers());

      // Under one level of pointer or pointer-to-member, we can change to an
      // unambiguous compatible address space.
      if (Q1.getAddressSpace() == Q2.getAddressSpace()) {
        Quals.setAddressSpace(Q1.getAddressSpace());
      } else if (Steps.size() == 1) {
        bool MaybeQ1 = Q1.isAddressSpaceSupersetOf(other: Q2, Ctx: getASTContext());
        bool MaybeQ2 = Q2.isAddressSpaceSupersetOf(other: Q1, Ctx: getASTContext());
        if (MaybeQ1 == MaybeQ2) {
          // Exception for ptr size address spaces. Should be able to choose
          // either address space during comparison.
          if (isPtrSizeAddressSpace(AS: Q1.getAddressSpace()) ||
              isPtrSizeAddressSpace(AS: Q2.getAddressSpace()))
            MaybeQ1 = true;
          else
            return QualType(); // No unique best address space.
        }
        Quals.setAddressSpace(MaybeQ1 ? Q1.getAddressSpace()
                                      : Q2.getAddressSpace());
      } else {
        return QualType();
      }

      // FIXME: In C, we merge __strong and none to __strong at the top level.
      if (Q1.getObjCGCAttr() == Q2.getObjCGCAttr())
        Quals.setObjCGCAttr(Q1.getObjCGCAttr());
      else if (T1->isVoidPointerType() || T2->isVoidPointerType())
        assert(Steps.size() == 1);
      else
        return QualType();

      // Mismatched lifetime qualifiers never compatibly include each other.
      if (Q1.getObjCLifetime() == Q2.getObjCLifetime())
        Quals.setObjCLifetime(Q1.getObjCLifetime());
      else if (T1->isVoidPointerType() || T2->isVoidPointerType())
        assert(Steps.size() == 1);
      else
        return QualType();

      // Pointer-auth qualifiers must agree exactly; there is no union.
      if (Q1.getPointerAuth().isEquivalent(Other: Q2.getPointerAuth()))
        Quals.setPointerAuth(Q1.getPointerAuth());
      else
        return QualType();

      Steps.back().Quals = Quals;
      if (Q1 != Quals || Q2 != Quals)
        NeedConstBefore = Steps.size() - 1;
    }

    // FIXME: Can we unify the following with UnwrapSimilarTypes?

    const ArrayType *Arr1, *Arr2;
    if ((Arr1 = Context.getAsArrayType(T: Composite1)) &&
        (Arr2 = Context.getAsArrayType(T: Composite2))) {
      auto *CAT1 = dyn_cast<ConstantArrayType>(Val: Arr1);
      auto *CAT2 = dyn_cast<ConstantArrayType>(Val: Arr2);
      if (CAT1 && CAT2 && CAT1->getSize() == CAT2->getSize()) {
        Composite1 = Arr1->getElementType();
        Composite2 = Arr2->getElementType();
        Steps.emplace_back(Args: Step::Array, Args&: CAT1);
        continue;
      }
      bool IAT1 = isa<IncompleteArrayType>(Val: Arr1);
      bool IAT2 = isa<IncompleteArrayType>(Val: Arr2);
      if ((IAT1 && IAT2) ||
          (getLangOpts().CPlusPlus20 && (IAT1 != IAT2) &&
           ((bool)CAT1 != (bool)CAT2) &&
           (Steps.empty() || Steps.back().K != Step::Array))) {
        // In C++20 onwards, we can unify an array of N T with an array of
        // a different or unknown bound. But we can't form an array whose
        // element type is an array of unknown bound by doing so.
        Composite1 = Arr1->getElementType();
        Composite2 = Arr2->getElementType();
        Steps.emplace_back(Args: Step::Array);
        if (CAT1 || CAT2)
          NeedConstBefore = Steps.size();
        continue;
      }
    }

    const PointerType *Ptr1, *Ptr2;
    if ((Ptr1 = Composite1->getAs<PointerType>()) &&
        (Ptr2 = Composite2->getAs<PointerType>())) {
      Composite1 = Ptr1->getPointeeType();
      Composite2 = Ptr2->getPointeeType();
      Steps.emplace_back(Args: Step::Pointer);
      continue;
    }

    const ObjCObjectPointerType *ObjPtr1, *ObjPtr2;
    if ((ObjPtr1 = Composite1->getAs<ObjCObjectPointerType>()) &&
        (ObjPtr2 = Composite2->getAs<ObjCObjectPointerType>())) {
      Composite1 = ObjPtr1->getPointeeType();
      Composite2 = ObjPtr2->getPointeeType();
      Steps.emplace_back(Args: Step::ObjCPointer);
      continue;
    }

    const MemberPointerType *MemPtr1, *MemPtr2;
    if ((MemPtr1 = Composite1->getAs<MemberPointerType>()) &&
        (MemPtr2 = Composite2->getAs<MemberPointerType>())) {
      Composite1 = MemPtr1->getPointeeType();
      Composite2 = MemPtr2->getPointeeType();

      // At the top level, we can perform a base-to-derived pointer-to-member
      // conversion:
      //
      //   - [...] where C1 is reference-related to C2 or C2 is
      //     reference-related to C1
      //
      // (Note that the only kinds of reference-relatedness in scope here are
      // "same type or derived from".) At any other level, the class must
      // exactly match.
      CXXRecordDecl *Cls = nullptr,
                    *Cls1 = MemPtr1->getMostRecentCXXRecordDecl(),
                    *Cls2 = MemPtr2->getMostRecentCXXRecordDecl();
      if (declaresSameEntity(D1: Cls1, D2: Cls2))
        Cls = Cls1;
      else if (Steps.empty())
        Cls = IsDerivedFrom(Loc, Derived: Cls1, Base: Cls2) ? Cls1
              : IsDerivedFrom(Loc, Derived: Cls2, Base: Cls1) ? Cls2
                                                 : nullptr;
      if (!Cls)
        return QualType();

      Steps.emplace_back(Args: Step::MemberPointer,
                         Args: Context.getCanonicalTagType(TD: Cls).getTypePtr());
      continue;
    }

    // Special case: at the top level, we can decompose an Objective-C pointer
    // and a 'cv void *'. Unify the qualifiers.
    if (Steps.empty() && ((Composite1->isVoidPointerType() &&
                           Composite2->isObjCObjectPointerType()) ||
                          (Composite1->isObjCObjectPointerType() &&
                           Composite2->isVoidPointerType()))) {
      Composite1 = Composite1->getPointeeType();
      Composite2 = Composite2->getPointeeType();
      Steps.emplace_back(Args: Step::Pointer);
      continue;
    }

    // FIXME: block pointer types?

    // Cannot unwrap any more types.
    break;
  }

  //   - if T1 or T2 is "pointer to noexcept function" and the other type is
  //     "pointer to function", where the function types are otherwise the same,
  //     "pointer to function";
  //   - if T1 or T2 is "pointer to member of C1 of type function", the other
  //     type is "pointer to member of C2 of type noexcept function", and C1
  //     is reference-related to C2 or C2 is reference-related to C1, where
  //     the function types are otherwise the same, "pointer to member of C2 of
  //     type function" or "pointer to member of C1 of type function",
  //     respectively;
  //
  // We also support 'noreturn' here, so as a Clang extension we generalize the
  // above to:
  //
  //   - [Clang] If T1 and T2 are both of type "pointer to function" or
  //     "pointer to member function" and the pointee types can be unified
  //     by a function pointer conversion, that conversion is applied
  //     before checking the following rules.
  //
  // We've already unwrapped down to the function types, and we want to merge
  // rather than just convert, so do this ourselves rather than calling
  // IsFunctionConversion.
  //
  // FIXME: In order to match the standard wording as closely as possible, we
  // currently only do this under a single level of pointers. Ideally, we would
  // allow this in general, and set NeedConstBefore to the relevant depth on
  // the side(s) where we changed anything. If we permit that, we should also
  // consider this conversion when determining type similarity and model it as
  // a qualification conversion.
  if (Steps.size() == 1) {
    if (auto *FPT1 = Composite1->getAs<FunctionProtoType>()) {
      if (auto *FPT2 = Composite2->getAs<FunctionProtoType>()) {
        FunctionProtoType::ExtProtoInfo EPI1 = FPT1->getExtProtoInfo();
        FunctionProtoType::ExtProtoInfo EPI2 = FPT2->getExtProtoInfo();

        // The result is noreturn if both operands are.
        bool Noreturn =
            EPI1.ExtInfo.getNoReturn() && EPI2.ExtInfo.getNoReturn();
        EPI1.ExtInfo = EPI1.ExtInfo.withNoReturn(noReturn: Noreturn);
        EPI2.ExtInfo = EPI2.ExtInfo.withNoReturn(noReturn: Noreturn);

        // The result is CFI-unchecked if either operand is.
        bool CFIUncheckedCallee =
            EPI1.CFIUncheckedCallee || EPI2.CFIUncheckedCallee;
        EPI1.CFIUncheckedCallee = CFIUncheckedCallee;
        EPI2.CFIUncheckedCallee = CFIUncheckedCallee;

        // The result is nothrow if both operands are.
        SmallVector<QualType, 8> ExceptionTypeStorage;
        EPI1.ExceptionSpec = EPI2.ExceptionSpec = Context.mergeExceptionSpecs(
            ESI1: EPI1.ExceptionSpec, ESI2: EPI2.ExceptionSpec, ExceptionTypeStorage,
            AcceptDependent: getLangOpts().CPlusPlus17);

        Composite1 = Context.getFunctionType(ResultTy: FPT1->getReturnType(),
                                             Args: FPT1->getParamTypes(), EPI: EPI1);
        Composite2 = Context.getFunctionType(ResultTy: FPT2->getReturnType(),
                                             Args: FPT2->getParamTypes(), EPI: EPI2);
      }
    }
  }

  // There are some more conversions we can perform under exactly one pointer.
  if (Steps.size() == 1 && Steps.front().K == Step::Pointer &&
      !Context.hasSameType(T1: Composite1, T2: Composite2)) {
    //   - if T1 or T2 is "pointer to cv1 void" and the other type is
    //     "pointer to cv2 T", where T is an object type or void,
    //     "pointer to cv12 void", where cv12 is the union of cv1 and cv2;
    if (Composite1->isVoidType() && Composite2->isObjectType())
      Composite2 = Composite1;
    else if (Composite2->isVoidType() && Composite1->isObjectType())
      Composite1 = Composite2;
    //   - if T1 is "pointer to cv1 C1" and T2 is "pointer to cv2 C2", where C1
    //     is reference-related to C2 or C2 is reference-related to C1 (8.6.3),
    //     the cv-combined type of T1 and T2 or the cv-combined type of T2 and
    //     T1, respectively;
    //
    // The "similar type" handling covers all of this except for the "T1 is a
    // base class of T2" case in the definition of reference-related.
    else if (IsDerivedFrom(Loc, Derived: Composite1, Base: Composite2))
      Composite1 = Composite2;
    else if (IsDerivedFrom(Loc, Derived: Composite2, Base: Composite1))
      Composite2 = Composite1;
  }

  // At this point, either the inner types are the same or we have failed to
  // find a composite pointer type.
  if (!Context.hasSameType(T1: Composite1, T2: Composite2))
    return QualType();

  // Per C++ [conv.qual]p3, add 'const' to every level before the last
  // differing qualifier.
  for (unsigned I = 0; I != NeedConstBefore; ++I)
    Steps[I].Quals.addConst();

  // Rebuild the composite type from the unified innermost type outward.
  QualType Composite = Context.getCommonSugaredType(X: Composite1, Y: Composite2);
  for (auto &S : llvm::reverse(C&: Steps))
    Composite = S.rebuild(Ctx&: Context, T: Composite);

  if (ConvertArgs) {
    // Convert the expressions to the composite pointer type.
    InitializedEntity Entity =
        InitializedEntity::InitializeTemporary(Type: Composite);
    InitializationKind Kind =
        InitializationKind::CreateCopy(InitLoc: Loc, EqualLoc: SourceLocation());

    InitializationSequence E1ToC(*this, Entity, Kind, E1);
    if (!E1ToC)
      return QualType();

    InitializationSequence E2ToC(*this, Entity, Kind, E2);
    if (!E2ToC)
      return QualType();

    // FIXME: Let the caller know if these fail to avoid duplicate diagnostics.
    ExprResult E1Result = E1ToC.Perform(S&: *this, Entity, Kind, Args: E1);
    if (E1Result.isInvalid())
      return QualType();
    E1 = E1Result.get();

    ExprResult E2Result = E2ToC.Perform(S&: *this, Entity, Kind, Args: E2);
    if (E2Result.isInvalid())
      return QualType();
    E2 = E2Result.get();
  }

  return Composite;
}
6574
/// If \p E is a prvalue that requires cleanup — an ARC-retainable call
/// result, a non-trivial C struct, or a class-type temporary with a
/// non-trivial destructor — wrap or mark it accordingly; otherwise return
/// the expression unchanged.
ExprResult Sema::MaybeBindToTemporary(Expr *E) {
  if (!E)
    return ExprError();

  assert(!isa<CXXBindTemporaryExpr>(E) && "Double-bound temporary?");

  // If the result is a glvalue, we shouldn't bind it.
  if (E->isGLValue())
    return E;

  // In ARC, calls that return a retainable type can return retained,
  // in which case we have to insert a consuming cast.
  if (getLangOpts().ObjCAutoRefCount &&
      E->getType()->isObjCRetainableType()) {

    bool ReturnsRetained;

    // For actual calls, we compute this by examining the type of the
    // called value.
    if (CallExpr *Call = dyn_cast<CallExpr>(Val: E)) {
      Expr *Callee = Call->getCallee()->IgnoreParens();
      QualType T = Callee->getType();

      if (T == Context.BoundMemberTy) {
        // Handle pointer-to-members.
        if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Val: Callee))
          T = BinOp->getRHS()->getType();
        else if (MemberExpr *Mem = dyn_cast<MemberExpr>(Val: Callee))
          T = Mem->getMemberDecl()->getType();
      }

      // Strip the (block-/member-)pointer layer to reach the function type.
      if (const PointerType *Ptr = T->getAs<PointerType>())
        T = Ptr->getPointeeType();
      else if (const BlockPointerType *Ptr = T->getAs<BlockPointerType>())
        T = Ptr->getPointeeType();
      else if (const MemberPointerType *MemPtr = T->getAs<MemberPointerType>())
        T = MemPtr->getPointeeType();

      auto *FTy = T->castAs<FunctionType>();
      ReturnsRetained = FTy->getExtInfo().getProducesResult();

      // ActOnStmtExpr arranges things so that StmtExprs of retainable
      // type always produce a +1 object.
    } else if (isa<StmtExpr>(Val: E)) {
      ReturnsRetained = true;

      // We hit this case with the lambda conversion-to-block optimization;
      // we don't want any extra casts here.
    } else if (isa<CastExpr>(Val: E) &&
               isa<BlockExpr>(Val: cast<CastExpr>(Val: E)->getSubExpr())) {
      return E;

      // For message sends and property references, we try to find an
      // actual method. FIXME: we should infer retention by selector in
      // cases where we don't have an actual method.
    } else {
      ObjCMethodDecl *D = nullptr;
      if (ObjCMessageExpr *Send = dyn_cast<ObjCMessageExpr>(Val: E)) {
        D = Send->getMethodDecl();
      } else if (ObjCBoxedExpr *BoxedExpr = dyn_cast<ObjCBoxedExpr>(Val: E)) {
        D = BoxedExpr->getBoxingMethod();
      } else if (ObjCArrayLiteral *ArrayLit = dyn_cast<ObjCArrayLiteral>(Val: E)) {
        // Don't do reclaims if we're using the zero-element array
        // constant.
        if (ArrayLit->getNumElements() == 0 &&
            Context.getLangOpts().ObjCRuntime.hasEmptyCollections())
          return E;

        D = ArrayLit->getArrayWithObjectsMethod();
      } else if (ObjCDictionaryLiteral *DictLit
                   = dyn_cast<ObjCDictionaryLiteral>(Val: E)) {
        // Don't do reclaims if we're using the zero-element dictionary
        // constant.
        if (DictLit->getNumElements() == 0 &&
            Context.getLangOpts().ObjCRuntime.hasEmptyCollections())
          return E;

        D = DictLit->getDictWithObjectsMethod();
      }

      ReturnsRetained = (D && D->hasAttr<NSReturnsRetainedAttr>());

      // Don't do reclaims on performSelector calls; despite their
      // return type, the invoked method doesn't necessarily actually
      // return an object.
      if (!ReturnsRetained &&
          D && D->getMethodFamily() == OMF_performSelector)
        return E;
    }

    // Don't reclaim an object of Class type.
    if (!ReturnsRetained && E->getType()->isObjCARCImplicitlyUnretainedType())
      return E;

    Cleanup.setExprNeedsCleanups(true);

    // Consume the value if it was returned at +1; reclaim it otherwise.
    CastKind ck = (ReturnsRetained ? CK_ARCConsumeObject
                                   : CK_ARCReclaimReturnedObject);
    return ImplicitCastExpr::Create(Context, T: E->getType(), Kind: ck, Operand: E, BasePath: nullptr,
                                    Cat: VK_PRValue, FPO: FPOptionsOverride());
  }

  // Non-trivial C structs need a cleanup; note this even outside C++.
  if (E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
    Cleanup.setExprNeedsCleanups(true);

  // CXXBindTemporaryExpr is a C++-only construct; nothing more to do in C.
  if (!getLangOpts().CPlusPlus)
    return E;

  // Search for the base element type (cf. ASTContext::getBaseElementType) with
  // a fast path for the common case that the type is directly a RecordType.
  const Type *T = Context.getCanonicalType(T: E->getType().getTypePtr());
  const RecordType *RT = nullptr;
  while (!RT) {
    switch (T->getTypeClass()) {
    case Type::Record:
      RT = cast<RecordType>(Val: T);
      break;
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::DependentSizedArray:
      T = cast<ArrayType>(Val: T)->getElementType().getTypePtr();
      break;
    default:
      // Not a record (or array of records): no temporary to bind.
      return E;
    }
  }

  // That should be enough to guarantee that this type is complete, if we're
  // not processing a decltype expression.
  auto *RD = cast<CXXRecordDecl>(Val: RT->getDecl())->getDefinitionOrSelf();
  if (RD->isInvalidDecl() || RD->isDependentContext())
    return E;

  // In a decltype operand, a temporary is not introduced for a top-level
  // prvalue (C++11 [expr.call]p11), so don't look up the destructor yet;
  // the bind recorded below is finalized in ActOnDecltypeExpression.
  bool IsDecltype = ExprEvalContexts.back().ExprContext ==
                        ExpressionEvaluationContextRecord::EK_Decltype;
  CXXDestructorDecl *Destructor = IsDecltype ? nullptr : LookupDestructor(Class: RD);

  if (Destructor) {
    MarkFunctionReferenced(Loc: E->getExprLoc(), Func: Destructor);
    CheckDestructorAccess(Loc: E->getExprLoc(), Dtor: Destructor,
                          PDiag: PDiag(DiagID: diag::err_access_dtor_temp)
                                    << E->getType());
    if (DiagnoseUseOfDecl(D: Destructor, Locs: E->getExprLoc()))
      return ExprError();

    // If destructor is trivial, we can avoid the extra copy.
    if (Destructor->isTrivial())
      return E;

    // We need a cleanup, but we don't need to remember the temporary.
    Cleanup.setExprNeedsCleanups(true);
  }

  CXXTemporary *Temp = CXXTemporary::Create(C: Context, Destructor);
  CXXBindTemporaryExpr *Bind = CXXBindTemporaryExpr::Create(C: Context, Temp, SubExpr: E);

  if (IsDecltype)
    ExprEvalContexts.back().DelayedDecltypeBinds.push_back(Elt: Bind);

  return Bind;
}
6737
6738ExprResult
6739Sema::MaybeCreateExprWithCleanups(ExprResult SubExpr) {
6740 if (SubExpr.isInvalid())
6741 return ExprError();
6742
6743 return MaybeCreateExprWithCleanups(SubExpr: SubExpr.get());
6744}
6745
6746Expr *Sema::MaybeCreateExprWithCleanups(Expr *SubExpr) {
6747 assert(SubExpr && "subexpression can't be null!");
6748
6749 CleanupVarDeclMarking();
6750
6751 unsigned FirstCleanup = ExprEvalContexts.back().NumCleanupObjects;
6752 assert(ExprCleanupObjects.size() >= FirstCleanup);
6753 assert(Cleanup.exprNeedsCleanups() ||
6754 ExprCleanupObjects.size() == FirstCleanup);
6755 if (!Cleanup.exprNeedsCleanups())
6756 return SubExpr;
6757
6758 auto Cleanups = llvm::ArrayRef(ExprCleanupObjects.begin() + FirstCleanup,
6759 ExprCleanupObjects.size() - FirstCleanup);
6760
6761 auto *E = ExprWithCleanups::Create(
6762 C: Context, subexpr: SubExpr, CleanupsHaveSideEffects: Cleanup.cleanupsHaveSideEffects(), objects: Cleanups);
6763 DiscardCleanupsInEvaluationContext();
6764
6765 return E;
6766}
6767
6768Stmt *Sema::MaybeCreateStmtWithCleanups(Stmt *SubStmt) {
6769 assert(SubStmt && "sub-statement can't be null!");
6770
6771 CleanupVarDeclMarking();
6772
6773 if (!Cleanup.exprNeedsCleanups())
6774 return SubStmt;
6775
6776 // FIXME: In order to attach the temporaries, wrap the statement into
6777 // a StmtExpr; currently this is only used for asm statements.
6778 // This is hacky, either create a new CXXStmtWithTemporaries statement or
6779 // a new AsmStmtWithTemporaries.
6780 CompoundStmt *CompStmt =
6781 CompoundStmt::Create(C: Context, Stmts: SubStmt, FPFeatures: FPOptionsOverride(),
6782 LB: SourceLocation(), RB: SourceLocation());
6783 Expr *E = new (Context)
6784 StmtExpr(CompStmt, Context.VoidTy, SourceLocation(), SourceLocation(),
6785 /*FIXME TemplateDepth=*/0);
6786 return MaybeCreateExprWithCleanups(SubExpr: E);
6787}
6788
/// Finish semantic analysis of the operand of a decltype-specifier.
///
/// Runs the checks that were delayed while parsing the operand (delayed call
/// return-type checks and temporary-destructor checks) and strips the
/// outermost CXXBindTemporaryExpr from a top-level call, implementing
/// C++11 [expr.call]p11.
ExprResult Sema::ActOnDecltypeExpression(Expr *E) {
  assert(ExprEvalContexts.back().ExprContext ==
             ExpressionEvaluationContextRecord::EK_Decltype &&
         "not in a decltype expression");

  ExprResult Result = CheckPlaceholderExpr(E);
  if (Result.isInvalid())
    return ExprError();
  E = Result.get();

  // C++11 [expr.call]p11:
  //   If a function call is a prvalue of object type,
  //   -- if the function call is either
  //      -- the operand of a decltype-specifier, or
  //      -- the right operand of a comma operator that is the operand of a
  //         decltype-specifier,
  //   a temporary object is not introduced for the prvalue.

  // Recursively rebuild ParenExprs and comma expressions to strip out the
  // outermost CXXBindTemporaryExpr, if any.
  if (ParenExpr *PE = dyn_cast<ParenExpr>(Val: E)) {
    ExprResult SubExpr = ActOnDecltypeExpression(E: PE->getSubExpr());
    if (SubExpr.isInvalid())
      return ExprError();
    // Reuse the existing node when the subexpression was left untouched.
    if (SubExpr.get() == PE->getSubExpr())
      return E;
    return ActOnParenExpr(L: PE->getLParen(), R: PE->getRParen(), E: SubExpr.get());
  }
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Val: E)) {
    if (BO->getOpcode() == BO_Comma) {
      // Only the right operand of a comma is in "decltype position".
      ExprResult RHS = ActOnDecltypeExpression(E: BO->getRHS());
      if (RHS.isInvalid())
        return ExprError();
      if (RHS.get() == BO->getRHS())
        return E;
      return BinaryOperator::Create(C: Context, lhs: BO->getLHS(), rhs: RHS.get(), opc: BO_Comma,
                                    ResTy: BO->getType(), VK: BO->getValueKind(),
                                    OK: BO->getObjectKind(), opLoc: BO->getOperatorLoc(),
                                    FPFeatures: BO->getFPFeatures());
    }
  }

  // Identify a CXXBindTemporaryExpr wrapped directly around a top-level call;
  // that is the one temporary that must not be introduced.
  CXXBindTemporaryExpr *TopBind = dyn_cast<CXXBindTemporaryExpr>(Val: E);
  CallExpr *TopCall = TopBind ? dyn_cast<CallExpr>(Val: TopBind->getSubExpr())
                              : nullptr;
  if (TopCall)
    E = TopCall;
  else
    TopBind = nullptr;

  // Disable the special decltype handling now.
  ExprEvalContexts.back().ExprContext =
      ExpressionEvaluationContextRecord::EK_Other;

  Result = CheckUnevaluatedOperand(E);
  if (Result.isInvalid())
    return ExprError();
  E = Result.get();

  // In MS mode, don't perform any extra checking of call return types within a
  // decltype expression.
  if (getLangOpts().MSVCCompat)
    return E;

  // Perform the semantic checks we delayed until this point.
  for (unsigned I = 0, N = ExprEvalContexts.back().DelayedDecltypeCalls.size();
       I != N; ++I) {
    CallExpr *Call = ExprEvalContexts.back().DelayedDecltypeCalls[I];
    // Skip the delayed check for the top-level call (see [expr.call]p11 above).
    if (Call == TopCall)
      continue;

    if (CheckCallReturnType(ReturnType: Call->getCallReturnType(Ctx: Context),
                            Loc: Call->getBeginLoc(), CE: Call, FD: Call->getDirectCallee()))
      return ExprError();
  }

  // Now all relevant types are complete, check the destructors are accessible
  // and non-deleted, and annotate them on the temporaries.
  for (unsigned I = 0, N = ExprEvalContexts.back().DelayedDecltypeBinds.size();
       I != N; ++I) {
    CXXBindTemporaryExpr *Bind =
        ExprEvalContexts.back().DelayedDecltypeBinds[I];
    // The stripped top-level bind gets no destructor annotation.
    if (Bind == TopBind)
      continue;

    CXXTemporary *Temp = Bind->getTemporary();

    CXXRecordDecl *RD =
        Bind->getType()->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
    CXXDestructorDecl *Destructor = LookupDestructor(Class: RD);
    Temp->setDestructor(Destructor);

    MarkFunctionReferenced(Loc: Bind->getExprLoc(), Func: Destructor);
    CheckDestructorAccess(Loc: Bind->getExprLoc(), Dtor: Destructor,
                          PDiag: PDiag(DiagID: diag::err_access_dtor_temp)
                                    << Bind->getType());
    if (DiagnoseUseOfDecl(D: Destructor, Locs: Bind->getExprLoc()))
      return ExprError();

    // We need a cleanup, but we don't need to remember the temporary.
    Cleanup.setExprNeedsCleanups(true);
  }

  // Possibly strip off the top CXXBindTemporaryExpr.
  return E;
}
6895
6896/// Note a set of 'operator->' functions that were used for a member access.
6897static void noteOperatorArrows(Sema &S,
6898 ArrayRef<FunctionDecl *> OperatorArrows) {
6899 unsigned SkipStart = OperatorArrows.size(), SkipCount = 0;
6900 // FIXME: Make this configurable?
6901 unsigned Limit = 9;
6902 if (OperatorArrows.size() > Limit) {
6903 // Produce Limit-1 normal notes and one 'skipping' note.
6904 SkipStart = (Limit - 1) / 2 + (Limit - 1) % 2;
6905 SkipCount = OperatorArrows.size() - (Limit - 1);
6906 }
6907
6908 for (unsigned I = 0; I < OperatorArrows.size(); /**/) {
6909 if (I == SkipStart) {
6910 S.Diag(Loc: OperatorArrows[I]->getLocation(),
6911 DiagID: diag::note_operator_arrows_suppressed)
6912 << SkipCount;
6913 I += SkipCount;
6914 } else {
6915 S.Diag(Loc: OperatorArrows[I]->getLocation(), DiagID: diag::note_operator_arrow_here)
6916 << OperatorArrows[I]->getCallResultType();
6917 ++I;
6918 }
6919 }
6920}
6921
6922ExprResult Sema::ActOnStartCXXMemberReference(Scope *S, Expr *Base,
6923 SourceLocation OpLoc,
6924 tok::TokenKind OpKind,
6925 ParsedType &ObjectType,
6926 bool &MayBePseudoDestructor) {
6927 // Since this might be a postfix expression, get rid of ParenListExprs.
6928 ExprResult Result = MaybeConvertParenListExprToParenExpr(S, ME: Base);
6929 if (Result.isInvalid()) return ExprError();
6930 Base = Result.get();
6931
6932 Result = CheckPlaceholderExpr(E: Base);
6933 if (Result.isInvalid()) return ExprError();
6934 Base = Result.get();
6935
6936 QualType BaseType = Base->getType();
6937 MayBePseudoDestructor = false;
6938 if (BaseType->isDependentType()) {
6939 // If we have a pointer to a dependent type and are using the -> operator,
6940 // the object type is the type that the pointer points to. We might still
6941 // have enough information about that type to do something useful.
6942 if (OpKind == tok::arrow)
6943 if (const PointerType *Ptr = BaseType->getAs<PointerType>())
6944 BaseType = Ptr->getPointeeType();
6945
6946 ObjectType = ParsedType::make(P: BaseType);
6947 MayBePseudoDestructor = true;
6948 return Base;
6949 }
6950
6951 // C++ [over.match.oper]p8:
6952 // [...] When operator->returns, the operator-> is applied to the value
6953 // returned, with the original second operand.
6954 if (OpKind == tok::arrow) {
6955 QualType StartingType = BaseType;
6956 bool NoArrowOperatorFound = false;
6957 bool FirstIteration = true;
6958 FunctionDecl *CurFD = dyn_cast<FunctionDecl>(Val: CurContext);
6959 // The set of types we've considered so far.
6960 llvm::SmallPtrSet<CanQualType,8> CTypes;
6961 SmallVector<FunctionDecl*, 8> OperatorArrows;
6962 CTypes.insert(Ptr: Context.getCanonicalType(T: BaseType));
6963
6964 while (BaseType->isRecordType()) {
6965 if (OperatorArrows.size() >= getLangOpts().ArrowDepth) {
6966 Diag(Loc: OpLoc, DiagID: diag::err_operator_arrow_depth_exceeded)
6967 << StartingType << getLangOpts().ArrowDepth << Base->getSourceRange();
6968 noteOperatorArrows(S&: *this, OperatorArrows);
6969 Diag(Loc: OpLoc, DiagID: diag::note_operator_arrow_depth)
6970 << getLangOpts().ArrowDepth;
6971 return ExprError();
6972 }
6973
6974 Result = BuildOverloadedArrowExpr(
6975 S, Base, OpLoc,
6976 // When in a template specialization and on the first loop iteration,
6977 // potentially give the default diagnostic (with the fixit in a
6978 // separate note) instead of having the error reported back to here
6979 // and giving a diagnostic with a fixit attached to the error itself.
6980 NoArrowOperatorFound: (FirstIteration && CurFD && CurFD->isFunctionTemplateSpecialization())
6981 ? nullptr
6982 : &NoArrowOperatorFound);
6983 if (Result.isInvalid()) {
6984 if (NoArrowOperatorFound) {
6985 if (FirstIteration) {
6986 Diag(Loc: OpLoc, DiagID: diag::err_typecheck_member_reference_suggestion)
6987 << BaseType << 1 << Base->getSourceRange()
6988 << FixItHint::CreateReplacement(RemoveRange: OpLoc, Code: ".");
6989 OpKind = tok::period;
6990 break;
6991 }
6992 Diag(Loc: OpLoc, DiagID: diag::err_typecheck_member_reference_arrow)
6993 << BaseType << Base->getSourceRange();
6994 CallExpr *CE = dyn_cast<CallExpr>(Val: Base);
6995 if (Decl *CD = (CE ? CE->getCalleeDecl() : nullptr)) {
6996 Diag(Loc: CD->getBeginLoc(),
6997 DiagID: diag::note_member_reference_arrow_from_operator_arrow);
6998 }
6999 }
7000 return ExprError();
7001 }
7002 Base = Result.get();
7003 if (CXXOperatorCallExpr *OpCall = dyn_cast<CXXOperatorCallExpr>(Val: Base))
7004 OperatorArrows.push_back(Elt: OpCall->getDirectCallee());
7005 BaseType = Base->getType();
7006 CanQualType CBaseType = Context.getCanonicalType(T: BaseType);
7007 if (!CTypes.insert(Ptr: CBaseType).second) {
7008 Diag(Loc: OpLoc, DiagID: diag::err_operator_arrow_circular) << StartingType;
7009 noteOperatorArrows(S&: *this, OperatorArrows);
7010 return ExprError();
7011 }
7012 FirstIteration = false;
7013 }
7014
7015 if (OpKind == tok::arrow) {
7016 if (BaseType->isPointerType())
7017 BaseType = BaseType->getPointeeType();
7018 else if (auto *AT = Context.getAsArrayType(T: BaseType))
7019 BaseType = AT->getElementType();
7020 }
7021 }
7022
7023 // Objective-C properties allow "." access on Objective-C pointer types,
7024 // so adjust the base type to the object type itself.
7025 if (BaseType->isObjCObjectPointerType())
7026 BaseType = BaseType->getPointeeType();
7027
7028 // C++ [basic.lookup.classref]p2:
7029 // [...] If the type of the object expression is of pointer to scalar
7030 // type, the unqualified-id is looked up in the context of the complete
7031 // postfix-expression.
7032 //
7033 // This also indicates that we could be parsing a pseudo-destructor-name.
7034 // Note that Objective-C class and object types can be pseudo-destructor
7035 // expressions or normal member (ivar or property) access expressions, and
7036 // it's legal for the type to be incomplete if this is a pseudo-destructor
7037 // call. We'll do more incomplete-type checks later in the lookup process,
7038 // so just skip this check for ObjC types.
7039 if (!BaseType->isRecordType()) {
7040 ObjectType = ParsedType::make(P: BaseType);
7041 MayBePseudoDestructor = true;
7042 return Base;
7043 }
7044
7045 // The object type must be complete (or dependent), or
7046 // C++11 [expr.prim.general]p3:
7047 // Unlike the object expression in other contexts, *this is not required to
7048 // be of complete type for purposes of class member access (5.2.5) outside
7049 // the member function body.
7050 if (!BaseType->isDependentType() &&
7051 !isThisOutsideMemberFunctionBody(BaseType) &&
7052 RequireCompleteType(Loc: OpLoc, T: BaseType,
7053 DiagID: diag::err_incomplete_member_access)) {
7054 return CreateRecoveryExpr(Begin: Base->getBeginLoc(), End: Base->getEndLoc(), SubExprs: {Base});
7055 }
7056
7057 // C++ [basic.lookup.classref]p2:
7058 // If the id-expression in a class member access (5.2.5) is an
7059 // unqualified-id, and the type of the object expression is of a class
7060 // type C (or of pointer to a class type C), the unqualified-id is looked
7061 // up in the scope of class C. [...]
7062 ObjectType = ParsedType::make(P: BaseType);
7063 return Base;
7064}
7065
7066static bool CheckArrow(Sema &S, QualType &ObjectType, Expr *&Base,
7067 tok::TokenKind &OpKind, SourceLocation OpLoc) {
7068 if (Base->hasPlaceholderType()) {
7069 ExprResult result = S.CheckPlaceholderExpr(E: Base);
7070 if (result.isInvalid()) return true;
7071 Base = result.get();
7072 }
7073 ObjectType = Base->getType();
7074
7075 // C++ [expr.pseudo]p2:
7076 // The left-hand side of the dot operator shall be of scalar type. The
7077 // left-hand side of the arrow operator shall be of pointer to scalar type.
7078 // This scalar type is the object type.
7079 // Note that this is rather different from the normal handling for the
7080 // arrow operator.
7081 if (OpKind == tok::arrow) {
7082 // The operator requires a prvalue, so perform lvalue conversions.
7083 // Only do this if we might plausibly end with a pointer, as otherwise
7084 // this was likely to be intended to be a '.'.
7085 if (ObjectType->isPointerType() || ObjectType->isArrayType() ||
7086 ObjectType->isFunctionType()) {
7087 ExprResult BaseResult = S.DefaultFunctionArrayLvalueConversion(E: Base);
7088 if (BaseResult.isInvalid())
7089 return true;
7090 Base = BaseResult.get();
7091 ObjectType = Base->getType();
7092 }
7093
7094 if (const PointerType *Ptr = ObjectType->getAs<PointerType>()) {
7095 ObjectType = Ptr->getPointeeType();
7096 } else if (!Base->isTypeDependent()) {
7097 // The user wrote "p->" when they probably meant "p."; fix it.
7098 S.Diag(Loc: OpLoc, DiagID: diag::err_typecheck_member_reference_suggestion)
7099 << ObjectType << true
7100 << FixItHint::CreateReplacement(RemoveRange: OpLoc, Code: ".");
7101 if (S.isSFINAEContext())
7102 return true;
7103
7104 OpKind = tok::period;
7105 }
7106 }
7107
7108 return false;
7109}
7110
7111/// Check if it's ok to try and recover dot pseudo destructor calls on
7112/// pointer objects.
7113static bool
7114canRecoverDotPseudoDestructorCallsOnPointerObjects(Sema &SemaRef,
7115 QualType DestructedType) {
7116 // If this is a record type, check if its destructor is callable.
7117 if (auto *RD = DestructedType->getAsCXXRecordDecl()) {
7118 if (RD->hasDefinition())
7119 if (CXXDestructorDecl *D = SemaRef.LookupDestructor(Class: RD))
7120 return SemaRef.CanUseDecl(D, /*TreatUnavailableAsInvalid=*/false);
7121 return false;
7122 }
7123
7124 // Otherwise, check if it's a type for which it's valid to use a pseudo-dtor.
7125 return DestructedType->isDependentType() || DestructedType->isScalarType() ||
7126 DestructedType->isVectorType();
7127}
7128
/// Build a C++ pseudo-destructor expression such as 'p->T::~T()'.
///
/// \param Base the already-analyzed object expression.
/// \param OpLoc the location of the '.' or '->'.
/// \param OpKind whether the access was written with '.' or '->'.
/// \param SS the nested-name-specifier preceding the type names, if any.
/// \param ScopeTypeInfo the type written before '::~', or null if none.
/// \param CCLoc the location of the '::' preceding the '~'.
/// \param TildeLoc the location of the '~'.
/// \param Destructed the destroyed type, or a dependent identifier to be
///        looked up again at instantiation time.
ExprResult Sema::BuildPseudoDestructorExpr(Expr *Base,
                                           SourceLocation OpLoc,
                                           tok::TokenKind OpKind,
                                           const CXXScopeSpec &SS,
                                           TypeSourceInfo *ScopeTypeInfo,
                                           SourceLocation CCLoc,
                                           SourceLocation TildeLoc,
                                           PseudoDestructorTypeStorage Destructed) {
  TypeSourceInfo *DestructedTypeInfo = Destructed.getTypeSourceInfo();

  // Normalize the base: this may rewrite an erroneous '->' into '.'.
  QualType ObjectType;
  if (CheckArrow(S&: *this, ObjectType, Base, OpKind, OpLoc))
    return ExprError();

  // The object type must be scalar (or vector/matrix as extensions); MSVC
  // additionally tolerates 'void' here, which we accept with a warning.
  if (!ObjectType->isDependentType() && !ObjectType->isScalarType() &&
      !ObjectType->isVectorType() && !ObjectType->isMatrixType()) {
    if (getLangOpts().MSVCCompat && ObjectType->isVoidType())
      Diag(Loc: OpLoc, DiagID: diag::ext_pseudo_dtor_on_void) << Base->getSourceRange();
    else {
      Diag(Loc: OpLoc, DiagID: diag::err_pseudo_dtor_base_not_scalar)
        << ObjectType << Base->getSourceRange();
      return ExprError();
    }
  }

  // C++ [expr.pseudo]p2:
  //   [...] The cv-unqualified versions of the object type and of the type
  //   designated by the pseudo-destructor-name shall be the same type.
  if (DestructedTypeInfo) {
    QualType DestructedType = DestructedTypeInfo->getType();
    SourceLocation DestructedTypeStart =
        DestructedTypeInfo->getTypeLoc().getBeginLoc();
    if (!DestructedType->isDependentType() && !ObjectType->isDependentType()) {
      if (!Context.hasSameUnqualifiedType(T1: DestructedType, T2: ObjectType)) {
        // Detect dot pseudo destructor calls on pointer objects, e.g.:
        //   Foo *foo;
        //   foo.~Foo();
        if (OpKind == tok::period && ObjectType->isPointerType() &&
            Context.hasSameUnqualifiedType(T1: DestructedType,
                                           T2: ObjectType->getPointeeType())) {
          auto Diagnostic =
              Diag(Loc: OpLoc, DiagID: diag::err_typecheck_member_reference_suggestion)
              << ObjectType << /*IsArrow=*/0 << Base->getSourceRange();

          // Issue a fixit only when the destructor is valid.
          if (canRecoverDotPseudoDestructorCallsOnPointerObjects(
                  SemaRef&: *this, DestructedType))
            Diagnostic << FixItHint::CreateReplacement(RemoveRange: OpLoc, Code: "->");

          // Recover by setting the object type to the destructed type and the
          // operator to '->'.
          ObjectType = DestructedType;
          OpKind = tok::arrow;
        } else {
          Diag(Loc: DestructedTypeStart, DiagID: diag::err_pseudo_dtor_type_mismatch)
              << ObjectType << DestructedType << Base->getSourceRange()
              << DestructedTypeInfo->getTypeLoc().getSourceRange();

          // Recover by setting the destructed type to the object type.
          DestructedType = ObjectType;
          DestructedTypeInfo =
              Context.getTrivialTypeSourceInfo(T: ObjectType, Loc: DestructedTypeStart);
          Destructed = PseudoDestructorTypeStorage(DestructedTypeInfo);
        }
      } else if (DestructedType.getObjCLifetime() !=
                     ObjectType.getObjCLifetime()) {
        // ARC: the ownership qualifiers must agree as well.
        if (DestructedType.getObjCLifetime() == Qualifiers::OCL_None) {
          // Okay: just pretend that the user provided the correctly-qualified
          // type.
        } else {
          Diag(Loc: DestructedTypeStart, DiagID: diag::err_arc_pseudo_dtor_inconstant_quals)
              << ObjectType << DestructedType << Base->getSourceRange()
              << DestructedTypeInfo->getTypeLoc().getSourceRange();
        }

        // Recover by setting the destructed type to the object type.
        DestructedType = ObjectType;
        DestructedTypeInfo = Context.getTrivialTypeSourceInfo(T: ObjectType,
                                                              Loc: DestructedTypeStart);
        Destructed = PseudoDestructorTypeStorage(DestructedTypeInfo);
      }
    }
  }

  // C++ [expr.pseudo]p2:
  //   [...] Furthermore, the two type-names in a pseudo-destructor-name of the
  //   form
  //
  //     ::[opt] nested-name-specifier[opt] type-name :: ~ type-name
  //
  //   shall designate the same scalar type.
  if (ScopeTypeInfo) {
    QualType ScopeType = ScopeTypeInfo->getType();
    if (!ScopeType->isDependentType() && !ObjectType->isDependentType() &&
        !Context.hasSameUnqualifiedType(T1: ScopeType, T2: ObjectType)) {

      Diag(Loc: ScopeTypeInfo->getTypeLoc().getSourceRange().getBegin(),
           DiagID: diag::err_pseudo_dtor_type_mismatch)
          << ObjectType << ScopeType << Base->getSourceRange()
          << ScopeTypeInfo->getTypeLoc().getSourceRange();

      // Recover by dropping the scope type entirely; it is redundant.
      ScopeType = QualType();
      ScopeTypeInfo = nullptr;
    }
  }

  Expr *Result
    = new (Context) CXXPseudoDestructorExpr(Context, Base,
                                            OpKind == tok::arrow, OpLoc,
                                            SS.getWithLocInContext(Context),
                                            ScopeTypeInfo,
                                            CCLoc,
                                            TildeLoc,
                                            Destructed);

  return Result;
}
7247
/// Parser callback for a pseudo-destructor written with explicit type names,
/// i.e. 'base.~T()' or 'base.T::~T()' (with '.' or '->').
///
/// Resolves the two type-names (identifier or template-id) against the
/// object type and/or scope specifier, then delegates to
/// BuildPseudoDestructorExpr. Recovery paths either assume the object type
/// for the destroyed type or drop the scope type.
ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
                                           SourceLocation OpLoc,
                                           tok::TokenKind OpKind,
                                           CXXScopeSpec &SS,
                                           UnqualifiedId &FirstTypeName,
                                           SourceLocation CCLoc,
                                           SourceLocation TildeLoc,
                                           UnqualifiedId &SecondTypeName) {
  assert((FirstTypeName.getKind() == UnqualifiedIdKind::IK_TemplateId ||
          FirstTypeName.getKind() == UnqualifiedIdKind::IK_Identifier) &&
         "Invalid first type name in pseudo-destructor");
  assert((SecondTypeName.getKind() == UnqualifiedIdKind::IK_TemplateId ||
          SecondTypeName.getKind() == UnqualifiedIdKind::IK_Identifier) &&
         "Invalid second type name in pseudo-destructor");

  // Normalize the base: this may rewrite an erroneous '->' into '.'.
  QualType ObjectType;
  if (CheckArrow(S&: *this, ObjectType, Base, OpKind, OpLoc))
    return ExprError();

  // Compute the object type that we should use for name lookup purposes. Only
  // record types and dependent types matter.
  ParsedType ObjectTypePtrForLookup;
  if (!SS.isSet()) {
    if (ObjectType->isRecordType())
      ObjectTypePtrForLookup = ParsedType::make(P: ObjectType);
    else if (ObjectType->isDependentType())
      ObjectTypePtrForLookup = ParsedType::make(P: Context.DependentTy);
  }

  // Convert the name of the type being destructed (following the ~) into a
  // type (with source-location information).
  QualType DestructedType;
  TypeSourceInfo *DestructedTypeInfo = nullptr;
  PseudoDestructorTypeStorage Destructed;
  if (SecondTypeName.getKind() == UnqualifiedIdKind::IK_Identifier) {
    ParsedType T = getTypeName(II: *SecondTypeName.Identifier,
                               NameLoc: SecondTypeName.StartLocation,
                               S, SS: &SS, isClassName: true, HasTrailingDot: false, ObjectType: ObjectTypePtrForLookup,
                               /*IsCtorOrDtorName*/true);
    if (!T &&
        ((SS.isSet() && !computeDeclContext(SS, EnteringContext: false)) ||
         (!SS.isSet() && ObjectType->isDependentType()))) {
      // The name of the type being destroyed is a dependent name, and we
      // couldn't find anything useful in scope. Just store the identifier and
      // its location, and we'll perform (qualified) name lookup again at
      // template instantiation time.
      Destructed = PseudoDestructorTypeStorage(SecondTypeName.Identifier,
                                               SecondTypeName.StartLocation);
    } else if (!T) {
      // Lookup found nothing and the name is not dependent: diagnose.
      Diag(Loc: SecondTypeName.StartLocation,
           DiagID: diag::err_pseudo_dtor_destructor_non_type)
        << SecondTypeName.Identifier << ObjectType;
      if (isSFINAEContext())
        return ExprError();

      // Recover by assuming we had the right type all along.
      DestructedType = ObjectType;
    } else
      DestructedType = GetTypeFromParser(Ty: T, TInfo: &DestructedTypeInfo);
  } else {
    // Resolve the template-id to a type.
    TemplateIdAnnotation *TemplateId = SecondTypeName.TemplateId;
    ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
                                       TemplateId->NumArgs);
    TypeResult T = ActOnTemplateIdType(
        S, ElaboratedKeyword: ElaboratedTypeKeyword::None,
        /*ElaboratedKeywordLoc=*/SourceLocation(), SS,
        TemplateKWLoc: TemplateId->TemplateKWLoc, Template: TemplateId->Template, TemplateII: TemplateId->Name,
        TemplateIILoc: TemplateId->TemplateNameLoc, LAngleLoc: TemplateId->LAngleLoc, TemplateArgs: TemplateArgsPtr,
        RAngleLoc: TemplateId->RAngleLoc,
        /*IsCtorOrDtorName*/ true);
    if (T.isInvalid() || !T.get()) {
      // Recover by assuming we had the right type all along.
      DestructedType = ObjectType;
    } else
      DestructedType = GetTypeFromParser(Ty: T.get(), TInfo: &DestructedTypeInfo);
  }

  // If we've performed some kind of recovery, (re-)build the type source
  // information.
  if (!DestructedType.isNull()) {
    if (!DestructedTypeInfo)
      DestructedTypeInfo = Context.getTrivialTypeSourceInfo(T: DestructedType,
                                                            Loc: SecondTypeName.StartLocation);
    Destructed = PseudoDestructorTypeStorage(DestructedTypeInfo);
  }

  // Convert the name of the scope type (the type prior to '::') into a type.
  TypeSourceInfo *ScopeTypeInfo = nullptr;
  QualType ScopeType;
  if (FirstTypeName.getKind() == UnqualifiedIdKind::IK_TemplateId ||
      FirstTypeName.Identifier) {
    if (FirstTypeName.getKind() == UnqualifiedIdKind::IK_Identifier) {
      ParsedType T = getTypeName(II: *FirstTypeName.Identifier,
                                 NameLoc: FirstTypeName.StartLocation,
                                 S, SS: &SS, isClassName: true, HasTrailingDot: false, ObjectType: ObjectTypePtrForLookup,
                                 /*IsCtorOrDtorName*/true);
      if (!T) {
        Diag(Loc: FirstTypeName.StartLocation,
             DiagID: diag::err_pseudo_dtor_destructor_non_type)
          << FirstTypeName.Identifier << ObjectType;

        if (isSFINAEContext())
          return ExprError();

        // Just drop this type. It's unnecessary anyway.
        ScopeType = QualType();
      } else
        ScopeType = GetTypeFromParser(Ty: T, TInfo: &ScopeTypeInfo);
    } else {
      // Resolve the template-id to a type.
      TemplateIdAnnotation *TemplateId = FirstTypeName.TemplateId;
      ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
                                         TemplateId->NumArgs);
      TypeResult T = ActOnTemplateIdType(
          S, ElaboratedKeyword: ElaboratedTypeKeyword::None,
          /*ElaboratedKeywordLoc=*/SourceLocation(), SS,
          TemplateKWLoc: TemplateId->TemplateKWLoc, Template: TemplateId->Template, TemplateII: TemplateId->Name,
          TemplateIILoc: TemplateId->TemplateNameLoc, LAngleLoc: TemplateId->LAngleLoc, TemplateArgs: TemplateArgsPtr,
          RAngleLoc: TemplateId->RAngleLoc,
          /*IsCtorOrDtorName*/ true);
      if (T.isInvalid() || !T.get()) {
        // Recover by dropping this type.
        ScopeType = QualType();
      } else
        ScopeType = GetTypeFromParser(Ty: T.get(), TInfo: &ScopeTypeInfo);
    }
  }

  if (!ScopeType.isNull() && !ScopeTypeInfo)
    ScopeTypeInfo = Context.getTrivialTypeSourceInfo(T: ScopeType,
                                                     Loc: FirstTypeName.StartLocation);


  return BuildPseudoDestructorExpr(Base, OpLoc, OpKind, SS,
                                   ScopeTypeInfo, CCLoc, TildeLoc,
                                   Destructed);
}
7386
7387ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
7388 SourceLocation OpLoc,
7389 tok::TokenKind OpKind,
7390 SourceLocation TildeLoc,
7391 const DeclSpec& DS) {
7392 QualType ObjectType;
7393 QualType T;
7394 TypeLocBuilder TLB;
7395 if (CheckArrow(S&: *this, ObjectType, Base, OpKind, OpLoc) ||
7396 DS.getTypeSpecType() == DeclSpec::TST_error)
7397 return ExprError();
7398
7399 switch (DS.getTypeSpecType()) {
7400 case DeclSpec::TST_decltype_auto: {
7401 Diag(Loc: DS.getTypeSpecTypeLoc(), DiagID: diag::err_decltype_auto_invalid);
7402 return true;
7403 }
7404 case DeclSpec::TST_decltype: {
7405 T = BuildDecltypeType(E: DS.getRepAsExpr(), /*AsUnevaluated=*/false);
7406 DecltypeTypeLoc DecltypeTL = TLB.push<DecltypeTypeLoc>(T);
7407 DecltypeTL.setDecltypeLoc(DS.getTypeSpecTypeLoc());
7408 DecltypeTL.setRParenLoc(DS.getTypeofParensRange().getEnd());
7409 break;
7410 }
7411 case DeclSpec::TST_typename_pack_indexing: {
7412 T = ActOnPackIndexingType(Pattern: DS.getRepAsType().get(), IndexExpr: DS.getPackIndexingExpr(),
7413 Loc: DS.getBeginLoc(), EllipsisLoc: DS.getEllipsisLoc());
7414 TLB.pushTrivial(Context&: getASTContext(),
7415 T: cast<PackIndexingType>(Val: T.getTypePtr())->getPattern(),
7416 Loc: DS.getBeginLoc());
7417 PackIndexingTypeLoc PITL = TLB.push<PackIndexingTypeLoc>(T);
7418 PITL.setEllipsisLoc(DS.getEllipsisLoc());
7419 break;
7420 }
7421 default:
7422 llvm_unreachable("Unsupported type in pseudo destructor");
7423 }
7424 TypeSourceInfo *DestructedTypeInfo = TLB.getTypeSourceInfo(Context, T);
7425 PseudoDestructorTypeStorage Destructed(DestructedTypeInfo);
7426
7427 return BuildPseudoDestructorExpr(Base, OpLoc, OpKind, SS: CXXScopeSpec(),
7428 ScopeTypeInfo: nullptr, CCLoc: SourceLocation(), TildeLoc,
7429 Destructed);
7430}
7431
7432ExprResult Sema::BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
7433 SourceLocation RParen) {
7434 // If the operand is an unresolved lookup expression, the expression is ill-
7435 // formed per [over.over]p1, because overloaded function names cannot be used
7436 // without arguments except in explicit contexts.
7437 ExprResult R = CheckPlaceholderExpr(E: Operand);
7438 if (R.isInvalid())
7439 return R;
7440
7441 R = CheckUnevaluatedOperand(E: R.get());
7442 if (R.isInvalid())
7443 return ExprError();
7444
7445 Operand = R.get();
7446
7447 if (!inTemplateInstantiation() && !Operand->isInstantiationDependent() &&
7448 Operand->HasSideEffects(Ctx: Context, IncludePossibleEffects: false)) {
7449 // The expression operand for noexcept is in an unevaluated expression
7450 // context, so side effects could result in unintended consequences.
7451 Diag(Loc: Operand->getExprLoc(), DiagID: diag::warn_side_effects_unevaluated_context);
7452 }
7453
7454 CanThrowResult CanThrow = canThrow(E: Operand);
7455 return new (Context)
7456 CXXNoexceptExpr(Context.BoolTy, Operand, CanThrow, KeyLoc, RParen);
7457}
7458
/// Parser callback for the 'noexcept(e)' operator; forwards directly to
/// BuildCXXNoexceptExpr. The unnamed SourceLocation parameter is unused here
/// (presumably the '(' location — confirm against the parser's call site).
ExprResult Sema::ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation,
                                   Expr *Operand, SourceLocation RParen) {
  return BuildCXXNoexceptExpr(KeyLoc, Operand, RParen);
}
7463
7464static void MaybeDecrementCount(
7465 Expr *E, llvm::DenseMap<const VarDecl *, int> &RefsMinusAssignments) {
7466 DeclRefExpr *LHS = nullptr;
7467 bool IsCompoundAssign = false;
7468 bool isIncrementDecrementUnaryOp = false;
7469 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Val: E)) {
7470 if (BO->getLHS()->getType()->isDependentType() ||
7471 BO->getRHS()->getType()->isDependentType()) {
7472 if (BO->getOpcode() != BO_Assign)
7473 return;
7474 } else if (!BO->isAssignmentOp())
7475 return;
7476 else
7477 IsCompoundAssign = BO->isCompoundAssignmentOp();
7478 LHS = dyn_cast<DeclRefExpr>(Val: BO->getLHS());
7479 } else if (CXXOperatorCallExpr *COCE = dyn_cast<CXXOperatorCallExpr>(Val: E)) {
7480 if (COCE->getOperator() != OO_Equal)
7481 return;
7482 LHS = dyn_cast<DeclRefExpr>(Val: COCE->getArg(Arg: 0));
7483 } else if (UnaryOperator *UO = dyn_cast<UnaryOperator>(Val: E)) {
7484 if (!UO->isIncrementDecrementOp())
7485 return;
7486 isIncrementDecrementUnaryOp = true;
7487 LHS = dyn_cast<DeclRefExpr>(Val: UO->getSubExpr());
7488 }
7489 if (!LHS)
7490 return;
7491 VarDecl *VD = dyn_cast<VarDecl>(Val: LHS->getDecl());
7492 if (!VD)
7493 return;
7494 // Don't decrement RefsMinusAssignments if volatile variable with compound
7495 // assignment (+=, ...) or increment/decrement unary operator to avoid
7496 // potential unused-but-set-variable warning.
7497 if ((IsCompoundAssign || isIncrementDecrementUnaryOp) &&
7498 VD->getType().isVolatileQualified())
7499 return;
7500 auto iter = RefsMinusAssignments.find(Val: VD);
7501 if (iter == RefsMinusAssignments.end())
7502 return;
7503 iter->getSecond()--;
7504}
7505
/// Perform the conversions required for an expression used in a
/// context that ignores the result (a discarded-value expression).
///
/// In C++ this mostly leaves the expression alone, except for the C++11
/// volatile-load rule and (in lifetime-extending contexts) C++17 temporary
/// materialization. In C it performs the usual lvalue and function/array
/// conversions. On conversion failure the original expression is returned
/// rather than an error.
ExprResult Sema::IgnoredValueConversions(Expr *E) {
  // Balance the reference count if this expression assigns to a tracked
  // variable (for unused-but-set-variable bookkeeping).
  MaybeDecrementCount(E, RefsMinusAssignments);

  if (E->hasPlaceholderType()) {
    ExprResult result = CheckPlaceholderExpr(E);
    if (result.isInvalid()) return E;
    E = result.get();
  }

  if (getLangOpts().CPlusPlus) {
    // The C++11 standard defines the notion of a discarded-value expression;
    // normally, we don't need to do anything to handle it, but if it is a
    // volatile lvalue with a special form, we perform an lvalue-to-rvalue
    // conversion.
    if (getLangOpts().CPlusPlus11 && E->isReadIfDiscardedInCPlusPlus11()) {
      ExprResult Res = DefaultLvalueConversion(E);
      if (Res.isInvalid())
        return E;
      E = Res.get();
    } else {
      // Per C++2a [expr.ass]p5, a volatile assignment is not deprecated if
      // it occurs as a discarded-value expression.
      CheckUnusedVolatileAssignment(E);
    }

    // C++1z:
    //   If the expression is a prvalue after this optional conversion, the
    //   temporary materialization conversion is applied.
    //
    // We do not materialize temporaries by default in order to avoid creating
    // unnecessary temporary objects. If we skip this step, IR generation is
    // able to synthesize the storage for itself in the aggregate case, and
    // adding the extra node to the AST is just clutter.
    if (isInLifetimeExtendingContext() && getLangOpts().CPlusPlus17 &&
        E->isPRValue() && !E->getType()->isVoidType()) {
      ExprResult Res = TemporaryMaterializationConversion(E);
      if (Res.isInvalid())
        return E;
      E = Res.get();
    }
    return E;
  }

  // C99 6.3.2.1:
  //   [Except in specific positions,] an lvalue that does not have
  //   array type is converted to the value stored in the
  //   designated object (and is no longer an lvalue).
  if (E->isPRValue()) {
    // In C, function designators (i.e. expressions of function type)
    // are r-values, but we still want to do function-to-pointer decay
    // on them. This is both technically correct and convenient for
    // some clients.
    if (!getLangOpts().CPlusPlus && E->getType()->isFunctionType())
      return DefaultFunctionArrayConversion(E);

    return E;
  }

  // GCC seems to also exclude expressions of incomplete enum type.
  if (const auto *ED = E->getType()->getAsEnumDecl(); ED && !ED->isComplete()) {
    // FIXME: stupid workaround for a codegen bug!
    E = ImpCastExprToType(E, Type: Context.VoidTy, CK: CK_ToVoid).get();
    return E;
  }

  ExprResult Res = DefaultFunctionArrayLvalueConversion(E);
  if (Res.isInvalid())
    return E;
  E = Res.get();

  // A discarded non-void expression must still have a complete type.
  if (!E->getType()->isVoidType())
    RequireCompleteType(Loc: E->getExprLoc(), T: E->getType(),
                        DiagID: diag::err_incomplete_type);
  return E;
}
7583
/// Apply the checks required for an expression appearing as an unevaluated
/// operand (e.g. inside noexcept, sizeof, decltype). Returns E unchanged.
ExprResult Sema::CheckUnevaluatedOperand(Expr *E) {
  // Per C++2a [expr.ass]p5, a volatile assignment is not deprecated if
  // it occurs as an unevaluated operand.
  CheckUnusedVolatileAssignment(E);

  return E;
}
7591
7592// If we can unambiguously determine whether Var can never be used
7593// in a constant expression, return true.
7594// - if the variable and its initializer are non-dependent, then
7595// we can unambiguously check if the variable is a constant expression.
7596// - if the initializer is not value dependent - we can determine whether
7597// it can be used to initialize a constant expression. If Init can not
7598// be used to initialize a constant expression we conclude that Var can
7599// never be a constant expression.
7600// - FXIME: if the initializer is dependent, we can still do some analysis and
7601// identify certain cases unambiguously as non-const by using a Visitor:
7602// - such as those that involve odr-use of a ParmVarDecl, involve a new
7603// delete, lambda-expr, dynamic-cast, reinterpret-cast etc...
7604static inline bool VariableCanNeverBeAConstantExpression(VarDecl *Var,
7605 ASTContext &Context) {
7606 if (isa<ParmVarDecl>(Val: Var)) return true;
7607 const VarDecl *DefVD = nullptr;
7608
7609 // If there is no initializer - this can not be a constant expression.
7610 const Expr *Init = Var->getAnyInitializer(D&: DefVD);
7611 if (!Init)
7612 return true;
7613 assert(DefVD);
7614 if (DefVD->isWeak())
7615 return false;
7616
7617 if (Var->getType()->isDependentType() || Init->isValueDependent()) {
7618 // FIXME: Teach the constant evaluator to deal with the non-dependent parts
7619 // of value-dependent expressions, and use it here to determine whether the
7620 // initializer is a potential constant expression.
7621 return false;
7622 }
7623
7624 return !Var->isUsableInConstantExpressions(C: Context);
7625}
7626
/// Check if the current lambda has any potential captures
/// that must be captured by any of its enclosing lambdas that are ready to
/// capture. If there is a lambda that can capture a nested
/// potential-capture, go ahead and do so. Also, check to see if any
/// variables are uncaptureable or do not involve an odr-use so do not
/// need to be captured.
///
/// \param FE the full-expression that has just been completed.
/// \param CurrentLSI scope info for the (innermost) lambda being analyzed.
static void CheckIfAnyEnclosingLambdasMustCaptureAnyPotentialCaptures(
    Expr *const FE, LambdaScopeInfo *const CurrentLSI, Sema &S) {

  assert(!S.isUnevaluatedContext());
  assert(S.CurContext->isDependentContext());
#ifndef NDEBUG
  // Walk out of any captured regions so the comparison below is against the
  // lambda's call operator itself.
  DeclContext *DC = S.CurContext;
  while (isa_and_nonnull<CapturedDecl>(DC))
    DC = DC->getParent();
  assert(
      (CurrentLSI->CallOperator == DC || !CurrentLSI->AfterParameterList) &&
      "The current call operator must be synchronized with Sema's CurContext");
#endif // NDEBUG

  const bool IsFullExprInstantiationDependent = FE->isInstantiationDependent();

  // All the potentially captureable variables in the current nested
  // lambda (within a generic outer lambda), must be captured by an
  // outer lambda that is enclosed within a non-dependent context.
  CurrentLSI->visitPotentialCaptures(Callback: [&](ValueDecl *Var, Expr *VarExpr) {
    // If the variable is clearly identified as non-odr-used and the full
    // expression is not instantiation dependent, only then do we not
    // need to check enclosing lambda's for speculative captures.
    // For e.g.:
    // Even though 'x' is not odr-used, it should be captured.
    // int test() {
    //   const int x = 10;
    //   auto L = [=](auto a) {
    //     (void) +x + a;
    //   };
    // }
    if (CurrentLSI->isVariableExprMarkedAsNonODRUsed(CapturingVarExpr: VarExpr) &&
        !IsFullExprInstantiationDependent)
      return;

    // Only plain variables (possibly via a structured binding) are handled.
    VarDecl *UnderlyingVar = Var->getPotentiallyDecomposedVarDecl();
    if (!UnderlyingVar)
      return;

    // If we have a capture-capable lambda for the variable, go ahead and
    // capture the variable in that lambda (and all its enclosing lambdas).
    if (const UnsignedOrNone Index =
            getStackIndexOfNearestEnclosingCaptureCapableLambda(
                FunctionScopes: S.FunctionScopes, VarToCapture: Var, S))
      S.MarkCaptureUsedInEnclosingContext(Capture: Var, Loc: VarExpr->getExprLoc(), CapturingScopeIndex: *Index);
    const bool IsVarNeverAConstantExpression =
        VariableCanNeverBeAConstantExpression(Var: UnderlyingVar, Context&: S.Context);
    if (!IsFullExprInstantiationDependent || IsVarNeverAConstantExpression) {
      // This full expression is not instantiation dependent or the variable
      // can not be used in a constant expression - which means
      // this variable must be odr-used here, so diagnose a
      // capture violation early, if the variable is un-captureable.
      // This is purely for diagnosing errors early.  Otherwise, this
      // error would get diagnosed when the lambda becomes capture ready.
      QualType CaptureType, DeclRefType;
      SourceLocation ExprLoc = VarExpr->getExprLoc();
      if (S.tryCaptureVariable(Var, Loc: ExprLoc, Kind: TryCaptureKind::Implicit,
                               /*EllipsisLoc*/ SourceLocation(),
                               /*BuildAndDiagnose*/ false, CaptureType,
                               DeclRefType, FunctionScopeIndexToStopAt: nullptr)) {
        // We will never be able to capture this variable, and we need
        // to be able to in any and all instantiations, so diagnose it.
        S.tryCaptureVariable(Var, Loc: ExprLoc, Kind: TryCaptureKind::Implicit,
                             /*EllipsisLoc*/ SourceLocation(),
                             /*BuildAndDiagnose*/ true, CaptureType,
                             DeclRefType, FunctionScopeIndexToStopAt: nullptr);
      }
    }
  });

  // Check if 'this' needs to be captured.
  if (CurrentLSI->hasPotentialThisCapture()) {
    // If we have a capture-capable lambda for 'this', go ahead and capture
    // 'this' in that lambda (and all its enclosing lambdas).
    if (const UnsignedOrNone Index =
            getStackIndexOfNearestEnclosingCaptureCapableLambda(
                FunctionScopes: S.FunctionScopes, /*0 is 'this'*/ VarToCapture: nullptr, S)) {
      const unsigned FunctionScopeIndexOfCapturableLambda = *Index;
      S.CheckCXXThisCapture(Loc: CurrentLSI->PotentialThisCaptureLocation,
                            /*Explicit*/ false, /*BuildAndDiagnose*/ true,
                            FunctionScopeIndexToStopAt: &FunctionScopeIndexOfCapturableLambda);
    }
  }

  // Reset all the potential captures at the end of each full-expression.
  CurrentLSI->clearPotentialCaptures();
}
7721
/// Finish semantic analysis of a full-expression.
///
/// Diagnoses unexpanded parameter packs (unless this is a template
/// argument), applies discarded-value conversions when requested, runs the
/// completed-expression checks, resolves pending lambda captures at the end
/// of the full-expression, and wraps the result in an ExprWithCleanups when
/// cleanups are required.
///
/// \param FE the expression being finished (may be null; yields ExprError).
/// \param CC the location used by CheckCompletedExpr.
/// \param DiscardedValue true if the value of the expression is unused, in
///        which case discarded-value conversions are applied.
/// \param IsConstexpr forwarded to CheckCompletedExpr.
/// \param IsTemplateArgument true when the expression is a template
///        argument, which suppresses the unexpanded-pack diagnostic here.
ExprResult Sema::ActOnFinishFullExpr(Expr *FE, SourceLocation CC,
                                     bool DiscardedValue, bool IsConstexpr,
                                     bool IsTemplateArgument) {
  ExprResult FullExpr = FE;

  if (!FullExpr.get())
    return ExprError();

  // Template arguments skip this check; for any other full-expression an
  // unexpanded parameter pack is diagnosed here.
  if (!IsTemplateArgument && DiagnoseUnexpandedParameterPack(E: FullExpr.get()))
    return ExprError();

  if (DiscardedValue) {
    // Top-level expressions default to 'id' when we're in a debugger.
    if (getLangOpts().DebuggerCastResultToId &&
        FullExpr.get()->getType() == Context.UnknownAnyTy) {
      FullExpr = forceUnknownAnyToType(E: FullExpr.get(), ToType: Context.getObjCIdType());
      if (FullExpr.isInvalid())
        return ExprError();
    }

    // Resolve any remaining placeholder type before applying the
    // discarded-value conversions.
    FullExpr = CheckPlaceholderExpr(E: FullExpr.get());
    if (FullExpr.isInvalid())
      return ExprError();

    FullExpr = IgnoredValueConversions(E: FullExpr.get());
    if (FullExpr.isInvalid())
      return ExprError();

    // Warn about expressions whose result is computed but never used.
    DiagnoseUnusedExprResult(S: FullExpr.get(), DiagID: diag::warn_unused_expr);
  }

  if (FullExpr.isInvalid())
    return ExprError();

  CheckCompletedExpr(E: FullExpr.get(), CheckLoc: CC, IsConstexpr);

  // At the end of this full expression (which could be a deeply nested
  // lambda), if there is a potential capture within the nested lambda,
  // have the outer capture-able lambda try and capture it.
  // Consider the following code:
  // void f(int, int);
  // void f(const int&, double);
  // void foo() {
  //   const int x = 10, y = 20;
  //   auto L = [=](auto a) {
  //       auto M = [=](auto b) {
  //          f(x, b); <-- requires x to be captured by L and M
  //          f(y, a);  <-- requires y to be captured by L, but not all Ms
  //       };
  //   };
  // }

  // FIXME: Also consider what happens for something like this that involves
  // the gnu-extension statement-expressions or even lambda-init-captures:
  //   void f() {
  //     const int n = 0;
  //     auto L =  [&](auto a) {
  //       +n + ({ 0; a; });
  //     };
  //   }
  //
  // Here, we see +n, and then the full-expression 0; ends, so we don't
  // capture n (and instead remove it from our list of potential captures),
  // and then the full-expression +n + ({ 0; }); ends, but it's too late
  // for us to see that we need to capture n after all.

  LambdaScopeInfo *const CurrentLSI =
      getCurLambda(/*IgnoreCapturedRegions=*/IgnoreNonLambdaCapturingScope: true);
  // FIXME: PR 17877 showed that getCurLambda() can return a valid pointer
  // even if CurContext is not a lambda call operator. Refer to that Bug Report
  // for an example of the code that might cause this asynchrony.
  // By ensuring we are in the context of a lambda's call operator
  // we can fix the bug (we only need to check whether we need to capture
  // if we are within a lambda's body); but per the comments in that
  // PR, a proper fix would entail :
  //   "Alternative suggestion:
  //   - Add to Sema an integer holding the smallest (outermost) scope
  //     index that we are *lexically* within, and save/restore/set to
  //     FunctionScopes.size() in InstantiatingTemplate's
  //     constructor/destructor.
  //   - Teach the handful of places that iterate over FunctionScopes to
  //     stop at the outermost enclosing lexical scope."
  // Walk out of any intervening captured-region contexts so the check below
  // asks about the nearest non-captured enclosing context.
  DeclContext *DC = CurContext;
  while (isa_and_nonnull<CapturedDecl>(Val: DC))
    DC = DC->getParent();
  const bool IsInLambdaDeclContext = isLambdaCallOperator(DC);
  // Only resolve potential captures when we are lexically inside a lambda
  // call operator, there is something pending, and the expression is valid.
  if (IsInLambdaDeclContext && CurrentLSI &&
      CurrentLSI->hasPotentialCaptures() && !FullExpr.isInvalid())
    CheckIfAnyEnclosingLambdasMustCaptureAnyPotentialCaptures(FE, CurrentLSI,
                                                              S&: *this);
  return MaybeCreateExprWithCleanups(SubExpr: FullExpr);
}
7814
7815StmtResult Sema::ActOnFinishFullStmt(Stmt *FullStmt) {
7816 if (!FullStmt) return StmtError();
7817
7818 return MaybeCreateStmtWithCleanups(SubStmt: FullStmt);
7819}
7820
7821IfExistsResult
7822Sema::CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
7823 const DeclarationNameInfo &TargetNameInfo) {
7824 DeclarationName TargetName = TargetNameInfo.getName();
7825 if (!TargetName)
7826 return IfExistsResult::DoesNotExist;
7827
7828 // If the name itself is dependent, then the result is dependent.
7829 if (TargetName.isDependentName())
7830 return IfExistsResult::Dependent;
7831
7832 // Do the redeclaration lookup in the current scope.
7833 LookupResult R(*this, TargetNameInfo, Sema::LookupAnyName,
7834 RedeclarationKind::NotForRedeclaration);
7835 LookupParsedName(R, S, SS: &SS, /*ObjectType=*/QualType());
7836 R.suppressDiagnostics();
7837
7838 switch (R.getResultKind()) {
7839 case LookupResultKind::Found:
7840 case LookupResultKind::FoundOverloaded:
7841 case LookupResultKind::FoundUnresolvedValue:
7842 case LookupResultKind::Ambiguous:
7843 return IfExistsResult::Exists;
7844
7845 case LookupResultKind::NotFound:
7846 return IfExistsResult::DoesNotExist;
7847
7848 case LookupResultKind::NotFoundInCurrentInstantiation:
7849 return IfExistsResult::Dependent;
7850 }
7851
7852 llvm_unreachable("Invalid LookupResult Kind!");
7853}
7854
7855IfExistsResult Sema::CheckMicrosoftIfExistsSymbol(Scope *S,
7856 SourceLocation KeywordLoc,
7857 bool IsIfExists,
7858 CXXScopeSpec &SS,
7859 UnqualifiedId &Name) {
7860 DeclarationNameInfo TargetNameInfo = GetNameFromUnqualifiedId(Name);
7861
7862 // Check for an unexpanded parameter pack.
7863 auto UPPC = IsIfExists ? UPPC_IfExists : UPPC_IfNotExists;
7864 if (DiagnoseUnexpandedParameterPack(SS, UPPC) ||
7865 DiagnoseUnexpandedParameterPack(NameInfo: TargetNameInfo, UPPC))
7866 return IfExistsResult::Error;
7867
7868 return CheckMicrosoftIfExistsSymbol(S, SS, TargetNameInfo);
7869}
7870
7871concepts::Requirement *Sema::ActOnSimpleRequirement(Expr *E) {
7872 return BuildExprRequirement(E, /*IsSimple=*/IsSatisfied: true,
7873 /*NoexceptLoc=*/SourceLocation(),
7874 /*ReturnTypeRequirement=*/{});
7875}
7876
7877concepts::Requirement *Sema::ActOnTypeRequirement(
7878 SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc,
7879 const IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId) {
7880 assert(((!TypeName && TemplateId) || (TypeName && !TemplateId)) &&
7881 "Exactly one of TypeName and TemplateId must be specified.");
7882 TypeSourceInfo *TSI = nullptr;
7883 if (TypeName) {
7884 QualType T =
7885 CheckTypenameType(Keyword: ElaboratedTypeKeyword::Typename, KeywordLoc: TypenameKWLoc,
7886 QualifierLoc: SS.getWithLocInContext(Context), II: *TypeName, IILoc: NameLoc,
7887 TSI: &TSI, /*DeducedTSTContext=*/false);
7888 if (T.isNull())
7889 return nullptr;
7890 } else {
7891 ASTTemplateArgsPtr ArgsPtr(TemplateId->getTemplateArgs(),
7892 TemplateId->NumArgs);
7893 TypeResult T = ActOnTypenameType(S: CurScope, TypenameLoc: TypenameKWLoc, SS,
7894 TemplateLoc: TemplateId->TemplateKWLoc,
7895 TemplateName: TemplateId->Template, TemplateII: TemplateId->Name,
7896 TemplateIILoc: TemplateId->TemplateNameLoc,
7897 LAngleLoc: TemplateId->LAngleLoc, TemplateArgs: ArgsPtr,
7898 RAngleLoc: TemplateId->RAngleLoc);
7899 if (T.isInvalid())
7900 return nullptr;
7901 if (GetTypeFromParser(Ty: T.get(), TInfo: &TSI).isNull())
7902 return nullptr;
7903 }
7904 return BuildTypeRequirement(Type: TSI);
7905}
7906
7907concepts::Requirement *
7908Sema::ActOnCompoundRequirement(Expr *E, SourceLocation NoexceptLoc) {
7909 return BuildExprRequirement(E, /*IsSimple=*/IsSatisfied: false, NoexceptLoc,
7910 /*ReturnTypeRequirement=*/{});
7911}
7912
7913concepts::Requirement *
7914Sema::ActOnCompoundRequirement(
7915 Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS,
7916 TemplateIdAnnotation *TypeConstraint, unsigned Depth) {
7917 // C++2a [expr.prim.req.compound] p1.3.3
7918 // [..] the expression is deduced against an invented function template
7919 // F [...] F is a void function template with a single type template
7920 // parameter T declared with the constrained-parameter. Form a new
7921 // cv-qualifier-seq cv by taking the union of const and volatile specifiers
7922 // around the constrained-parameter. F has a single parameter whose
7923 // type-specifier is cv T followed by the abstract-declarator. [...]
7924 //
7925 // The cv part is done in the calling function - we get the concept with
7926 // arguments and the abstract declarator with the correct CV qualification and
7927 // have to synthesize T and the single parameter of F.
7928 auto &II = Context.Idents.get(Name: "expr-type");
7929 auto *TParam = TemplateTypeParmDecl::Create(C: Context, DC: CurContext,
7930 KeyLoc: SourceLocation(),
7931 NameLoc: SourceLocation(), D: Depth,
7932 /*Index=*/P: 0, Id: &II,
7933 /*Typename=*/true,
7934 /*ParameterPack=*/false,
7935 /*HasTypeConstraint=*/true);
7936
7937 if (BuildTypeConstraint(SS, TypeConstraint, ConstrainedParameter: TParam,
7938 /*EllipsisLoc=*/SourceLocation(),
7939 /*AllowUnexpandedPack=*/true))
7940 // Just produce a requirement with no type requirements.
7941 return BuildExprRequirement(E, /*IsSimple=*/IsSatisfied: false, NoexceptLoc, ReturnTypeRequirement: {});
7942
7943 auto *TPL = TemplateParameterList::Create(C: Context, TemplateLoc: SourceLocation(),
7944 LAngleLoc: SourceLocation(),
7945 Params: ArrayRef<NamedDecl *>(TParam),
7946 RAngleLoc: SourceLocation(),
7947 /*RequiresClause=*/nullptr);
7948 return BuildExprRequirement(
7949 E, /*IsSimple=*/IsSatisfied: false, NoexceptLoc,
7950 ReturnTypeRequirement: concepts::ExprRequirement::ReturnTypeRequirement(TPL));
7951}
7952
/// Build an ExprRequirement and compute its satisfaction status.
///
/// A dependent expression (or dependent return-type-requirement) defers
/// checking; otherwise the optional noexcept and the
/// return-type-requirement are verified, including substituting
/// decltype((E)) into the type-constraint when one is present.
///
/// \param E the requirement's expression.
/// \param IsSimple true for a simple-requirement, false for a
///        compound-requirement.
/// \param NoexceptLoc location of the noexcept keyword, if any.
/// \param ReturnTypeRequirement the trailing requirement on the
///        expression's type (possibly empty, a substitution failure, or a
///        type-constraint).
concepts::ExprRequirement *
Sema::BuildExprRequirement(
    Expr *E, bool IsSimple, SourceLocation NoexceptLoc,
    concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement) {
  // Assume satisfied until one of the checks below says otherwise.
  auto Status = concepts::ExprRequirement::SS_Satisfied;
  ConceptSpecializationExpr *SubstitutedConstraintExpr = nullptr;
  if (E->isInstantiationDependent() || E->getType()->isPlaceholderType() ||
      ReturnTypeRequirement.isDependent())
    Status = concepts::ExprRequirement::SS_Dependent;
  else if (NoexceptLoc.isValid() && canThrow(E) == CanThrowResult::CT_Can)
    // 'noexcept' was written but the expression is potentially throwing.
    Status = concepts::ExprRequirement::SS_NoexceptNotMet;
  else if (ReturnTypeRequirement.isSubstitutionFailure())
    Status = concepts::ExprRequirement::SS_TypeRequirementSubstitutionFailure;
  else if (ReturnTypeRequirement.isTypeConstraint()) {
    // C++2a [expr.prim.req]p1.3.3
    //     The immediately-declared constraint ([temp]) of decltype((E)) shall
    //     be satisfied.
    TemplateParameterList *TPL =
        ReturnTypeRequirement.getTypeConstraintTemplateParameterList();
    // getReferenceQualifiedType produces the decltype((E)) type to
    // substitute for the invented template parameter.
    QualType MatchedType = Context.getReferenceQualifiedType(e: E);
    llvm::SmallVector<TemplateArgument, 1> Args;
    Args.push_back(Elt: TemplateArgument(MatchedType));

    auto *Param = cast<TemplateTypeParmDecl>(Val: TPL->getParam(Idx: 0));

    MultiLevelTemplateArgumentList MLTAL(Param, Args, /*Final=*/true);
    MLTAL.addOuterRetainedLevels(Num: TPL->getDepth());
    const TypeConstraint *TC = Param->getTypeConstraint();
    assert(TC && "Type Constraint cannot be null here");
    auto *IDC = TC->getImmediatelyDeclaredConstraint();
    assert(IDC && "ImmediatelyDeclaredConstraint can't be null here.");
    // Substitute decltype((E)) into the immediately-declared constraint and
    // evaluate the resulting concept-specialization.
    ExprResult Constraint = SubstExpr(E: IDC, TemplateArgs: MLTAL);
    bool HasError = Constraint.isInvalid();
    if (!HasError) {
      SubstitutedConstraintExpr =
          cast<ConceptSpecializationExpr>(Val: Constraint.get());
      if (SubstitutedConstraintExpr->getSatisfaction().ContainsErrors)
        HasError = true;
    }
    if (HasError) {
      // Substitution failed (or produced errors): record a substitution
      // diagnostic carrying the pretty-printed constraint instead of an
      // expression.
      return new (Context) concepts::ExprRequirement(
          createSubstDiagAt(Location: IDC->getExprLoc(),
                            Printer: [&](llvm::raw_ostream &OS) {
                              IDC->printPretty(OS, /*Helper=*/nullptr,
                                               Policy: getPrintingPolicy());
                            }),
          IsSimple, NoexceptLoc, ReturnTypeRequirement);
    }
    if (!SubstitutedConstraintExpr->isSatisfied())
      Status = concepts::ExprRequirement::SS_ConstraintsNotSatisfied;
  }
  return new (Context) concepts::ExprRequirement(E, IsSimple, NoexceptLoc,
                                                 ReturnTypeRequirement, Status,
                                                 SubstitutedConstraintExpr);
}
8008
8009concepts::ExprRequirement *
8010Sema::BuildExprRequirement(
8011 concepts::Requirement::SubstitutionDiagnostic *ExprSubstitutionDiagnostic,
8012 bool IsSimple, SourceLocation NoexceptLoc,
8013 concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement) {
8014 return new (Context) concepts::ExprRequirement(ExprSubstitutionDiagnostic,
8015 IsSimple, NoexceptLoc,
8016 ReturnTypeRequirement);
8017}
8018
8019concepts::TypeRequirement *
8020Sema::BuildTypeRequirement(TypeSourceInfo *Type) {
8021 return new (Context) concepts::TypeRequirement(Type);
8022}
8023
8024concepts::TypeRequirement *
8025Sema::BuildTypeRequirement(
8026 concepts::Requirement::SubstitutionDiagnostic *SubstDiag) {
8027 return new (Context) concepts::TypeRequirement(SubstDiag);
8028}
8029
8030concepts::Requirement *Sema::ActOnNestedRequirement(Expr *Constraint) {
8031 return BuildNestedRequirement(E: Constraint);
8032}
8033
8034concepts::NestedRequirement *
8035Sema::BuildNestedRequirement(Expr *Constraint) {
8036 ConstraintSatisfaction Satisfaction;
8037 LocalInstantiationScope Scope(*this);
8038 if (!Constraint->isInstantiationDependent() &&
8039 !Constraint->isValueDependent() &&
8040 CheckConstraintSatisfaction(Entity: nullptr, AssociatedConstraints: AssociatedConstraint(Constraint),
8041 /*TemplateArgs=*/TemplateArgLists: {},
8042 TemplateIDRange: Constraint->getSourceRange(), Satisfaction))
8043 return nullptr;
8044 return new (Context) concepts::NestedRequirement(Context, Constraint,
8045 Satisfaction);
8046}
8047
8048concepts::NestedRequirement *
8049Sema::BuildNestedRequirement(StringRef InvalidConstraintEntity,
8050 const ASTConstraintSatisfaction &Satisfaction) {
8051 return new (Context) concepts::NestedRequirement(
8052 InvalidConstraintEntity,
8053 ASTConstraintSatisfaction::Rebuild(C: Context, Satisfaction));
8054}
8055
/// Begin a requires-expression: create its body declaration, enter that
/// declaration context, and validate/register the local parameters.
///
/// Diagnoses (and recovers from) void parameters, default arguments, and
/// explicit object parameters, none of which are permitted in a
/// requires-expression's parameter list.
///
/// \returns the new RequiresExprBodyDecl, now the current DeclContext
///          (popped later by ActOnFinishRequiresExpr).
RequiresExprBodyDecl *
Sema::ActOnStartRequiresExpr(SourceLocation RequiresKWLoc,
                             ArrayRef<ParmVarDecl *> LocalParameters,
                             Scope *BodyScope) {
  assert(BodyScope);

  RequiresExprBodyDecl *Body = RequiresExprBodyDecl::Create(C&: Context, DC: CurContext,
                                                            StartLoc: RequiresKWLoc);

  PushDeclContext(S: BodyScope, DC: Body);

  for (ParmVarDecl *Param : LocalParameters) {
    if (Param->getType()->isVoidType()) {
      // Only a lone, unnamed, unqualified 'void' parameter is accepted
      // silently (an empty parameter list); every other use of 'void' is
      // diagnosed, recovering with 'int' where a real type is needed.
      if (LocalParameters.size() > 1) {
        Diag(Loc: Param->getBeginLoc(), DiagID: diag::err_void_only_param);
        Param->setType(Context.IntTy);
      } else if (Param->getIdentifier()) {
        Diag(Loc: Param->getBeginLoc(), DiagID: diag::err_param_with_void_type);
        Param->setType(Context.IntTy);
      } else if (Param->getType().hasQualifiers()) {
        Diag(Loc: Param->getBeginLoc(), DiagID: diag::err_void_param_qualified);
      }
    } else if (Param->hasDefaultArg()) {
      // C++2a [expr.prim.req] p4
      //     [...] A local parameter of a requires-expression shall not have a
      //     default argument. [...]
      Diag(Loc: Param->getDefaultArgRange().getBegin(),
           DiagID: diag::err_requires_expr_local_parameter_default_argument);
      // Ignore default argument and move on
    } else if (Param->isExplicitObjectParameter()) {
      // C++23 [dcl.fct]p6:
      //   An explicit-object-parameter-declaration is a parameter-declaration
      //   with a this specifier. An explicit-object-parameter-declaration
      //   shall appear only as the first parameter-declaration of a
      //   parameter-declaration-list of either:
      //   - a member-declarator that declares a member function, or
      //   - a lambda-declarator.
      //
      //   The parameter-declaration-list of a requires-expression is not such
      //   a context.
      Diag(Loc: Param->getExplicitObjectParamThisLoc(),
           DiagID: diag::err_requires_expr_explicit_object_parameter);
      // Recover by dropping the 'this' specifier from the parameter.
      Param->setExplicitObjectParameterLoc(SourceLocation());
    }

    // Reparent the parameter into the requires-expression body.
    Param->setDeclContext(Body);
    // If this has an identifier, add it to the scope stack.
    if (Param->getIdentifier()) {
      CheckShadow(S: BodyScope, D: Param);
      PushOnScopeChains(D: Param, S: BodyScope);
    }
  }
  return Body;
}
8110
8111void Sema::ActOnFinishRequiresExpr() {
8112 assert(CurContext && "DeclContext imbalance!");
8113 CurContext = CurContext->getLexicalParent();
8114 assert(CurContext && "Popped translation unit!");
8115}
8116
8117ExprResult Sema::ActOnRequiresExpr(
8118 SourceLocation RequiresKWLoc, RequiresExprBodyDecl *Body,
8119 SourceLocation LParenLoc, ArrayRef<ParmVarDecl *> LocalParameters,
8120 SourceLocation RParenLoc, ArrayRef<concepts::Requirement *> Requirements,
8121 SourceLocation ClosingBraceLoc) {
8122 auto *RE = RequiresExpr::Create(C&: Context, RequiresKWLoc, Body, LParenLoc,
8123 LocalParameters, RParenLoc, Requirements,
8124 RBraceLoc: ClosingBraceLoc);
8125 if (DiagnoseUnexpandedParameterPackInRequiresExpr(RE))
8126 return ExprError();
8127 return RE;
8128}
8129