//===--- SemaExprCXX.cpp - Semantic Analysis for Expressions --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// Implements semantic analysis for C++ expressions.
///
//===----------------------------------------------------------------------===//
13
14#include "TreeTransform.h"
15#include "TypeLocBuilder.h"
16#include "clang/AST/ASTContext.h"
17#include "clang/AST/ASTLambda.h"
18#include "clang/AST/CXXInheritance.h"
19#include "clang/AST/CharUnits.h"
20#include "clang/AST/DeclCXX.h"
21#include "clang/AST/DeclObjC.h"
22#include "clang/AST/DynamicRecursiveASTVisitor.h"
23#include "clang/AST/ExprCXX.h"
24#include "clang/AST/ExprConcepts.h"
25#include "clang/AST/ExprObjC.h"
26#include "clang/AST/Type.h"
27#include "clang/AST/TypeLoc.h"
28#include "clang/Basic/AlignedAllocation.h"
29#include "clang/Basic/DiagnosticSema.h"
30#include "clang/Basic/PartialDiagnostic.h"
31#include "clang/Basic/TargetInfo.h"
32#include "clang/Basic/TokenKinds.h"
33#include "clang/Lex/Preprocessor.h"
34#include "clang/Sema/DeclSpec.h"
35#include "clang/Sema/EnterExpressionEvaluationContext.h"
36#include "clang/Sema/Initialization.h"
37#include "clang/Sema/Lookup.h"
38#include "clang/Sema/ParsedTemplate.h"
39#include "clang/Sema/Scope.h"
40#include "clang/Sema/ScopeInfo.h"
41#include "clang/Sema/SemaCUDA.h"
42#include "clang/Sema/SemaHLSL.h"
43#include "clang/Sema/SemaLambda.h"
44#include "clang/Sema/SemaObjC.h"
45#include "clang/Sema/SemaPPC.h"
46#include "clang/Sema/Template.h"
47#include "clang/Sema/TemplateDeduction.h"
48#include "llvm/ADT/APInt.h"
49#include "llvm/ADT/STLExtras.h"
50#include "llvm/ADT/StringExtras.h"
51#include "llvm/Support/ErrorHandling.h"
52#include "llvm/Support/TypeSize.h"
53#include <optional>
54using namespace clang;
55using namespace sema;
56
57ParsedType Sema::getInheritingConstructorName(CXXScopeSpec &SS,
58 SourceLocation NameLoc,
59 const IdentifierInfo &Name) {
60 NestedNameSpecifier *NNS = SS.getScopeRep();
61 if ([[maybe_unused]] const IdentifierInfo *II = NNS->getAsIdentifier())
62 assert(II == &Name && "not a constructor name");
63
64 QualType Type(NNS->translateToType(Context), 0);
65 // This reference to the type is located entirely at the location of the
66 // final identifier in the qualified-id.
67 return CreateParsedType(T: Type,
68 TInfo: Context.getTrivialTypeSourceInfo(T: Type, Loc: NameLoc));
69}
70
71ParsedType Sema::getConstructorName(const IdentifierInfo &II,
72 SourceLocation NameLoc, Scope *S,
73 CXXScopeSpec &SS, bool EnteringContext) {
74 CXXRecordDecl *CurClass = getCurrentClass(S, SS: &SS);
75 assert(CurClass && &II == CurClass->getIdentifier() &&
76 "not a constructor name");
77
78 // When naming a constructor as a member of a dependent context (eg, in a
79 // friend declaration or an inherited constructor declaration), form an
80 // unresolved "typename" type.
81 if (CurClass->isDependentContext() && !EnteringContext && SS.getScopeRep()) {
82 QualType T = Context.getDependentNameType(Keyword: ElaboratedTypeKeyword::None,
83 NNS: SS.getScopeRep(), Name: &II);
84 return ParsedType::make(P: T);
85 }
86
87 if (SS.isNotEmpty() && RequireCompleteDeclContext(SS, DC: CurClass))
88 return ParsedType();
89
90 // Find the injected-class-name declaration. Note that we make no attempt to
91 // diagnose cases where the injected-class-name is shadowed: the only
92 // declaration that can validly shadow the injected-class-name is a
93 // non-static data member, and if the class contains both a non-static data
94 // member and a constructor then it is ill-formed (we check that in
95 // CheckCompletedCXXClass).
96 CXXRecordDecl *InjectedClassName = nullptr;
97 for (NamedDecl *ND : CurClass->lookup(Name: &II)) {
98 auto *RD = dyn_cast<CXXRecordDecl>(Val: ND);
99 if (RD && RD->isInjectedClassName()) {
100 InjectedClassName = RD;
101 break;
102 }
103 }
104 if (!InjectedClassName) {
105 if (!CurClass->isInvalidDecl()) {
106 // FIXME: RequireCompleteDeclContext doesn't check dependent contexts
107 // properly. Work around it here for now.
108 Diag(Loc: SS.getLastQualifierNameLoc(),
109 DiagID: diag::err_incomplete_nested_name_spec) << CurClass << SS.getRange();
110 }
111 return ParsedType();
112 }
113
114 QualType T = Context.getTypeDeclType(Decl: InjectedClassName);
115 DiagnoseUseOfDecl(D: InjectedClassName, Locs: NameLoc);
116 MarkAnyDeclReferenced(Loc: NameLoc, D: InjectedClassName, /*OdrUse=*/MightBeOdrUse: false);
117
118 return ParsedType::make(P: T);
119}
120
121ParsedType Sema::getDestructorName(const IdentifierInfo &II,
122 SourceLocation NameLoc, Scope *S,
123 CXXScopeSpec &SS, ParsedType ObjectTypePtr,
124 bool EnteringContext) {
125 // Determine where to perform name lookup.
126
127 // FIXME: This area of the standard is very messy, and the current
128 // wording is rather unclear about which scopes we search for the
129 // destructor name; see core issues 399 and 555. Issue 399 in
130 // particular shows where the current description of destructor name
131 // lookup is completely out of line with existing practice, e.g.,
132 // this appears to be ill-formed:
133 //
134 // namespace N {
135 // template <typename T> struct S {
136 // ~S();
137 // };
138 // }
139 //
140 // void f(N::S<int>* s) {
141 // s->N::S<int>::~S();
142 // }
143 //
144 // See also PR6358 and PR6359.
145 //
146 // For now, we accept all the cases in which the name given could plausibly
147 // be interpreted as a correct destructor name, issuing off-by-default
148 // extension diagnostics on the cases that don't strictly conform to the
149 // C++20 rules. This basically means we always consider looking in the
150 // nested-name-specifier prefix, the complete nested-name-specifier, and
151 // the scope, and accept if we find the expected type in any of the three
152 // places.
153
154 if (SS.isInvalid())
155 return nullptr;
156
157 // Whether we've failed with a diagnostic already.
158 bool Failed = false;
159
160 llvm::SmallVector<NamedDecl*, 8> FoundDecls;
161 llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 8> FoundDeclSet;
162
163 // If we have an object type, it's because we are in a
164 // pseudo-destructor-expression or a member access expression, and
165 // we know what type we're looking for.
166 QualType SearchType =
167 ObjectTypePtr ? GetTypeFromParser(Ty: ObjectTypePtr) : QualType();
168
169 auto CheckLookupResult = [&](LookupResult &Found) -> ParsedType {
170 auto IsAcceptableResult = [&](NamedDecl *D) -> bool {
171 auto *Type = dyn_cast<TypeDecl>(Val: D->getUnderlyingDecl());
172 if (!Type)
173 return false;
174
175 if (SearchType.isNull() || SearchType->isDependentType())
176 return true;
177
178 QualType T = Context.getTypeDeclType(Decl: Type);
179 return Context.hasSameUnqualifiedType(T1: T, T2: SearchType);
180 };
181
182 unsigned NumAcceptableResults = 0;
183 for (NamedDecl *D : Found) {
184 if (IsAcceptableResult(D))
185 ++NumAcceptableResults;
186
187 // Don't list a class twice in the lookup failure diagnostic if it's
188 // found by both its injected-class-name and by the name in the enclosing
189 // scope.
190 if (auto *RD = dyn_cast<CXXRecordDecl>(Val: D))
191 if (RD->isInjectedClassName())
192 D = cast<NamedDecl>(Val: RD->getParent());
193
194 if (FoundDeclSet.insert(Ptr: D).second)
195 FoundDecls.push_back(Elt: D);
196 }
197
198 // As an extension, attempt to "fix" an ambiguity by erasing all non-type
199 // results, and all non-matching results if we have a search type. It's not
200 // clear what the right behavior is if destructor lookup hits an ambiguity,
201 // but other compilers do generally accept at least some kinds of
202 // ambiguity.
203 if (Found.isAmbiguous() && NumAcceptableResults == 1) {
204 Diag(Loc: NameLoc, DiagID: diag::ext_dtor_name_ambiguous);
205 LookupResult::Filter F = Found.makeFilter();
206 while (F.hasNext()) {
207 NamedDecl *D = F.next();
208 if (auto *TD = dyn_cast<TypeDecl>(Val: D->getUnderlyingDecl()))
209 Diag(Loc: D->getLocation(), DiagID: diag::note_destructor_type_here)
210 << Context.getTypeDeclType(Decl: TD);
211 else
212 Diag(Loc: D->getLocation(), DiagID: diag::note_destructor_nontype_here);
213
214 if (!IsAcceptableResult(D))
215 F.erase();
216 }
217 F.done();
218 }
219
220 if (Found.isAmbiguous())
221 Failed = true;
222
223 if (TypeDecl *Type = Found.getAsSingle<TypeDecl>()) {
224 if (IsAcceptableResult(Type)) {
225 QualType T = Context.getTypeDeclType(Decl: Type);
226 MarkAnyDeclReferenced(Loc: Type->getLocation(), D: Type, /*OdrUse=*/MightBeOdrUse: false);
227 return CreateParsedType(
228 T: Context.getElaboratedType(Keyword: ElaboratedTypeKeyword::None, NNS: nullptr, NamedType: T),
229 TInfo: Context.getTrivialTypeSourceInfo(T, Loc: NameLoc));
230 }
231 }
232
233 return nullptr;
234 };
235
236 bool IsDependent = false;
237
238 auto LookupInObjectType = [&]() -> ParsedType {
239 if (Failed || SearchType.isNull())
240 return nullptr;
241
242 IsDependent |= SearchType->isDependentType();
243
244 LookupResult Found(*this, &II, NameLoc, LookupDestructorName);
245 DeclContext *LookupCtx = computeDeclContext(T: SearchType);
246 if (!LookupCtx)
247 return nullptr;
248 LookupQualifiedName(R&: Found, LookupCtx);
249 return CheckLookupResult(Found);
250 };
251
252 auto LookupInNestedNameSpec = [&](CXXScopeSpec &LookupSS) -> ParsedType {
253 if (Failed)
254 return nullptr;
255
256 IsDependent |= isDependentScopeSpecifier(SS: LookupSS);
257 DeclContext *LookupCtx = computeDeclContext(SS: LookupSS, EnteringContext);
258 if (!LookupCtx)
259 return nullptr;
260
261 LookupResult Found(*this, &II, NameLoc, LookupDestructorName);
262 if (RequireCompleteDeclContext(SS&: LookupSS, DC: LookupCtx)) {
263 Failed = true;
264 return nullptr;
265 }
266 LookupQualifiedName(R&: Found, LookupCtx);
267 return CheckLookupResult(Found);
268 };
269
270 auto LookupInScope = [&]() -> ParsedType {
271 if (Failed || !S)
272 return nullptr;
273
274 LookupResult Found(*this, &II, NameLoc, LookupDestructorName);
275 LookupName(R&: Found, S);
276 return CheckLookupResult(Found);
277 };
278
279 // C++2a [basic.lookup.qual]p6:
280 // In a qualified-id of the form
281 //
282 // nested-name-specifier[opt] type-name :: ~ type-name
283 //
284 // the second type-name is looked up in the same scope as the first.
285 //
286 // We interpret this as meaning that if you do a dual-scope lookup for the
287 // first name, you also do a dual-scope lookup for the second name, per
288 // C++ [basic.lookup.classref]p4:
289 //
290 // If the id-expression in a class member access is a qualified-id of the
291 // form
292 //
293 // class-name-or-namespace-name :: ...
294 //
295 // the class-name-or-namespace-name following the . or -> is first looked
296 // up in the class of the object expression and the name, if found, is used.
297 // Otherwise, it is looked up in the context of the entire
298 // postfix-expression.
299 //
300 // This looks in the same scopes as for an unqualified destructor name:
301 //
302 // C++ [basic.lookup.classref]p3:
303 // If the unqualified-id is ~ type-name, the type-name is looked up
304 // in the context of the entire postfix-expression. If the type T
305 // of the object expression is of a class type C, the type-name is
306 // also looked up in the scope of class C. At least one of the
307 // lookups shall find a name that refers to cv T.
308 //
309 // FIXME: The intent is unclear here. Should type-name::~type-name look in
310 // the scope anyway if it finds a non-matching name declared in the class?
311 // If both lookups succeed and find a dependent result, which result should
312 // we retain? (Same question for p->~type-name().)
313
314 if (NestedNameSpecifier *Prefix =
315 SS.isSet() ? SS.getScopeRep()->getPrefix() : nullptr) {
316 // This is
317 //
318 // nested-name-specifier type-name :: ~ type-name
319 //
320 // Look for the second type-name in the nested-name-specifier.
321 CXXScopeSpec PrefixSS;
322 PrefixSS.Adopt(Other: NestedNameSpecifierLoc(Prefix, SS.location_data()));
323 if (ParsedType T = LookupInNestedNameSpec(PrefixSS))
324 return T;
325 } else {
326 // This is one of
327 //
328 // type-name :: ~ type-name
329 // ~ type-name
330 //
331 // Look in the scope and (if any) the object type.
332 if (ParsedType T = LookupInScope())
333 return T;
334 if (ParsedType T = LookupInObjectType())
335 return T;
336 }
337
338 if (Failed)
339 return nullptr;
340
341 if (IsDependent) {
342 // We didn't find our type, but that's OK: it's dependent anyway.
343
344 // FIXME: What if we have no nested-name-specifier?
345 TypeSourceInfo *TSI = nullptr;
346 QualType T =
347 CheckTypenameType(Keyword: ElaboratedTypeKeyword::None, KeywordLoc: SourceLocation(),
348 QualifierLoc: SS.getWithLocInContext(Context), II, IILoc: NameLoc, TSI: &TSI,
349 /*DeducedTSTContext=*/true);
350 return CreateParsedType(T, TInfo: TSI);
351 }
352
353 // The remaining cases are all non-standard extensions imitating the behavior
354 // of various other compilers.
355 unsigned NumNonExtensionDecls = FoundDecls.size();
356
357 if (SS.isSet()) {
358 // For compatibility with older broken C++ rules and existing code,
359 //
360 // nested-name-specifier :: ~ type-name
361 //
362 // also looks for type-name within the nested-name-specifier.
363 if (ParsedType T = LookupInNestedNameSpec(SS)) {
364 Diag(Loc: SS.getEndLoc(), DiagID: diag::ext_dtor_named_in_wrong_scope)
365 << SS.getRange()
366 << FixItHint::CreateInsertion(InsertionLoc: SS.getEndLoc(),
367 Code: ("::" + II.getName()).str());
368 return T;
369 }
370
371 // For compatibility with other compilers and older versions of Clang,
372 //
373 // nested-name-specifier type-name :: ~ type-name
374 //
375 // also looks for type-name in the scope. Unfortunately, we can't
376 // reasonably apply this fallback for dependent nested-name-specifiers.
377 if (SS.isValid() && SS.getScopeRep()->getPrefix()) {
378 if (ParsedType T = LookupInScope()) {
379 Diag(Loc: SS.getEndLoc(), DiagID: diag::ext_qualified_dtor_named_in_lexical_scope)
380 << FixItHint::CreateRemoval(RemoveRange: SS.getRange());
381 Diag(Loc: FoundDecls.back()->getLocation(), DiagID: diag::note_destructor_type_here)
382 << GetTypeFromParser(Ty: T);
383 return T;
384 }
385 }
386 }
387
388 // We didn't find anything matching; tell the user what we did find (if
389 // anything).
390
391 // Don't tell the user about declarations we shouldn't have found.
392 FoundDecls.resize(N: NumNonExtensionDecls);
393
394 // List types before non-types.
395 llvm::stable_sort(Range&: FoundDecls, C: [](NamedDecl *A, NamedDecl *B) {
396 return isa<TypeDecl>(Val: A->getUnderlyingDecl()) >
397 isa<TypeDecl>(Val: B->getUnderlyingDecl());
398 });
399
400 // Suggest a fixit to properly name the destroyed type.
401 auto MakeFixItHint = [&]{
402 const CXXRecordDecl *Destroyed = nullptr;
403 // FIXME: If we have a scope specifier, suggest its last component?
404 if (!SearchType.isNull())
405 Destroyed = SearchType->getAsCXXRecordDecl();
406 else if (S)
407 Destroyed = dyn_cast_or_null<CXXRecordDecl>(Val: S->getEntity());
408 if (Destroyed)
409 return FixItHint::CreateReplacement(RemoveRange: SourceRange(NameLoc),
410 Code: Destroyed->getNameAsString());
411 return FixItHint();
412 };
413
414 if (FoundDecls.empty()) {
415 // FIXME: Attempt typo-correction?
416 Diag(Loc: NameLoc, DiagID: diag::err_undeclared_destructor_name)
417 << &II << MakeFixItHint();
418 } else if (!SearchType.isNull() && FoundDecls.size() == 1) {
419 if (auto *TD = dyn_cast<TypeDecl>(Val: FoundDecls[0]->getUnderlyingDecl())) {
420 assert(!SearchType.isNull() &&
421 "should only reject a type result if we have a search type");
422 QualType T = Context.getTypeDeclType(Decl: TD);
423 Diag(Loc: NameLoc, DiagID: diag::err_destructor_expr_type_mismatch)
424 << T << SearchType << MakeFixItHint();
425 } else {
426 Diag(Loc: NameLoc, DiagID: diag::err_destructor_expr_nontype)
427 << &II << MakeFixItHint();
428 }
429 } else {
430 Diag(Loc: NameLoc, DiagID: SearchType.isNull() ? diag::err_destructor_name_nontype
431 : diag::err_destructor_expr_mismatch)
432 << &II << SearchType << MakeFixItHint();
433 }
434
435 for (NamedDecl *FoundD : FoundDecls) {
436 if (auto *TD = dyn_cast<TypeDecl>(Val: FoundD->getUnderlyingDecl()))
437 Diag(Loc: FoundD->getLocation(), DiagID: diag::note_destructor_type_here)
438 << Context.getTypeDeclType(Decl: TD);
439 else
440 Diag(Loc: FoundD->getLocation(), DiagID: diag::note_destructor_nontype_here)
441 << FoundD;
442 }
443
444 return nullptr;
445}
446
447ParsedType Sema::getDestructorTypeForDecltype(const DeclSpec &DS,
448 ParsedType ObjectType) {
449 if (DS.getTypeSpecType() == DeclSpec::TST_error)
450 return nullptr;
451
452 if (DS.getTypeSpecType() == DeclSpec::TST_decltype_auto) {
453 Diag(Loc: DS.getTypeSpecTypeLoc(), DiagID: diag::err_decltype_auto_invalid);
454 return nullptr;
455 }
456
457 assert(DS.getTypeSpecType() == DeclSpec::TST_decltype &&
458 "unexpected type in getDestructorType");
459 QualType T = BuildDecltypeType(E: DS.getRepAsExpr());
460
461 // If we know the type of the object, check that the correct destructor
462 // type was named now; we can give better diagnostics this way.
463 QualType SearchType = GetTypeFromParser(Ty: ObjectType);
464 if (!SearchType.isNull() && !SearchType->isDependentType() &&
465 !Context.hasSameUnqualifiedType(T1: T, T2: SearchType)) {
466 Diag(Loc: DS.getTypeSpecTypeLoc(), DiagID: diag::err_destructor_expr_type_mismatch)
467 << T << SearchType;
468 return nullptr;
469 }
470
471 return ParsedType::make(P: T);
472}
473
474bool Sema::checkLiteralOperatorId(const CXXScopeSpec &SS,
475 const UnqualifiedId &Name, bool IsUDSuffix) {
476 assert(Name.getKind() == UnqualifiedIdKind::IK_LiteralOperatorId);
477 if (!IsUDSuffix) {
478 // [over.literal] p8
479 //
480 // double operator""_Bq(long double); // OK: not a reserved identifier
481 // double operator"" _Bq(long double); // ill-formed, no diagnostic required
482 const IdentifierInfo *II = Name.Identifier;
483 ReservedIdentifierStatus Status = II->isReserved(LangOpts: PP.getLangOpts());
484 SourceLocation Loc = Name.getEndLoc();
485
486 auto Hint = FixItHint::CreateReplacement(
487 RemoveRange: Name.getSourceRange(),
488 Code: (StringRef("operator\"\"") + II->getName()).str());
489
490 // Only emit this diagnostic if we start with an underscore, else the
491 // diagnostic for C++11 requiring a space between the quotes and the
492 // identifier conflicts with this and gets confusing. The diagnostic stating
493 // this is a reserved name should force the underscore, which gets this
494 // back.
495 if (II->isReservedLiteralSuffixId() !=
496 ReservedLiteralSuffixIdStatus::NotStartsWithUnderscore)
497 Diag(Loc, DiagID: diag::warn_deprecated_literal_operator_id) << II << Hint;
498
499 if (isReservedInAllContexts(Status))
500 Diag(Loc, DiagID: diag::warn_reserved_extern_symbol)
501 << II << static_cast<int>(Status) << Hint;
502 }
503
504 if (!SS.isValid())
505 return false;
506
507 switch (SS.getScopeRep()->getKind()) {
508 case NestedNameSpecifier::Identifier:
509 case NestedNameSpecifier::TypeSpec:
510 // Per C++11 [over.literal]p2, literal operators can only be declared at
511 // namespace scope. Therefore, this unqualified-id cannot name anything.
512 // Reject it early, because we have no AST representation for this in the
513 // case where the scope is dependent.
514 Diag(Loc: Name.getBeginLoc(), DiagID: diag::err_literal_operator_id_outside_namespace)
515 << SS.getScopeRep();
516 return true;
517
518 case NestedNameSpecifier::Global:
519 case NestedNameSpecifier::Super:
520 case NestedNameSpecifier::Namespace:
521 case NestedNameSpecifier::NamespaceAlias:
522 return false;
523 }
524
525 llvm_unreachable("unknown nested name specifier kind");
526}
527
528ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
529 SourceLocation TypeidLoc,
530 TypeSourceInfo *Operand,
531 SourceLocation RParenLoc) {
532 // C++ [expr.typeid]p4:
533 // The top-level cv-qualifiers of the lvalue expression or the type-id
534 // that is the operand of typeid are always ignored.
535 // If the type of the type-id is a class type or a reference to a class
536 // type, the class shall be completely-defined.
537 Qualifiers Quals;
538 QualType T
539 = Context.getUnqualifiedArrayType(T: Operand->getType().getNonReferenceType(),
540 Quals);
541 if (T->getAs<RecordType>() &&
542 RequireCompleteType(Loc: TypeidLoc, T, DiagID: diag::err_incomplete_typeid))
543 return ExprError();
544
545 if (T->isVariablyModifiedType())
546 return ExprError(Diag(Loc: TypeidLoc, DiagID: diag::err_variably_modified_typeid) << T);
547
548 if (CheckQualifiedFunctionForTypeId(T, Loc: TypeidLoc))
549 return ExprError();
550
551 return new (Context) CXXTypeidExpr(TypeInfoType.withConst(), Operand,
552 SourceRange(TypeidLoc, RParenLoc));
553}
554
555ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
556 SourceLocation TypeidLoc,
557 Expr *E,
558 SourceLocation RParenLoc) {
559 bool WasEvaluated = false;
560 if (E && !E->isTypeDependent()) {
561 if (E->hasPlaceholderType()) {
562 ExprResult result = CheckPlaceholderExpr(E);
563 if (result.isInvalid()) return ExprError();
564 E = result.get();
565 }
566
567 QualType T = E->getType();
568 if (const RecordType *RecordT = T->getAs<RecordType>()) {
569 CXXRecordDecl *RecordD = cast<CXXRecordDecl>(Val: RecordT->getDecl());
570 // C++ [expr.typeid]p3:
571 // [...] If the type of the expression is a class type, the class
572 // shall be completely-defined.
573 if (RequireCompleteType(Loc: TypeidLoc, T, DiagID: diag::err_incomplete_typeid))
574 return ExprError();
575
576 // C++ [expr.typeid]p3:
577 // When typeid is applied to an expression other than an glvalue of a
578 // polymorphic class type [...] [the] expression is an unevaluated
579 // operand. [...]
580 if (RecordD->isPolymorphic() && E->isGLValue()) {
581 if (isUnevaluatedContext()) {
582 // The operand was processed in unevaluated context, switch the
583 // context and recheck the subexpression.
584 ExprResult Result = TransformToPotentiallyEvaluated(E);
585 if (Result.isInvalid())
586 return ExprError();
587 E = Result.get();
588 }
589
590 // We require a vtable to query the type at run time.
591 MarkVTableUsed(Loc: TypeidLoc, Class: RecordD);
592 WasEvaluated = true;
593 }
594 }
595
596 ExprResult Result = CheckUnevaluatedOperand(E);
597 if (Result.isInvalid())
598 return ExprError();
599 E = Result.get();
600
601 // C++ [expr.typeid]p4:
602 // [...] If the type of the type-id is a reference to a possibly
603 // cv-qualified type, the result of the typeid expression refers to a
604 // std::type_info object representing the cv-unqualified referenced
605 // type.
606 Qualifiers Quals;
607 QualType UnqualT = Context.getUnqualifiedArrayType(T, Quals);
608 if (!Context.hasSameType(T1: T, T2: UnqualT)) {
609 T = UnqualT;
610 E = ImpCastExprToType(E, Type: UnqualT, CK: CK_NoOp, VK: E->getValueKind()).get();
611 }
612 }
613
614 if (E->getType()->isVariablyModifiedType())
615 return ExprError(Diag(Loc: TypeidLoc, DiagID: diag::err_variably_modified_typeid)
616 << E->getType());
617 else if (!inTemplateInstantiation() &&
618 E->HasSideEffects(Ctx: Context, IncludePossibleEffects: WasEvaluated)) {
619 // The expression operand for typeid is in an unevaluated expression
620 // context, so side effects could result in unintended consequences.
621 Diag(Loc: E->getExprLoc(), DiagID: WasEvaluated
622 ? diag::warn_side_effects_typeid
623 : diag::warn_side_effects_unevaluated_context);
624 }
625
626 return new (Context) CXXTypeidExpr(TypeInfoType.withConst(), E,
627 SourceRange(TypeidLoc, RParenLoc));
628}
629
630/// ActOnCXXTypeidOfType - Parse typeid( type-id ) or typeid (expression);
631ExprResult
632Sema::ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc,
633 bool isType, void *TyOrExpr, SourceLocation RParenLoc) {
634 // typeid is not supported in OpenCL.
635 if (getLangOpts().OpenCLCPlusPlus) {
636 return ExprError(Diag(Loc: OpLoc, DiagID: diag::err_openclcxx_not_supported)
637 << "typeid");
638 }
639
640 // Find the std::type_info type.
641 if (!getStdNamespace())
642 return ExprError(Diag(Loc: OpLoc, DiagID: diag::err_need_header_before_typeid));
643
644 if (!CXXTypeInfoDecl) {
645 IdentifierInfo *TypeInfoII = &PP.getIdentifierTable().get(Name: "type_info");
646 LookupResult R(*this, TypeInfoII, SourceLocation(), LookupTagName);
647 LookupQualifiedName(R, LookupCtx: getStdNamespace());
648 CXXTypeInfoDecl = R.getAsSingle<RecordDecl>();
649 // Microsoft's typeinfo doesn't have type_info in std but in the global
650 // namespace if _HAS_EXCEPTIONS is defined to 0. See PR13153.
651 if (!CXXTypeInfoDecl && LangOpts.MSVCCompat) {
652 LookupQualifiedName(R, LookupCtx: Context.getTranslationUnitDecl());
653 CXXTypeInfoDecl = R.getAsSingle<RecordDecl>();
654 }
655 if (!CXXTypeInfoDecl)
656 return ExprError(Diag(Loc: OpLoc, DiagID: diag::err_need_header_before_typeid));
657 }
658
659 if (!getLangOpts().RTTI) {
660 return ExprError(Diag(Loc: OpLoc, DiagID: diag::err_no_typeid_with_fno_rtti));
661 }
662
663 QualType TypeInfoType = Context.getTypeDeclType(Decl: CXXTypeInfoDecl);
664
665 if (isType) {
666 // The operand is a type; handle it as such.
667 TypeSourceInfo *TInfo = nullptr;
668 QualType T = GetTypeFromParser(Ty: ParsedType::getFromOpaquePtr(P: TyOrExpr),
669 TInfo: &TInfo);
670 if (T.isNull())
671 return ExprError();
672
673 if (!TInfo)
674 TInfo = Context.getTrivialTypeSourceInfo(T, Loc: OpLoc);
675
676 return BuildCXXTypeId(TypeInfoType, TypeidLoc: OpLoc, Operand: TInfo, RParenLoc);
677 }
678
679 // The operand is an expression.
680 ExprResult Result =
681 BuildCXXTypeId(TypeInfoType, TypeidLoc: OpLoc, E: (Expr *)TyOrExpr, RParenLoc);
682
683 if (!getLangOpts().RTTIData && !Result.isInvalid())
684 if (auto *CTE = dyn_cast<CXXTypeidExpr>(Val: Result.get()))
685 if (CTE->isPotentiallyEvaluated() && !CTE->isMostDerived(Context))
686 Diag(Loc: OpLoc, DiagID: diag::warn_no_typeid_with_rtti_disabled)
687 << (getDiagnostics().getDiagnosticOptions().getFormat() ==
688 DiagnosticOptions::MSVC);
689 return Result;
690}
691
692/// Grabs __declspec(uuid()) off a type, or returns 0 if we cannot resolve to
693/// a single GUID.
694static void
695getUuidAttrOfType(Sema &SemaRef, QualType QT,
696 llvm::SmallSetVector<const UuidAttr *, 1> &UuidAttrs) {
697 // Optionally remove one level of pointer, reference or array indirection.
698 const Type *Ty = QT.getTypePtr();
699 if (QT->isPointerOrReferenceType())
700 Ty = QT->getPointeeType().getTypePtr();
701 else if (QT->isArrayType())
702 Ty = Ty->getBaseElementTypeUnsafe();
703
704 const auto *TD = Ty->getAsTagDecl();
705 if (!TD)
706 return;
707
708 if (const auto *Uuid = TD->getMostRecentDecl()->getAttr<UuidAttr>()) {
709 UuidAttrs.insert(X: Uuid);
710 return;
711 }
712
713 // __uuidof can grab UUIDs from template arguments.
714 if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(Val: TD)) {
715 const TemplateArgumentList &TAL = CTSD->getTemplateArgs();
716 for (const TemplateArgument &TA : TAL.asArray()) {
717 const UuidAttr *UuidForTA = nullptr;
718 if (TA.getKind() == TemplateArgument::Type)
719 getUuidAttrOfType(SemaRef, QT: TA.getAsType(), UuidAttrs);
720 else if (TA.getKind() == TemplateArgument::Declaration)
721 getUuidAttrOfType(SemaRef, QT: TA.getAsDecl()->getType(), UuidAttrs);
722
723 if (UuidForTA)
724 UuidAttrs.insert(X: UuidForTA);
725 }
726 }
727}
728
729ExprResult Sema::BuildCXXUuidof(QualType Type,
730 SourceLocation TypeidLoc,
731 TypeSourceInfo *Operand,
732 SourceLocation RParenLoc) {
733 MSGuidDecl *Guid = nullptr;
734 if (!Operand->getType()->isDependentType()) {
735 llvm::SmallSetVector<const UuidAttr *, 1> UuidAttrs;
736 getUuidAttrOfType(SemaRef&: *this, QT: Operand->getType(), UuidAttrs);
737 if (UuidAttrs.empty())
738 return ExprError(Diag(Loc: TypeidLoc, DiagID: diag::err_uuidof_without_guid));
739 if (UuidAttrs.size() > 1)
740 return ExprError(Diag(Loc: TypeidLoc, DiagID: diag::err_uuidof_with_multiple_guids));
741 Guid = UuidAttrs.back()->getGuidDecl();
742 }
743
744 return new (Context)
745 CXXUuidofExpr(Type, Operand, Guid, SourceRange(TypeidLoc, RParenLoc));
746}
747
748ExprResult Sema::BuildCXXUuidof(QualType Type, SourceLocation TypeidLoc,
749 Expr *E, SourceLocation RParenLoc) {
750 MSGuidDecl *Guid = nullptr;
751 if (!E->getType()->isDependentType()) {
752 if (E->isNullPointerConstant(Ctx&: Context, NPC: Expr::NPC_ValueDependentIsNull)) {
753 // A null pointer results in {00000000-0000-0000-0000-000000000000}.
754 Guid = Context.getMSGuidDecl(Parts: MSGuidDecl::Parts{});
755 } else {
756 llvm::SmallSetVector<const UuidAttr *, 1> UuidAttrs;
757 getUuidAttrOfType(SemaRef&: *this, QT: E->getType(), UuidAttrs);
758 if (UuidAttrs.empty())
759 return ExprError(Diag(Loc: TypeidLoc, DiagID: diag::err_uuidof_without_guid));
760 if (UuidAttrs.size() > 1)
761 return ExprError(Diag(Loc: TypeidLoc, DiagID: diag::err_uuidof_with_multiple_guids));
762 Guid = UuidAttrs.back()->getGuidDecl();
763 }
764 }
765
766 return new (Context)
767 CXXUuidofExpr(Type, E, Guid, SourceRange(TypeidLoc, RParenLoc));
768}
769
770/// ActOnCXXUuidof - Parse __uuidof( type-id ) or __uuidof (expression);
771ExprResult
772Sema::ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc,
773 bool isType, void *TyOrExpr, SourceLocation RParenLoc) {
774 QualType GuidType = Context.getMSGuidType();
775 GuidType.addConst();
776
777 if (isType) {
778 // The operand is a type; handle it as such.
779 TypeSourceInfo *TInfo = nullptr;
780 QualType T = GetTypeFromParser(Ty: ParsedType::getFromOpaquePtr(P: TyOrExpr),
781 TInfo: &TInfo);
782 if (T.isNull())
783 return ExprError();
784
785 if (!TInfo)
786 TInfo = Context.getTrivialTypeSourceInfo(T, Loc: OpLoc);
787
788 return BuildCXXUuidof(Type: GuidType, TypeidLoc: OpLoc, Operand: TInfo, RParenLoc);
789 }
790
791 // The operand is an expression.
792 return BuildCXXUuidof(Type: GuidType, TypeidLoc: OpLoc, E: (Expr*)TyOrExpr, RParenLoc);
793}
794
795ExprResult
796Sema::ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind) {
797 assert((Kind == tok::kw_true || Kind == tok::kw_false) &&
798 "Unknown C++ Boolean value!");
799 return new (Context)
800 CXXBoolLiteralExpr(Kind == tok::kw_true, Context.BoolTy, OpLoc);
801}
802
803ExprResult
804Sema::ActOnCXXNullPtrLiteral(SourceLocation Loc) {
805 return new (Context) CXXNullPtrLiteralExpr(Context.NullPtrTy, Loc);
806}
807
808ExprResult
809Sema::ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *Ex) {
810 bool IsThrownVarInScope = false;
811 if (Ex) {
812 // C++0x [class.copymove]p31:
813 // When certain criteria are met, an implementation is allowed to omit the
814 // copy/move construction of a class object [...]
815 //
816 // - in a throw-expression, when the operand is the name of a
817 // non-volatile automatic object (other than a function or catch-
818 // clause parameter) whose scope does not extend beyond the end of the
819 // innermost enclosing try-block (if there is one), the copy/move
820 // operation from the operand to the exception object (15.1) can be
821 // omitted by constructing the automatic object directly into the
822 // exception object
823 if (const auto *DRE = dyn_cast<DeclRefExpr>(Val: Ex->IgnoreParens()))
824 if (const auto *Var = dyn_cast<VarDecl>(Val: DRE->getDecl());
825 Var && Var->hasLocalStorage() &&
826 !Var->getType().isVolatileQualified()) {
827 for (; S; S = S->getParent()) {
828 if (S->isDeclScope(D: Var)) {
829 IsThrownVarInScope = true;
830 break;
831 }
832
833 // FIXME: Many of the scope checks here seem incorrect.
834 if (S->getFlags() &
835 (Scope::FnScope | Scope::ClassScope | Scope::BlockScope |
836 Scope::ObjCMethodScope | Scope::TryScope))
837 break;
838 }
839 }
840 }
841
842 return BuildCXXThrow(OpLoc, Ex, IsThrownVarInScope);
843}
844
ExprResult Sema::BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
                               bool IsThrownVarInScope) {
  // Build a 'throw' expression (a rethrow when Ex is null) after checking
  // target- and language-mode restrictions and initializing the exception
  // object. IsThrownVarInScope enables NRVO-style copy elision for a thrown
  // local variable per C++11 [class.copymove]p31.
  const llvm::Triple &T = Context.getTargetInfo().getTriple();
  const bool IsOpenMPGPUTarget =
      getLangOpts().OpenMPIsTargetDevice && (T.isNVPTX() || T.isAMDGCN());

  DiagnoseExceptionUse(Loc: OpLoc, /* IsTry= */ false);

  // In OpenMP target regions, we replace 'throw' with a trap on GPU targets.
  if (IsOpenMPGPUTarget)
    targetDiag(Loc: OpLoc, DiagID: diag::warn_throw_not_valid_on_target) << T.str();

  // Exceptions aren't allowed in CUDA device code.
  if (getLangOpts().CUDA)
    CUDA().DiagIfDeviceCode(Loc: OpLoc, DiagID: diag::err_cuda_device_exceptions)
        << "throw" << CUDA().CurrentTarget();

  // 'throw' is not a valid statement inside an OpenMP simd region either.
  if (getCurScope() && getCurScope()->isOpenMPSimdDirectiveScope())
    Diag(Loc: OpLoc, DiagID: diag::err_omp_simd_region_cannot_use_stmt) << "throw";

  // Exceptions that escape a compute construct are ill-formed.
  if (getLangOpts().OpenACC && getCurScope() &&
      getCurScope()->isInOpenACCComputeConstructScope(Flags: Scope::TryScope))
    Diag(Loc: OpLoc, DiagID: diag::err_acc_branch_in_out_compute_construct)
        << /*throw*/ 2 << /*out of*/ 0;

  if (Ex && !Ex->isTypeDependent()) {
    // Initialize the exception result.  This implicitly weeds out
    // abstract types or types with inaccessible copy constructors.

    // C++0x [class.copymove]p31:
    //   When certain criteria are met, an implementation is allowed to omit the
    //   copy/move construction of a class object [...]
    //
    //     - in a throw-expression, when the operand is the name of a
    //       non-volatile automatic object (other than a function or
    //       catch-clause
    //       parameter) whose scope does not extend beyond the end of the
    //       innermost enclosing try-block (if there is one), the copy/move
    //       operation from the operand to the exception object (15.1) can be
    //       omitted by constructing the automatic object directly into the
    //       exception object
    NamedReturnInfo NRInfo =
        IsThrownVarInScope ? getNamedReturnInfo(E&: Ex) : NamedReturnInfo();

    // Validate the operand against the exception-object type before
    // performing the copy/move initialization below.
    QualType ExceptionObjectTy = Context.getExceptionObjectType(T: Ex->getType());
    if (CheckCXXThrowOperand(ThrowLoc: OpLoc, ThrowTy: ExceptionObjectTy, E: Ex))
      return ExprError();

    InitializedEntity Entity =
        InitializedEntity::InitializeException(ThrowLoc: OpLoc, Type: ExceptionObjectTy);
    ExprResult Res = PerformMoveOrCopyInitialization(Entity, NRInfo, Value: Ex);
    if (Res.isInvalid())
      return ExprError();
    Ex = Res.get();
  }

  // PPC MMA non-pointer types are not allowed as throw expr types.
  if (Ex && Context.getTargetInfo().getTriple().isPPC64())
    PPC().CheckPPCMMAType(Type: Ex->getType(), TypeLoc: Ex->getBeginLoc());

  return new (Context)
      CXXThrowExpr(Ex, Context.VoidTy, OpLoc, IsThrownVarInScope);
}
909
910static void
911collectPublicBases(CXXRecordDecl *RD,
912 llvm::DenseMap<CXXRecordDecl *, unsigned> &SubobjectsSeen,
913 llvm::SmallPtrSetImpl<CXXRecordDecl *> &VBases,
914 llvm::SetVector<CXXRecordDecl *> &PublicSubobjectsSeen,
915 bool ParentIsPublic) {
916 for (const CXXBaseSpecifier &BS : RD->bases()) {
917 CXXRecordDecl *BaseDecl = BS.getType()->getAsCXXRecordDecl();
918 bool NewSubobject;
919 // Virtual bases constitute the same subobject. Non-virtual bases are
920 // always distinct subobjects.
921 if (BS.isVirtual())
922 NewSubobject = VBases.insert(Ptr: BaseDecl).second;
923 else
924 NewSubobject = true;
925
926 if (NewSubobject)
927 ++SubobjectsSeen[BaseDecl];
928
929 // Only add subobjects which have public access throughout the entire chain.
930 bool PublicPath = ParentIsPublic && BS.getAccessSpecifier() == AS_public;
931 if (PublicPath)
932 PublicSubobjectsSeen.insert(X: BaseDecl);
933
934 // Recurse on to each base subobject.
935 collectPublicBases(RD: BaseDecl, SubobjectsSeen, VBases, PublicSubobjectsSeen,
936 ParentIsPublic: PublicPath);
937 }
938}
939
940static void getUnambiguousPublicSubobjects(
941 CXXRecordDecl *RD, llvm::SmallVectorImpl<CXXRecordDecl *> &Objects) {
942 llvm::DenseMap<CXXRecordDecl *, unsigned> SubobjectsSeen;
943 llvm::SmallSet<CXXRecordDecl *, 2> VBases;
944 llvm::SetVector<CXXRecordDecl *> PublicSubobjectsSeen;
945 SubobjectsSeen[RD] = 1;
946 PublicSubobjectsSeen.insert(X: RD);
947 collectPublicBases(RD, SubobjectsSeen, VBases, PublicSubobjectsSeen,
948 /*ParentIsPublic=*/true);
949
950 for (CXXRecordDecl *PublicSubobject : PublicSubobjectsSeen) {
951 // Skip ambiguous objects.
952 if (SubobjectsSeen[PublicSubobject] > 1)
953 continue;
954
955 Objects.push_back(Elt: PublicSubobject);
956 }
957}
958
bool Sema::CheckCXXThrowOperand(SourceLocation ThrowLoc,
                                QualType ExceptionObjectTy, Expr *E) {
  // Validate the operand of a throw-expression: completeness, sizelessness,
  // abstractness, destructor/copy-constructor usability, and several
  // target-specific restrictions. Returns true (after diagnosing) on error.
  //
  //   If the type of the exception would be an incomplete type or a pointer
  //   to an incomplete type other than (cv) void the program is ill-formed.
  QualType Ty = ExceptionObjectTy;
  bool isPointer = false;
  if (const PointerType* Ptr = Ty->getAs<PointerType>()) {
    // For pointers, the completeness rules below apply to the pointee.
    Ty = Ptr->getPointeeType();
    isPointer = true;
  }

  // Cannot throw WebAssembly reference type.
  if (Ty.isWebAssemblyReferenceType()) {
    Diag(Loc: ThrowLoc, DiagID: diag::err_wasm_reftype_tc) << 0 << E->getSourceRange();
    return true;
  }

  // Cannot throw WebAssembly table.
  // NOTE(review): this branch appears unreachable as written — the check
  // above already returns for any WebAssembly reference type regardless of
  // isPointer. Presumably a table-specific predicate was intended here;
  // confirm against upstream before relying on this diagnostic.
  if (isPointer && Ty.isWebAssemblyReferenceType()) {
    Diag(Loc: ThrowLoc, DiagID: diag::err_wasm_table_art) << 2 << E->getSourceRange();
    return true;
  }

  if (!isPointer || !Ty->isVoidType()) {
    // 'pointer to (cv) void' is explicitly permitted; everything else must be
    // a complete, non-sizeless, non-abstract type.
    if (RequireCompleteType(Loc: ThrowLoc, T: Ty,
                            DiagID: isPointer ? diag::err_throw_incomplete_ptr
                                      : diag::err_throw_incomplete,
                            Args: E->getSourceRange()))
      return true;

    if (!isPointer && Ty->isSizelessType()) {
      Diag(Loc: ThrowLoc, DiagID: diag::err_throw_sizeless) << Ty << E->getSourceRange();
      return true;
    }

    if (RequireNonAbstractType(Loc: ThrowLoc, T: ExceptionObjectTy,
                               DiagID: diag::err_throw_abstract_type, Args: E))
      return true;
  }

  // If the exception has class type, we need additional handling.
  CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  if (!RD)
    return false;

  // If we are throwing a polymorphic class type or pointer thereof,
  // exception handling will make use of the vtable.
  MarkVTableUsed(Loc: ThrowLoc, Class: RD);

  // If a pointer is thrown, the referenced object will not be destroyed.
  if (isPointer)
    return false;

  // If the class has a destructor, we must be able to call it.
  if (!RD->hasIrrelevantDestructor()) {
    if (CXXDestructorDecl *Destructor = LookupDestructor(Class: RD)) {
      // The runtime destroys the exception object, so the destructor is
      // odr-used here and its accessibility must be checked.
      MarkFunctionReferenced(Loc: E->getExprLoc(), Func: Destructor);
      CheckDestructorAccess(Loc: E->getExprLoc(), Dtor: Destructor,
                            PDiag: PDiag(DiagID: diag::err_access_dtor_exception) << Ty);
      if (DiagnoseUseOfDecl(D: Destructor, Locs: E->getExprLoc()))
        return true;
    }
  }

  // The MSVC ABI creates a list of all types which can catch the exception
  // object.  This list also references the appropriate copy constructor to call
  // if the object is caught by value and has a non-trivial copy constructor.
  if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
    // We are only interested in the public, unambiguous bases contained within
    // the exception object.  Bases which are ambiguous or otherwise
    // inaccessible are not catchable types.
    llvm::SmallVector<CXXRecordDecl *, 2> UnambiguousPublicSubobjects;
    getUnambiguousPublicSubobjects(RD, Objects&: UnambiguousPublicSubobjects);

    for (CXXRecordDecl *Subobject : UnambiguousPublicSubobjects) {
      // Attempt to lookup the copy constructor.  Various pieces of machinery
      // will spring into action, like template instantiation, which means this
      // cannot be a simple walk of the class's decls.  Instead, we must perform
      // lookup and overload resolution.
      CXXConstructorDecl *CD = LookupCopyingConstructor(Class: Subobject, Quals: 0);
      if (!CD || CD->isDeleted())
        continue;

      // Mark the constructor referenced as it is used by this throw expression.
      MarkFunctionReferenced(Loc: E->getExprLoc(), Func: CD);

      // Skip this copy constructor if it is trivial, we don't need to record it
      // in the catchable type data.
      if (CD->isTrivial())
        continue;

      // The copy constructor is non-trivial, create a mapping from this class
      // type to this constructor.
      // N.B.  The selection of copy constructor is not sensitive to this
      // particular throw-site. Lookup will be performed at the catch-site to
      // ensure that the copy constructor is, in fact, accessible (via
      // friendship or any other means).
      Context.addCopyConstructorForExceptionObject(RD: Subobject, CD);

      // We don't keep the instantiated default argument expressions around so
      // we must rebuild them here.
      // (Parameter 0 is the source object; only trailing defaulted
      // parameters, if any, need rebuilding.)
      for (unsigned I = 1, E = CD->getNumParams(); I != E; ++I) {
        if (CheckCXXDefaultArgExpr(CallLoc: ThrowLoc, FD: CD, Param: CD->getParamDecl(i: I)))
          return true;
      }
    }
  }

  // Under the Itanium C++ ABI, memory for the exception object is allocated by
  // the runtime with no ability for the compiler to request additional
  // alignment. Warn if the exception type requires alignment beyond the minimum
  // guaranteed by the target C++ runtime.
  if (Context.getTargetInfo().getCXXABI().isItaniumFamily()) {
    CharUnits TypeAlign = Context.getTypeAlignInChars(T: Ty);
    CharUnits ExnObjAlign = Context.getExnObjectAlignment();
    if (ExnObjAlign < TypeAlign) {
      Diag(Loc: ThrowLoc, DiagID: diag::warn_throw_underaligned_obj);
      Diag(Loc: ThrowLoc, DiagID: diag::note_throw_underaligned_obj)
          << Ty << (unsigned)TypeAlign.getQuantity()
          << (unsigned)ExnObjAlign.getQuantity();
    }
  }
  // With -fassume-nothrow-exception-dtor, the exception object's destructor
  // must be known not to throw: reject destructors whose resolved exception
  // specification is potentially throwing.
  if (!isPointer && getLangOpts().AssumeNothrowExceptionDtor) {
    if (CXXDestructorDecl *Dtor = RD->getDestructor()) {
      auto Ty = Dtor->getType();
      if (auto *FT = Ty.getTypePtr()->getAs<FunctionProtoType>()) {
        if (!isUnresolvedExceptionSpec(ESpecType: FT->getExceptionSpecType()) &&
            !FT->isNothrow())
          Diag(Loc: ThrowLoc, DiagID: diag::err_throw_object_throwing_dtor) << RD;
      }
    }
  }

  return false;
}
1094
// Compute the type of 'this' inside (possibly nested) lambdas: if any
// enclosing lambda captures '*this' by copy, the pointee type picks up the
// constness of that lambda's call operator. Returns the (possibly adjusted)
// pointer type, or ThisTy unchanged if no by-copy capture is found.
static QualType adjustCVQualifiersForCXXThisWithinLambda(
    ArrayRef<FunctionScopeInfo *> FunctionScopes, QualType ThisTy,
    DeclContext *CurSemaContext, ASTContext &ASTCtx) {

  QualType ClassType = ThisTy->getPointeeType();
  LambdaScopeInfo *CurLSI = nullptr;
  DeclContext *CurDC = CurSemaContext;

  // Iterate through the stack of lambdas starting from the innermost lambda to
  // the outermost lambda, checking if '*this' is ever captured by copy - since
  // that could change the cv-qualifiers of the '*this' object.
  // The object referred to by '*this' starts out with the cv-qualifiers of its
  // member function.  We then start with the innermost lambda and iterate
  // outward checking to see if any lambda performs a by-copy capture of '*this'
  // - and if so, any nested lambda must respect the 'constness' of that
  // capturing lamdbda's call operator.
  //

  // Since the FunctionScopeInfo stack is representative of the lexical
  // nesting of the lambda expressions during initial parsing (and is the best
  // place for querying information about captures about lambdas that are
  // partially processed) and perhaps during instantiation of function templates
  // that contain lambda expressions that need to be transformed BUT not
  // necessarily during instantiation of a nested generic lambda's function call
  // operator (which might even be instantiated at the end of the TU) - at which
  // time the DeclContext tree is mature enough to query capture information
  // reliably - we use a two pronged approach to walk through all the lexically
  // enclosing lambda expressions:
  //
  //  1) Climb down the FunctionScopeInfo stack as long as each item represents
  //  a Lambda (i.e. LambdaScopeInfo) AND each LSI's 'closure-type' is lexically
  //  enclosed by the call-operator of the LSI below it on the stack (while
  //  tracking the enclosing DC for step 2 if needed).  Note the topmost LSI on
  //  the stack represents the innermost lambda.
  //
  //  2) If we run out of enclosing LSI's, check if the enclosing DeclContext
  //  represents a lambda's call operator.  If it does, we must be instantiating
  //  a generic lambda's call operator (represented by the Current LSI, and
  //  should be the only scenario where an inconsistency between the LSI and the
  //  DeclContext should occur), so climb out the DeclContexts if they
  //  represent lambdas, while querying the corresponding closure types
  //  regarding capture information.

  // 1) Climb down the function scope info stack.
  //
  // Note the unusual loop structure: CurLSI is assigned inside the body, the
  // condition therefore tests the LSI of the *previous* iteration, and the
  // increment advances CurDC one lambda-aware parent at a time.
  for (int I = FunctionScopes.size();
       I-- && isa<LambdaScopeInfo>(Val: FunctionScopes[I]) &&
       (!CurLSI || !CurLSI->Lambda || CurLSI->Lambda->getDeclContext() ==
                       cast<LambdaScopeInfo>(Val: FunctionScopes[I])->CallOperator);
       CurDC = getLambdaAwareParentOfDeclContext(DC: CurDC)) {
    CurLSI = cast<LambdaScopeInfo>(Val: FunctionScopes[I]);

    if (!CurLSI->isCXXThisCaptured())
      continue;

    auto C = CurLSI->getCXXThisCapture();

    // A by-copy capture of '*this' fixes the cv-qualification: const iff the
    // capturing lambda's call operator treats captures as const.
    if (C.isCopyCapture()) {
      if (CurLSI->lambdaCaptureShouldBeConst())
        ClassType.addConst();
      return ASTCtx.getPointerType(T: ClassType);
    }
  }

  // 2) We've run out of ScopeInfos but check 1. if CurDC is a lambda (which
  // can happen during instantiation of its nested generic lambda call
  // operator); 2. if we're in a lambda scope (lambda body).
  if (CurLSI && isLambdaCallOperator(DC: CurDC)) {
    assert(isGenericLambdaCallOperatorSpecialization(CurLSI->CallOperator) &&
           "While computing 'this' capture-type for a generic lambda, when we "
           "run out of enclosing LSI's, yet the enclosing DC is a "
           "lambda-call-operator we must be (i.e. Current LSI) in a generic "
           "lambda call oeprator");
    assert(CurDC == getLambdaAwareParentOfDeclContext(CurLSI->CallOperator));

    // Query a closure type directly for a capture of 'this'/'*this'; reports
    // through the out-parameters whether the capture is by copy and whether
    // the closure's call operator is const.
    auto IsThisCaptured =
        [](CXXRecordDecl *Closure, bool &IsByCopy, bool &IsConst) {
          IsConst = false;
          IsByCopy = false;
          for (auto &&C : Closure->captures()) {
            if (C.capturesThis()) {
              if (C.getCaptureKind() == LCK_StarThis)
                IsByCopy = true;
              if (Closure->getLambdaCallOperator()->isConst())
                IsConst = true;
              return true;
            }
          }
          return false;
        };

    bool IsByCopyCapture = false;
    bool IsConstCapture = false;
    CXXRecordDecl *Closure = cast<CXXRecordDecl>(Val: CurDC->getParent());
    while (Closure &&
           IsThisCaptured(Closure, IsByCopyCapture, IsConstCapture)) {
      if (IsByCopyCapture) {
        if (IsConstCapture)
          ClassType.addConst();
        return ASTCtx.getPointerType(T: ClassType);
      }
      // Climb to the next enclosing closure type, if any.
      Closure = isLambdaCallOperator(DC: Closure->getParent())
                    ? cast<CXXRecordDecl>(Val: Closure->getParent()->getParent())
                    : nullptr;
    }
  }
  return ThisTy;
}
1202
1203QualType Sema::getCurrentThisType() {
1204 DeclContext *DC = getFunctionLevelDeclContext();
1205 QualType ThisTy = CXXThisTypeOverride;
1206
1207 if (CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(Val: DC)) {
1208 if (method && method->isImplicitObjectMemberFunction())
1209 ThisTy = method->getThisType().getNonReferenceType();
1210 }
1211
1212 if (ThisTy.isNull() && isLambdaCallWithImplicitObjectParameter(DC: CurContext) &&
1213 inTemplateInstantiation() && isa<CXXRecordDecl>(Val: DC)) {
1214
1215 // This is a lambda call operator that is being instantiated as a default
1216 // initializer. DC must point to the enclosing class type, so we can recover
1217 // the 'this' type from it.
1218 QualType ClassTy = Context.getTypeDeclType(Decl: cast<CXXRecordDecl>(Val: DC));
1219 // There are no cv-qualifiers for 'this' within default initializers,
1220 // per [expr.prim.general]p4.
1221 ThisTy = Context.getPointerType(T: ClassTy);
1222 }
1223
1224 // If we are within a lambda's call operator, the cv-qualifiers of 'this'
1225 // might need to be adjusted if the lambda or any of its enclosing lambda's
1226 // captures '*this' by copy.
1227 if (!ThisTy.isNull() && isLambdaCallOperator(DC: CurContext))
1228 return adjustCVQualifiersForCXXThisWithinLambda(FunctionScopes, ThisTy,
1229 CurSemaContext: CurContext, ASTCtx&: Context);
1230 return ThisTy;
1231}
1232
1233Sema::CXXThisScopeRAII::CXXThisScopeRAII(Sema &S,
1234 Decl *ContextDecl,
1235 Qualifiers CXXThisTypeQuals,
1236 bool Enabled)
1237 : S(S), OldCXXThisTypeOverride(S.CXXThisTypeOverride), Enabled(false)
1238{
1239 if (!Enabled || !ContextDecl)
1240 return;
1241
1242 CXXRecordDecl *Record = nullptr;
1243 if (ClassTemplateDecl *Template = dyn_cast<ClassTemplateDecl>(Val: ContextDecl))
1244 Record = Template->getTemplatedDecl();
1245 else
1246 Record = cast<CXXRecordDecl>(Val: ContextDecl);
1247
1248 QualType T = S.Context.getRecordType(Decl: Record);
1249 T = S.getASTContext().getQualifiedType(T, Qs: CXXThisTypeQuals);
1250
1251 S.CXXThisTypeOverride =
1252 S.Context.getLangOpts().HLSL ? T : S.Context.getPointerType(T);
1253
1254 this->Enabled = true;
1255}
1256
1257
1258Sema::CXXThisScopeRAII::~CXXThisScopeRAII() {
1259 if (Enabled) {
1260 S.CXXThisTypeOverride = OldCXXThisTypeOverride;
1261 }
1262}
1263
1264static void buildLambdaThisCaptureFixit(Sema &Sema, LambdaScopeInfo *LSI) {
1265 SourceLocation DiagLoc = LSI->IntroducerRange.getEnd();
1266 assert(!LSI->isCXXThisCaptured());
1267 // [=, this] {}; // until C++20: Error: this when = is the default
1268 if (LSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_LambdaByval &&
1269 !Sema.getLangOpts().CPlusPlus20)
1270 return;
1271 Sema.Diag(Loc: DiagLoc, DiagID: diag::note_lambda_this_capture_fixit)
1272 << FixItHint::CreateInsertion(
1273 InsertionLoc: DiagLoc, Code: LSI->NumExplicitCaptures > 0 ? ", this" : "this");
1274}
1275
1276bool Sema::CheckCXXThisCapture(SourceLocation Loc, const bool Explicit,
1277 bool BuildAndDiagnose, const unsigned *const FunctionScopeIndexToStopAt,
1278 const bool ByCopy) {
1279 // We don't need to capture this in an unevaluated context.
1280 if (isUnevaluatedContext() && !Explicit)
1281 return true;
1282
1283 assert((!ByCopy || Explicit) && "cannot implicitly capture *this by value");
1284
1285 const int MaxFunctionScopesIndex = FunctionScopeIndexToStopAt
1286 ? *FunctionScopeIndexToStopAt
1287 : FunctionScopes.size() - 1;
1288
1289 // Check that we can capture the *enclosing object* (referred to by '*this')
1290 // by the capturing-entity/closure (lambda/block/etc) at
1291 // MaxFunctionScopesIndex-deep on the FunctionScopes stack.
1292
1293 // Note: The *enclosing object* can only be captured by-value by a
1294 // closure that is a lambda, using the explicit notation:
1295 // [*this] { ... }.
1296 // Every other capture of the *enclosing object* results in its by-reference
1297 // capture.
1298
1299 // For a closure 'L' (at MaxFunctionScopesIndex in the FunctionScopes
1300 // stack), we can capture the *enclosing object* only if:
1301 // - 'L' has an explicit byref or byval capture of the *enclosing object*
1302 // - or, 'L' has an implicit capture.
1303 // AND
1304 // -- there is no enclosing closure
1305 // -- or, there is some enclosing closure 'E' that has already captured the
1306 // *enclosing object*, and every intervening closure (if any) between 'E'
1307 // and 'L' can implicitly capture the *enclosing object*.
1308 // -- or, every enclosing closure can implicitly capture the
1309 // *enclosing object*
1310
1311
1312 unsigned NumCapturingClosures = 0;
1313 for (int idx = MaxFunctionScopesIndex; idx >= 0; idx--) {
1314 if (CapturingScopeInfo *CSI =
1315 dyn_cast<CapturingScopeInfo>(Val: FunctionScopes[idx])) {
1316 if (CSI->CXXThisCaptureIndex != 0) {
1317 // 'this' is already being captured; there isn't anything more to do.
1318 CSI->Captures[CSI->CXXThisCaptureIndex - 1].markUsed(IsODRUse: BuildAndDiagnose);
1319 break;
1320 }
1321 LambdaScopeInfo *LSI = dyn_cast<LambdaScopeInfo>(Val: CSI);
1322 if (LSI && isGenericLambdaCallOperatorSpecialization(MD: LSI->CallOperator)) {
1323 // This context can't implicitly capture 'this'; fail out.
1324 if (BuildAndDiagnose) {
1325 LSI->CallOperator->setInvalidDecl();
1326 Diag(Loc, DiagID: diag::err_this_capture)
1327 << (Explicit && idx == MaxFunctionScopesIndex);
1328 if (!Explicit)
1329 buildLambdaThisCaptureFixit(Sema&: *this, LSI);
1330 }
1331 return true;
1332 }
1333 if (CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_LambdaByref ||
1334 CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_LambdaByval ||
1335 CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_Block ||
1336 CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_CapturedRegion ||
1337 (Explicit && idx == MaxFunctionScopesIndex)) {
1338 // Regarding (Explicit && idx == MaxFunctionScopesIndex): only the first
1339 // iteration through can be an explicit capture, all enclosing closures,
1340 // if any, must perform implicit captures.
1341
1342 // This closure can capture 'this'; continue looking upwards.
1343 NumCapturingClosures++;
1344 continue;
1345 }
1346 // This context can't implicitly capture 'this'; fail out.
1347 if (BuildAndDiagnose) {
1348 LSI->CallOperator->setInvalidDecl();
1349 Diag(Loc, DiagID: diag::err_this_capture)
1350 << (Explicit && idx == MaxFunctionScopesIndex);
1351 }
1352 if (!Explicit)
1353 buildLambdaThisCaptureFixit(Sema&: *this, LSI);
1354 return true;
1355 }
1356 break;
1357 }
1358 if (!BuildAndDiagnose) return false;
1359
1360 // If we got here, then the closure at MaxFunctionScopesIndex on the
1361 // FunctionScopes stack, can capture the *enclosing object*, so capture it
1362 // (including implicit by-reference captures in any enclosing closures).
1363
1364 // In the loop below, respect the ByCopy flag only for the closure requesting
1365 // the capture (i.e. first iteration through the loop below). Ignore it for
1366 // all enclosing closure's up to NumCapturingClosures (since they must be
1367 // implicitly capturing the *enclosing object* by reference (see loop
1368 // above)).
1369 assert((!ByCopy ||
1370 isa<LambdaScopeInfo>(FunctionScopes[MaxFunctionScopesIndex])) &&
1371 "Only a lambda can capture the enclosing object (referred to by "
1372 "*this) by copy");
1373 QualType ThisTy = getCurrentThisType();
1374 for (int idx = MaxFunctionScopesIndex; NumCapturingClosures;
1375 --idx, --NumCapturingClosures) {
1376 CapturingScopeInfo *CSI = cast<CapturingScopeInfo>(Val: FunctionScopes[idx]);
1377
1378 // The type of the corresponding data member (not a 'this' pointer if 'by
1379 // copy').
1380 QualType CaptureType = ByCopy ? ThisTy->getPointeeType() : ThisTy;
1381
1382 bool isNested = NumCapturingClosures > 1;
1383 CSI->addThisCapture(isNested, Loc, CaptureType, ByCopy);
1384 }
1385 return false;
1386}
1387
1388ExprResult Sema::ActOnCXXThis(SourceLocation Loc) {
1389 // C++20 [expr.prim.this]p1:
1390 // The keyword this names a pointer to the object for which an
1391 // implicit object member function is invoked or a non-static
1392 // data member's initializer is evaluated.
1393 QualType ThisTy = getCurrentThisType();
1394
1395 if (CheckCXXThisType(Loc, Type: ThisTy))
1396 return ExprError();
1397
1398 return BuildCXXThisExpr(Loc, Type: ThisTy, /*IsImplicit=*/false);
1399}
1400
1401bool Sema::CheckCXXThisType(SourceLocation Loc, QualType Type) {
1402 if (!Type.isNull())
1403 return false;
1404
1405 // C++20 [expr.prim.this]p3:
1406 // If a declaration declares a member function or member function template
1407 // of a class X, the expression this is a prvalue of type
1408 // "pointer to cv-qualifier-seq X" wherever X is the current class between
1409 // the optional cv-qualifier-seq and the end of the function-definition,
1410 // member-declarator, or declarator. It shall not appear within the
1411 // declaration of either a static member function or an explicit object
1412 // member function of the current class (although its type and value
1413 // category are defined within such member functions as they are within
1414 // an implicit object member function).
1415 DeclContext *DC = getFunctionLevelDeclContext();
1416 const auto *Method = dyn_cast<CXXMethodDecl>(Val: DC);
1417 if (Method && Method->isExplicitObjectMemberFunction()) {
1418 Diag(Loc, DiagID: diag::err_invalid_this_use) << 1;
1419 } else if (Method && isLambdaCallWithExplicitObjectParameter(DC: CurContext)) {
1420 Diag(Loc, DiagID: diag::err_invalid_this_use) << 1;
1421 } else {
1422 Diag(Loc, DiagID: diag::err_invalid_this_use) << 0;
1423 }
1424 return true;
1425}
1426
1427Expr *Sema::BuildCXXThisExpr(SourceLocation Loc, QualType Type,
1428 bool IsImplicit) {
1429 auto *This = CXXThisExpr::Create(Ctx: Context, L: Loc, Ty: Type, IsImplicit);
1430 MarkThisReferenced(This);
1431 return This;
1432}
1433
void Sema::MarkThisReferenced(CXXThisExpr *This) {
  // Record a use of 'this': trigger capture by enclosing closures and, when
  // 'this' is captured by value in a lambda whose explicit object parameter
  // is dependent, mark the expression accordingly.
  CheckCXXThisCapture(Loc: This->getExprLoc());
  if (This->isTypeDependent())
    return;

  // Check if 'this' is captured by value in a lambda with a dependent explicit
  // object parameter, and mark it as type-dependent as well if so.
  auto IsDependent = [&]() {
    // Walk the function scopes from innermost to outermost, considering only
    // lambda scopes.
    for (auto *Scope : llvm::reverse(C&: FunctionScopes)) {
      auto *LSI = dyn_cast<sema::LambdaScopeInfo>(Val: Scope);
      if (!LSI)
        continue;

      // A fully-parsed lambda that does not lexically enclose the current
      // context ends the walk.
      if (LSI->Lambda && !LSI->Lambda->Encloses(DC: CurContext) &&
          LSI->AfterParameterList)
        return false;

      // If this lambda captures 'this' by value, then 'this' is dependent iff
      // this lambda has a dependent explicit object parameter. If we can't
      // determine whether it does (e.g. because the CXXMethodDecl's type is
      // null), assume it doesn't.
      if (LSI->isCXXThisCaptured()) {
        if (!LSI->getCXXThisCapture().isCopyCapture())
          continue;

        const auto *MD = LSI->CallOperator;
        if (MD->getType().isNull())
          return false;

        const auto *Ty = MD->getType()->getAs<FunctionProtoType>();
        return Ty && MD->isExplicitObjectMemberFunction() &&
               Ty->getParamType(i: 0)->isDependentType();
      }
    }
    return false;
  }();

  This->setCapturedByCopyInLambdaWithExplicitObjectParameter(IsDependent);
}
1473
1474bool Sema::isThisOutsideMemberFunctionBody(QualType BaseType) {
1475 // If we're outside the body of a member function, then we'll have a specified
1476 // type for 'this'.
1477 if (CXXThisTypeOverride.isNull())
1478 return false;
1479
1480 // Determine whether we're looking into a class that's currently being
1481 // defined.
1482 CXXRecordDecl *Class = BaseType->getAsCXXRecordDecl();
1483 return Class && Class->isBeingDefined();
1484}
1485
1486ExprResult
1487Sema::ActOnCXXTypeConstructExpr(ParsedType TypeRep,
1488 SourceLocation LParenOrBraceLoc,
1489 MultiExprArg exprs,
1490 SourceLocation RParenOrBraceLoc,
1491 bool ListInitialization) {
1492 if (!TypeRep)
1493 return ExprError();
1494
1495 TypeSourceInfo *TInfo;
1496 QualType Ty = GetTypeFromParser(Ty: TypeRep, TInfo: &TInfo);
1497 if (!TInfo)
1498 TInfo = Context.getTrivialTypeSourceInfo(T: Ty, Loc: SourceLocation());
1499
1500 auto Result = BuildCXXTypeConstructExpr(Type: TInfo, LParenLoc: LParenOrBraceLoc, Exprs: exprs,
1501 RParenLoc: RParenOrBraceLoc, ListInitialization);
1502 if (Result.isInvalid())
1503 Result = CreateRecoveryExpr(Begin: TInfo->getTypeLoc().getBeginLoc(),
1504 End: RParenOrBraceLoc, SubExprs: exprs, T: Ty);
1505 return Result;
1506}
1507
1508ExprResult
1509Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
1510 SourceLocation LParenOrBraceLoc,
1511 MultiExprArg Exprs,
1512 SourceLocation RParenOrBraceLoc,
1513 bool ListInitialization) {
1514 QualType Ty = TInfo->getType();
1515 SourceLocation TyBeginLoc = TInfo->getTypeLoc().getBeginLoc();
1516 SourceRange FullRange = SourceRange(TyBeginLoc, RParenOrBraceLoc);
1517
1518 InitializedEntity Entity =
1519 InitializedEntity::InitializeTemporary(Context, TypeInfo: TInfo);
1520 InitializationKind Kind =
1521 Exprs.size()
1522 ? ListInitialization
1523 ? InitializationKind::CreateDirectList(
1524 InitLoc: TyBeginLoc, LBraceLoc: LParenOrBraceLoc, RBraceLoc: RParenOrBraceLoc)
1525 : InitializationKind::CreateDirect(InitLoc: TyBeginLoc, LParenLoc: LParenOrBraceLoc,
1526 RParenLoc: RParenOrBraceLoc)
1527 : InitializationKind::CreateValue(InitLoc: TyBeginLoc, LParenLoc: LParenOrBraceLoc,
1528 RParenLoc: RParenOrBraceLoc);
1529
1530 // C++17 [expr.type.conv]p1:
1531 // If the type is a placeholder for a deduced class type, [...perform class
1532 // template argument deduction...]
1533 // C++23:
1534 // Otherwise, if the type contains a placeholder type, it is replaced by the
1535 // type determined by placeholder type deduction.
1536 DeducedType *Deduced = Ty->getContainedDeducedType();
1537 if (Deduced && !Deduced->isDeduced() &&
1538 isa<DeducedTemplateSpecializationType>(Val: Deduced)) {
1539 Ty = DeduceTemplateSpecializationFromInitializer(TInfo, Entity,
1540 Kind, Init: Exprs);
1541 if (Ty.isNull())
1542 return ExprError();
1543 Entity = InitializedEntity::InitializeTemporary(TypeInfo: TInfo, Type: Ty);
1544 } else if (Deduced && !Deduced->isDeduced()) {
1545 MultiExprArg Inits = Exprs;
1546 if (ListInitialization) {
1547 auto *ILE = cast<InitListExpr>(Val: Exprs[0]);
1548 Inits = MultiExprArg(ILE->getInits(), ILE->getNumInits());
1549 }
1550
1551 if (Inits.empty())
1552 return ExprError(Diag(Loc: TyBeginLoc, DiagID: diag::err_auto_expr_init_no_expression)
1553 << Ty << FullRange);
1554 if (Inits.size() > 1) {
1555 Expr *FirstBad = Inits[1];
1556 return ExprError(Diag(Loc: FirstBad->getBeginLoc(),
1557 DiagID: diag::err_auto_expr_init_multiple_expressions)
1558 << Ty << FullRange);
1559 }
1560 if (getLangOpts().CPlusPlus23) {
1561 if (Ty->getAs<AutoType>())
1562 Diag(Loc: TyBeginLoc, DiagID: diag::warn_cxx20_compat_auto_expr) << FullRange;
1563 }
1564 Expr *Deduce = Inits[0];
1565 if (isa<InitListExpr>(Val: Deduce))
1566 return ExprError(
1567 Diag(Loc: Deduce->getBeginLoc(), DiagID: diag::err_auto_expr_init_paren_braces)
1568 << ListInitialization << Ty << FullRange);
1569 QualType DeducedType;
1570 TemplateDeductionInfo Info(Deduce->getExprLoc());
1571 TemplateDeductionResult Result =
1572 DeduceAutoType(AutoTypeLoc: TInfo->getTypeLoc(), Initializer: Deduce, Result&: DeducedType, Info);
1573 if (Result != TemplateDeductionResult::Success &&
1574 Result != TemplateDeductionResult::AlreadyDiagnosed)
1575 return ExprError(Diag(Loc: TyBeginLoc, DiagID: diag::err_auto_expr_deduction_failure)
1576 << Ty << Deduce->getType() << FullRange
1577 << Deduce->getSourceRange());
1578 if (DeducedType.isNull()) {
1579 assert(Result == TemplateDeductionResult::AlreadyDiagnosed);
1580 return ExprError();
1581 }
1582
1583 Ty = DeducedType;
1584 Entity = InitializedEntity::InitializeTemporary(TypeInfo: TInfo, Type: Ty);
1585 }
1586
1587 if (Ty->isDependentType() || CallExpr::hasAnyTypeDependentArguments(Exprs))
1588 return CXXUnresolvedConstructExpr::Create(
1589 Context, T: Ty.getNonReferenceType(), TSI: TInfo, LParenLoc: LParenOrBraceLoc, Args: Exprs,
1590 RParenLoc: RParenOrBraceLoc, IsListInit: ListInitialization);
1591
1592 // C++ [expr.type.conv]p1:
1593 // If the expression list is a parenthesized single expression, the type
1594 // conversion expression is equivalent (in definedness, and if defined in
1595 // meaning) to the corresponding cast expression.
1596 if (Exprs.size() == 1 && !ListInitialization &&
1597 !isa<InitListExpr>(Val: Exprs[0])) {
1598 Expr *Arg = Exprs[0];
1599 return BuildCXXFunctionalCastExpr(TInfo, Type: Ty, LParenLoc: LParenOrBraceLoc, CastExpr: Arg,
1600 RParenLoc: RParenOrBraceLoc);
1601 }
1602
1603 // For an expression of the form T(), T shall not be an array type.
1604 QualType ElemTy = Ty;
1605 if (Ty->isArrayType()) {
1606 if (!ListInitialization)
1607 return ExprError(Diag(Loc: TyBeginLoc, DiagID: diag::err_value_init_for_array_type)
1608 << FullRange);
1609 ElemTy = Context.getBaseElementType(QT: Ty);
1610 }
1611
1612 // Only construct objects with object types.
1613 // The standard doesn't explicitly forbid function types here, but that's an
1614 // obvious oversight, as there's no way to dynamically construct a function
1615 // in general.
1616 if (Ty->isFunctionType())
1617 return ExprError(Diag(Loc: TyBeginLoc, DiagID: diag::err_init_for_function_type)
1618 << Ty << FullRange);
1619
1620 // C++17 [expr.type.conv]p2, per DR2351:
1621 // If the type is cv void and the initializer is () or {}, the expression is
1622 // a prvalue of the specified type that performs no initialization.
1623 if (Ty->isVoidType()) {
1624 if (Exprs.empty())
1625 return new (Context) CXXScalarValueInitExpr(
1626 Ty.getUnqualifiedType(), TInfo, Kind.getRange().getEnd());
1627 if (ListInitialization &&
1628 cast<InitListExpr>(Val: Exprs[0])->getNumInits() == 0) {
1629 return CXXFunctionalCastExpr::Create(
1630 Context, T: Ty.getUnqualifiedType(), VK: VK_PRValue, Written: TInfo, Kind: CK_ToVoid,
1631 Op: Exprs[0], /*Path=*/nullptr, FPO: CurFPFeatureOverrides(),
1632 LPLoc: Exprs[0]->getBeginLoc(), RPLoc: Exprs[0]->getEndLoc());
1633 }
1634 } else if (RequireCompleteType(Loc: TyBeginLoc, T: ElemTy,
1635 DiagID: diag::err_invalid_incomplete_type_use,
1636 Args: FullRange))
1637 return ExprError();
1638
1639 // Otherwise, the expression is a prvalue of the specified type whose
1640 // result object is direct-initialized (11.6) with the initializer.
1641 InitializationSequence InitSeq(*this, Entity, Kind, Exprs);
1642 ExprResult Result = InitSeq.Perform(S&: *this, Entity, Kind, Args: Exprs);
1643
1644 if (Result.isInvalid())
1645 return Result;
1646
1647 Expr *Inner = Result.get();
1648 if (CXXBindTemporaryExpr *BTE = dyn_cast_or_null<CXXBindTemporaryExpr>(Val: Inner))
1649 Inner = BTE->getSubExpr();
1650 if (auto *CE = dyn_cast<ConstantExpr>(Val: Inner);
1651 CE && CE->isImmediateInvocation())
1652 Inner = CE->getSubExpr();
1653 if (!isa<CXXTemporaryObjectExpr>(Val: Inner) &&
1654 !isa<CXXScalarValueInitExpr>(Val: Inner)) {
1655 // If we created a CXXTemporaryObjectExpr, that node also represents the
1656 // functional cast. Otherwise, create an explicit cast to represent
1657 // the syntactic form of a functional-style cast that was used here.
1658 //
1659 // FIXME: Creating a CXXFunctionalCastExpr around a CXXConstructExpr
1660 // would give a more consistent AST representation than using a
1661 // CXXTemporaryObjectExpr. It's also weird that the functional cast
1662 // is sometimes handled by initialization and sometimes not.
1663 QualType ResultType = Result.get()->getType();
1664 SourceRange Locs = ListInitialization
1665 ? SourceRange()
1666 : SourceRange(LParenOrBraceLoc, RParenOrBraceLoc);
1667 Result = CXXFunctionalCastExpr::Create(
1668 Context, T: ResultType, VK: Expr::getValueKindForType(T: Ty), Written: TInfo, Kind: CK_NoOp,
1669 Op: Result.get(), /*Path=*/nullptr, FPO: CurFPFeatureOverrides(),
1670 LPLoc: Locs.getBegin(), RPLoc: Locs.getEnd());
1671 }
1672
1673 return Result;
1674}
1675
1676bool Sema::isUsualDeallocationFunction(const CXXMethodDecl *Method) {
1677 // [CUDA] Ignore this function, if we can't call it.
1678 const FunctionDecl *Caller = getCurFunctionDecl(/*AllowLambda=*/true);
1679 if (getLangOpts().CUDA) {
1680 auto CallPreference = CUDA().IdentifyPreference(Caller, Callee: Method);
1681 // If it's not callable at all, it's not the right function.
1682 if (CallPreference < SemaCUDA::CFP_WrongSide)
1683 return false;
1684 if (CallPreference == SemaCUDA::CFP_WrongSide) {
1685 // Maybe. We have to check if there are better alternatives.
1686 DeclContext::lookup_result R =
1687 Method->getDeclContext()->lookup(Name: Method->getDeclName());
1688 for (const auto *D : R) {
1689 if (const auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
1690 if (CUDA().IdentifyPreference(Caller, Callee: FD) > SemaCUDA::CFP_WrongSide)
1691 return false;
1692 }
1693 }
1694 // We've found no better variants.
1695 }
1696 }
1697
1698 SmallVector<const FunctionDecl*, 4> PreventedBy;
1699 bool Result = Method->isUsualDeallocationFunction(PreventedBy);
1700
1701 if (Result || !getLangOpts().CUDA || PreventedBy.empty())
1702 return Result;
1703
1704 // In case of CUDA, return true if none of the 1-argument deallocator
1705 // functions are actually callable.
1706 return llvm::none_of(Range&: PreventedBy, P: [&](const FunctionDecl *FD) {
1707 assert(FD->getNumParams() == 1 &&
1708 "Only single-operand functions should be in PreventedBy");
1709 return CUDA().IdentifyPreference(Caller, Callee: FD) >= SemaCUDA::CFP_HostDevice;
1710 });
1711}
1712
1713/// Determine whether the given function is a non-placement
1714/// deallocation function.
1715static bool isNonPlacementDeallocationFunction(Sema &S, FunctionDecl *FD) {
1716 if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Val: FD))
1717 return S.isUsualDeallocationFunction(Method);
1718
1719 if (!FD->getDeclName().isAnyOperatorDelete())
1720 return false;
1721
1722 if (FD->isTypeAwareOperatorNewOrDelete())
1723 return FunctionDecl::RequiredTypeAwareDeleteParameterCount ==
1724 FD->getNumParams();
1725
1726 unsigned UsualParams = 1;
1727 if (S.getLangOpts().SizedDeallocation && UsualParams < FD->getNumParams() &&
1728 S.Context.hasSameUnqualifiedType(
1729 T1: FD->getParamDecl(i: UsualParams)->getType(),
1730 T2: S.Context.getSizeType()))
1731 ++UsualParams;
1732
1733 if (S.getLangOpts().AlignedAllocation && UsualParams < FD->getNumParams() &&
1734 S.Context.hasSameUnqualifiedType(
1735 T1: FD->getParamDecl(i: UsualParams)->getType(),
1736 T2: S.Context.getTypeDeclType(Decl: S.getStdAlignValT())))
1737 ++UsualParams;
1738
1739 return UsualParams == FD->getNumParams();
1740}
1741
1742namespace {
1743 struct UsualDeallocFnInfo {
1744 UsualDeallocFnInfo()
1745 : Found(), FD(nullptr),
1746 IDP(AlignedAllocationMode::No, SizedDeallocationMode::No) {}
1747 UsualDeallocFnInfo(Sema &S, DeclAccessPair Found, QualType AllocType,
1748 SourceLocation Loc)
1749 : Found(Found), FD(dyn_cast<FunctionDecl>(Val: Found->getUnderlyingDecl())),
1750 Destroying(false),
1751 IDP({AllocType, TypeAwareAllocationMode::No,
1752 AlignedAllocationMode::No, SizedDeallocationMode::No}),
1753 CUDAPref(SemaCUDA::CFP_Native) {
1754 // A function template declaration is only a usual deallocation function
1755 // if it is a typed delete.
1756 if (!FD) {
1757 if (AllocType.isNull())
1758 return;
1759 auto *FTD = dyn_cast<FunctionTemplateDecl>(Val: Found->getUnderlyingDecl());
1760 if (!FTD)
1761 return;
1762 FunctionDecl *InstantiatedDecl =
1763 S.BuildTypeAwareUsualDelete(FnDecl: FTD, AllocType, Loc);
1764 if (!InstantiatedDecl)
1765 return;
1766 FD = InstantiatedDecl;
1767 }
1768 unsigned NumBaseParams = 1;
1769 if (FD->isTypeAwareOperatorNewOrDelete()) {
1770 // If this is a type aware operator delete we instantiate an appropriate
1771 // specialization of std::type_identity<>. If we do not know the
1772 // type being deallocated, or if the type-identity parameter of the
1773 // deallocation function does not match the constructed type_identity
1774 // specialization we reject the declaration.
1775 if (AllocType.isNull()) {
1776 FD = nullptr;
1777 return;
1778 }
1779 QualType TypeIdentityTag = FD->getParamDecl(i: 0)->getType();
1780 QualType ExpectedTypeIdentityTag =
1781 S.tryBuildStdTypeIdentity(Type: AllocType, Loc);
1782 if (ExpectedTypeIdentityTag.isNull()) {
1783 FD = nullptr;
1784 return;
1785 }
1786 if (!S.Context.hasSameType(T1: TypeIdentityTag, T2: ExpectedTypeIdentityTag)) {
1787 FD = nullptr;
1788 return;
1789 }
1790 IDP.PassTypeIdentity = TypeAwareAllocationMode::Yes;
1791 ++NumBaseParams;
1792 }
1793
1794 if (FD->isDestroyingOperatorDelete()) {
1795 Destroying = true;
1796 ++NumBaseParams;
1797 }
1798
1799 if (NumBaseParams < FD->getNumParams() &&
1800 S.Context.hasSameUnqualifiedType(
1801 T1: FD->getParamDecl(i: NumBaseParams)->getType(),
1802 T2: S.Context.getSizeType())) {
1803 ++NumBaseParams;
1804 IDP.PassSize = SizedDeallocationMode::Yes;
1805 }
1806
1807 if (NumBaseParams < FD->getNumParams() &&
1808 FD->getParamDecl(i: NumBaseParams)->getType()->isAlignValT()) {
1809 ++NumBaseParams;
1810 IDP.PassAlignment = AlignedAllocationMode::Yes;
1811 }
1812
1813 // In CUDA, determine how much we'd like / dislike to call this.
1814 if (S.getLangOpts().CUDA)
1815 CUDAPref = S.CUDA().IdentifyPreference(
1816 Caller: S.getCurFunctionDecl(/*AllowLambda=*/true), Callee: FD);
1817 }
1818
1819 explicit operator bool() const { return FD; }
1820
1821 int Compare(Sema &S, const UsualDeallocFnInfo &Other,
1822 ImplicitDeallocationParameters TargetIDP) const {
1823 assert(!TargetIDP.Type.isNull() ||
1824 !isTypeAwareAllocation(Other.IDP.PassTypeIdentity));
1825
1826 // C++ P0722:
1827 // A destroying operator delete is preferred over a non-destroying
1828 // operator delete.
1829 if (Destroying != Other.Destroying)
1830 return Destroying ? 1 : -1;
1831
1832 const ImplicitDeallocationParameters &OtherIDP = Other.IDP;
1833 // Selection for type awareness has priority over alignment and size
1834 if (IDP.PassTypeIdentity != OtherIDP.PassTypeIdentity)
1835 return IDP.PassTypeIdentity == TargetIDP.PassTypeIdentity ? 1 : -1;
1836
1837 // C++17 [expr.delete]p10:
1838 // If the type has new-extended alignment, a function with a parameter
1839 // of type std::align_val_t is preferred; otherwise a function without
1840 // such a parameter is preferred
1841 if (IDP.PassAlignment != OtherIDP.PassAlignment)
1842 return IDP.PassAlignment == TargetIDP.PassAlignment ? 1 : -1;
1843
1844 if (IDP.PassSize != OtherIDP.PassSize)
1845 return IDP.PassSize == TargetIDP.PassSize ? 1 : -1;
1846
1847 if (isTypeAwareAllocation(Mode: IDP.PassTypeIdentity)) {
1848 // Type aware allocation involves templates so we need to choose
1849 // the best type
1850 FunctionTemplateDecl *PrimaryTemplate = FD->getPrimaryTemplate();
1851 FunctionTemplateDecl *OtherPrimaryTemplate =
1852 Other.FD->getPrimaryTemplate();
1853 if ((!PrimaryTemplate) != (!OtherPrimaryTemplate))
1854 return OtherPrimaryTemplate ? 1 : -1;
1855
1856 if (PrimaryTemplate && OtherPrimaryTemplate) {
1857 const auto *DC = dyn_cast<CXXRecordDecl>(Val: Found->getDeclContext());
1858 const auto *OtherDC =
1859 dyn_cast<CXXRecordDecl>(Val: Other.Found->getDeclContext());
1860 unsigned ImplicitArgCount = Destroying + IDP.getNumImplicitArgs();
1861 if (FunctionTemplateDecl *Best = S.getMoreSpecializedTemplate(
1862 FT1: PrimaryTemplate, FT2: OtherPrimaryTemplate, Loc: SourceLocation(),
1863 TPOC: TPOC_Call, NumCallArguments1: ImplicitArgCount,
1864 RawObj1Ty: DC ? QualType(DC->getTypeForDecl(), 0) : QualType{},
1865 RawObj2Ty: OtherDC ? QualType(OtherDC->getTypeForDecl(), 0) : QualType{},
1866 Reversed: false)) {
1867 return Best == PrimaryTemplate ? 1 : -1;
1868 }
1869 }
1870 }
1871
1872 // Use CUDA call preference as a tiebreaker.
1873 if (CUDAPref > Other.CUDAPref)
1874 return 1;
1875 if (CUDAPref == Other.CUDAPref)
1876 return 0;
1877 return -1;
1878 }
1879
1880 DeclAccessPair Found;
1881 FunctionDecl *FD;
1882 bool Destroying;
1883 ImplicitDeallocationParameters IDP;
1884 SemaCUDA::CUDAFunctionPreference CUDAPref;
1885 };
1886}
1887
1888/// Determine whether a type has new-extended alignment. This may be called when
1889/// the type is incomplete (for a delete-expression with an incomplete pointee
1890/// type), in which case it will conservatively return false if the alignment is
1891/// not known.
1892static bool hasNewExtendedAlignment(Sema &S, QualType AllocType) {
1893 return S.getLangOpts().AlignedAllocation &&
1894 S.getASTContext().getTypeAlignIfKnown(T: AllocType) >
1895 S.getASTContext().getTargetInfo().getNewAlign();
1896}
1897
1898static bool CheckDeleteOperator(Sema &S, SourceLocation StartLoc,
1899 SourceRange Range, bool Diagnose,
1900 CXXRecordDecl *NamingClass, DeclAccessPair Decl,
1901 FunctionDecl *Operator) {
1902 if (Operator->isTypeAwareOperatorNewOrDelete()) {
1903 QualType SelectedTypeIdentityParameter =
1904 Operator->getParamDecl(i: 0)->getType();
1905 if (S.RequireCompleteType(Loc: StartLoc, T: SelectedTypeIdentityParameter,
1906 DiagID: diag::err_incomplete_type))
1907 return true;
1908 }
1909
1910 // FIXME: DiagnoseUseOfDecl?
1911 if (Operator->isDeleted()) {
1912 if (Diagnose) {
1913 StringLiteral *Msg = Operator->getDeletedMessage();
1914 S.Diag(Loc: StartLoc, DiagID: diag::err_deleted_function_use)
1915 << (Msg != nullptr) << (Msg ? Msg->getString() : StringRef());
1916 S.NoteDeletedFunction(FD: Operator);
1917 }
1918 return true;
1919 }
1920 Sema::AccessResult Accessible =
1921 S.CheckAllocationAccess(OperatorLoc: StartLoc, PlacementRange: Range, NamingClass, FoundDecl: Decl, Diagnose);
1922 return Accessible == Sema::AR_inaccessible;
1923}
1924
/// Select the correct "usual" deallocation function to use from a selection of
/// deallocation functions (either global or class-scope).
///
/// \param IDP describes the implicit arguments (type-identity, alignment,
///        size) the caller would like the chosen function to consume.
/// \param BestFns if non-null, receives every candidate that ties with the
///        returned best one (so callers can diagnose ambiguity).
/// \returns the best candidate, or an empty info (operator bool == false) if
///        no viable candidate was found.
static UsualDeallocFnInfo resolveDeallocationOverload(
    Sema &S, LookupResult &R, const ImplicitDeallocationParameters &IDP,
    SourceLocation Loc,
    llvm::SmallVectorImpl<UsualDeallocFnInfo> *BestFns = nullptr) {

  UsualDeallocFnInfo Best;
  for (auto I = R.begin(), E = R.end(); I != E; ++I) {
    UsualDeallocFnInfo Info(S, I.getPair(), IDP.Type, Loc);
    // Skip declarations that failed validation, are not usual deallocation
    // functions, or that CUDA says can never be called from this context.
    if (!Info || !isNonPlacementDeallocationFunction(S, FD: Info.FD) ||
        Info.CUDAPref == SemaCUDA::CFP_Never)
      continue;

    // A type-aware candidate is only viable when the caller asked for
    // type-aware deallocation.
    if (!isTypeAwareAllocation(Mode: IDP.PassTypeIdentity) &&
        isTypeAwareAllocation(Mode: Info.IDP.PassTypeIdentity))
      continue;
    // The first viable candidate becomes the provisional best.
    if (!Best) {
      Best = Info;
      if (BestFns)
        BestFns->push_back(Elt: Info);
      continue;
    }
    // Compare returns >0 if Best wins, <0 if Info wins, 0 on a tie.
    int ComparisonResult = Best.Compare(S, Other: Info, TargetIDP: IDP);
    if (ComparisonResult > 0)
      continue;

    // If more than one preferred function is found, all non-preferred
    // functions are eliminated from further consideration.
    if (BestFns && ComparisonResult < 0)
      BestFns->clear();

    // On a tie (== 0) Info is appended alongside the current best.
    Best = Info;
    if (BestFns)
      BestFns->push_back(Elt: Info);
  }

  return Best;
}
1964
1965/// Determine whether a given type is a class for which 'delete[]' would call
1966/// a member 'operator delete[]' with a 'size_t' parameter. This implies that
1967/// we need to store the array size (even if the type is
1968/// trivially-destructible).
1969static bool doesUsualArrayDeleteWantSize(Sema &S, SourceLocation loc,
1970 TypeAwareAllocationMode PassType,
1971 QualType allocType) {
1972 const RecordType *record =
1973 allocType->getBaseElementTypeUnsafe()->getAs<RecordType>();
1974 if (!record) return false;
1975
1976 // Try to find an operator delete[] in class scope.
1977
1978 DeclarationName deleteName =
1979 S.Context.DeclarationNames.getCXXOperatorName(Op: OO_Array_Delete);
1980 LookupResult ops(S, deleteName, loc, Sema::LookupOrdinaryName);
1981 S.LookupQualifiedName(R&: ops, LookupCtx: record->getDecl());
1982
1983 // We're just doing this for information.
1984 ops.suppressDiagnostics();
1985
1986 // Very likely: there's no operator delete[].
1987 if (ops.empty()) return false;
1988
1989 // If it's ambiguous, it should be illegal to call operator delete[]
1990 // on this thing, so it doesn't matter if we allocate extra space or not.
1991 if (ops.isAmbiguous()) return false;
1992
1993 // C++17 [expr.delete]p10:
1994 // If the deallocation functions have class scope, the one without a
1995 // parameter of type std::size_t is selected.
1996 ImplicitDeallocationParameters IDP = {
1997 allocType, PassType,
1998 alignedAllocationModeFromBool(IsAligned: hasNewExtendedAlignment(S, AllocType: allocType)),
1999 SizedDeallocationMode::No};
2000 auto Best = resolveDeallocationOverload(S, R&: ops, IDP, Loc: loc);
2001 return Best && isSizedDeallocation(Mode: Best.IDP.PassSize);
2002}
2003
ExprResult
Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
                  SourceLocation PlacementLParen, MultiExprArg PlacementArgs,
                  SourceLocation PlacementRParen, SourceRange TypeIdParens,
                  Declarator &D, Expr *Initializer) {
  // Parsed-form entry point for a new-expression: validate the declarator,
  // peel off the outermost array bound (which becomes the dynamic array
  // size), then hand off to BuildCXXNew for the semantic analysis proper.
  std::optional<Expr *> ArraySize;
  // If the specified type is an array, unwrap it and save the expression.
  if (D.getNumTypeObjects() > 0 &&
      D.getTypeObject(i: 0).Kind == DeclaratorChunk::Array) {
    DeclaratorChunk &Chunk = D.getTypeObject(i: 0);
    // 'new auto[n]' is ill-formed: the element type cannot be deduced.
    if (D.getDeclSpec().hasAutoTypeSpec())
      return ExprError(Diag(Loc: Chunk.Loc, DiagID: diag::err_new_array_of_auto)
                       << D.getSourceRange());
    if (Chunk.Arr.hasStatic)
      return ExprError(Diag(Loc: Chunk.Loc, DiagID: diag::err_static_illegal_in_new)
                       << D.getSourceRange());
    // An omitted bound (e.g. 'new int[]') needs an initializer to infer the
    // number of elements from.
    if (!Chunk.Arr.NumElts && !Initializer)
      return ExprError(Diag(Loc: Chunk.Loc, DiagID: diag::err_array_new_needs_size)
                       << D.getSourceRange());

    // The outermost bound may be a runtime expression; remember it and strip
    // the array chunk so the rest of the declarator describes the element
    // type.
    ArraySize = static_cast<Expr*>(Chunk.Arr.NumElts);
    D.DropFirstTypeObject();
  }

  // Every dimension shall be of constant size.
  if (ArraySize) {
    for (unsigned I = 0, N = D.getNumTypeObjects(); I < N; ++I) {
      if (D.getTypeObject(i: I).Kind != DeclaratorChunk::Array)
        break;

      DeclaratorChunk::ArrayTypeInfo &Array = D.getTypeObject(i: I).Arr;
      if (Expr *NumElts = (Expr *)Array.NumElts) {
        if (!NumElts->isTypeDependent() && !NumElts->isValueDependent()) {
          // FIXME: GCC permits constant folding here. We should either do so consistently
          // or not do so at all, rather than changing behavior in C++14 onwards.
          if (getLangOpts().CPlusPlus14) {
            // C++1y [expr.new]p6: Every constant-expression in a noptr-new-declarator
            // shall be a converted constant expression (5.19) of type std::size_t
            // and shall evaluate to a strictly positive value.
            llvm::APSInt Value(Context.getIntWidth(T: Context.getSizeType()));
            Array.NumElts =
                CheckConvertedConstantExpression(From: NumElts, T: Context.getSizeType(),
                                                 Value, CCE: CCEKind::ArrayBound)
                    .get();
          } else {
            Array.NumElts = VerifyIntegerConstantExpression(
                                E: NumElts, Result: nullptr, DiagID: diag::err_new_array_nonconst,
                                CanFold: AllowFoldKind::Allow)
                                .get();
          }
          // A null result means the constant-expression check already
          // diagnosed the problem.
          if (!Array.NumElts)
            return ExprError();
        }
      }
    }
  }

  // Convert the (possibly array-stripped) declarator into the allocated type.
  TypeSourceInfo *TInfo = GetTypeForDeclarator(D);
  QualType AllocType = TInfo->getType();
  if (D.isInvalidType())
    return ExprError();

  // For parenthesized direct-initialization, record the source range of the
  // initializer so BuildCXXNew can classify the initialization style.
  SourceRange DirectInitRange;
  if (ParenListExpr *List = dyn_cast_or_null<ParenListExpr>(Val: Initializer))
    DirectInitRange = List->getSourceRange();

  return BuildCXXNew(Range: SourceRange(StartLoc, D.getEndLoc()), UseGlobal,
                     PlacementLParen, PlacementArgs, PlacementRParen,
                     TypeIdParens, AllocType, AllocTypeInfo: TInfo, ArraySize, DirectInitRange,
                     Initializer);
}
2075
2076static bool isLegalArrayNewInitializer(CXXNewInitializationStyle Style,
2077 Expr *Init, bool IsCPlusPlus20) {
2078 if (!Init)
2079 return true;
2080 if (ParenListExpr *PLE = dyn_cast<ParenListExpr>(Val: Init))
2081 return IsCPlusPlus20 || PLE->getNumExprs() == 0;
2082 if (isa<ImplicitValueInitExpr>(Val: Init))
2083 return true;
2084 else if (CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Val: Init))
2085 return !CCE->isListInitialization() &&
2086 CCE->getConstructor()->isDefaultConstructor();
2087 else if (Style == CXXNewInitializationStyle::Braces) {
2088 assert(isa<InitListExpr>(Init) &&
2089 "Shouldn't create list CXXConstructExprs for arrays.");
2090 return true;
2091 }
2092 return false;
2093}
2094
2095bool
2096Sema::isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const {
2097 if (!getLangOpts().AlignedAllocationUnavailable)
2098 return false;
2099 if (FD.isDefined())
2100 return false;
2101 UnsignedOrNone AlignmentParam = std::nullopt;
2102 if (FD.isReplaceableGlobalAllocationFunction(AlignmentParam: &AlignmentParam) &&
2103 AlignmentParam)
2104 return true;
2105 return false;
2106}
2107
2108// Emit a diagnostic if an aligned allocation/deallocation function that is not
2109// implemented in the standard library is selected.
2110void Sema::diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
2111 SourceLocation Loc) {
2112 if (isUnavailableAlignedAllocationFunction(FD)) {
2113 const llvm::Triple &T = getASTContext().getTargetInfo().getTriple();
2114 StringRef OSName = AvailabilityAttr::getPlatformNameSourceSpelling(
2115 Platform: getASTContext().getTargetInfo().getPlatformName());
2116 VersionTuple OSVersion = alignedAllocMinVersion(OS: T.getOS());
2117
2118 bool IsDelete = FD.getDeclName().isAnyOperatorDelete();
2119 Diag(Loc, DiagID: diag::err_aligned_allocation_unavailable)
2120 << IsDelete << FD.getType().getAsString() << OSName
2121 << OSVersion.getAsString() << OSVersion.empty();
2122 Diag(Loc, DiagID: diag::note_silence_aligned_allocation_unavailable);
2123 }
2124}
2125
2126ExprResult Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
2127 SourceLocation PlacementLParen,
2128 MultiExprArg PlacementArgs,
2129 SourceLocation PlacementRParen,
2130 SourceRange TypeIdParens, QualType AllocType,
2131 TypeSourceInfo *AllocTypeInfo,
2132 std::optional<Expr *> ArraySize,
2133 SourceRange DirectInitRange, Expr *Initializer) {
2134 SourceRange TypeRange = AllocTypeInfo->getTypeLoc().getSourceRange();
2135 SourceLocation StartLoc = Range.getBegin();
2136
2137 CXXNewInitializationStyle InitStyle;
2138 if (DirectInitRange.isValid()) {
2139 assert(Initializer && "Have parens but no initializer.");
2140 InitStyle = CXXNewInitializationStyle::Parens;
2141 } else if (isa_and_nonnull<InitListExpr>(Val: Initializer))
2142 InitStyle = CXXNewInitializationStyle::Braces;
2143 else {
2144 assert((!Initializer || isa<ImplicitValueInitExpr>(Initializer) ||
2145 isa<CXXConstructExpr>(Initializer)) &&
2146 "Initializer expression that cannot have been implicitly created.");
2147 InitStyle = CXXNewInitializationStyle::None;
2148 }
2149
2150 MultiExprArg Exprs(&Initializer, Initializer ? 1 : 0);
2151 if (ParenListExpr *List = dyn_cast_or_null<ParenListExpr>(Val: Initializer)) {
2152 assert(InitStyle == CXXNewInitializationStyle::Parens &&
2153 "paren init for non-call init");
2154 Exprs = MultiExprArg(List->getExprs(), List->getNumExprs());
2155 } else if (auto *List = dyn_cast_or_null<CXXParenListInitExpr>(Val: Initializer)) {
2156 assert(InitStyle == CXXNewInitializationStyle::Parens &&
2157 "paren init for non-call init");
2158 Exprs = List->getInitExprs();
2159 }
2160
2161 // C++11 [expr.new]p15:
2162 // A new-expression that creates an object of type T initializes that
2163 // object as follows:
2164 InitializationKind Kind = [&] {
2165 switch (InitStyle) {
2166 // - If the new-initializer is omitted, the object is default-
2167 // initialized (8.5); if no initialization is performed,
2168 // the object has indeterminate value
2169 case CXXNewInitializationStyle::None:
2170 return InitializationKind::CreateDefault(InitLoc: TypeRange.getBegin());
2171 // - Otherwise, the new-initializer is interpreted according to the
2172 // initialization rules of 8.5 for direct-initialization.
2173 case CXXNewInitializationStyle::Parens:
2174 return InitializationKind::CreateDirect(InitLoc: TypeRange.getBegin(),
2175 LParenLoc: DirectInitRange.getBegin(),
2176 RParenLoc: DirectInitRange.getEnd());
2177 case CXXNewInitializationStyle::Braces:
2178 return InitializationKind::CreateDirectList(InitLoc: TypeRange.getBegin(),
2179 LBraceLoc: Initializer->getBeginLoc(),
2180 RBraceLoc: Initializer->getEndLoc());
2181 }
2182 llvm_unreachable("Unknown initialization kind");
2183 }();
2184
2185 // C++11 [dcl.spec.auto]p6. Deduce the type which 'auto' stands in for.
2186 auto *Deduced = AllocType->getContainedDeducedType();
2187 if (Deduced && !Deduced->isDeduced() &&
2188 isa<DeducedTemplateSpecializationType>(Val: Deduced)) {
2189 if (ArraySize)
2190 return ExprError(
2191 Diag(Loc: *ArraySize ? (*ArraySize)->getExprLoc() : TypeRange.getBegin(),
2192 DiagID: diag::err_deduced_class_template_compound_type)
2193 << /*array*/ 2
2194 << (*ArraySize ? (*ArraySize)->getSourceRange() : TypeRange));
2195
2196 InitializedEntity Entity
2197 = InitializedEntity::InitializeNew(NewLoc: StartLoc, Type: AllocType);
2198 AllocType = DeduceTemplateSpecializationFromInitializer(
2199 TInfo: AllocTypeInfo, Entity, Kind, Init: Exprs);
2200 if (AllocType.isNull())
2201 return ExprError();
2202 } else if (Deduced && !Deduced->isDeduced()) {
2203 MultiExprArg Inits = Exprs;
2204 bool Braced = (InitStyle == CXXNewInitializationStyle::Braces);
2205 if (Braced) {
2206 auto *ILE = cast<InitListExpr>(Val: Exprs[0]);
2207 Inits = MultiExprArg(ILE->getInits(), ILE->getNumInits());
2208 }
2209
2210 if (InitStyle == CXXNewInitializationStyle::None || Inits.empty())
2211 return ExprError(Diag(Loc: StartLoc, DiagID: diag::err_auto_new_requires_ctor_arg)
2212 << AllocType << TypeRange);
2213 if (Inits.size() > 1) {
2214 Expr *FirstBad = Inits[1];
2215 return ExprError(Diag(Loc: FirstBad->getBeginLoc(),
2216 DiagID: diag::err_auto_new_ctor_multiple_expressions)
2217 << AllocType << TypeRange);
2218 }
2219 if (Braced && !getLangOpts().CPlusPlus17)
2220 Diag(Loc: Initializer->getBeginLoc(), DiagID: diag::ext_auto_new_list_init)
2221 << AllocType << TypeRange;
2222 Expr *Deduce = Inits[0];
2223 if (isa<InitListExpr>(Val: Deduce))
2224 return ExprError(
2225 Diag(Loc: Deduce->getBeginLoc(), DiagID: diag::err_auto_expr_init_paren_braces)
2226 << Braced << AllocType << TypeRange);
2227 QualType DeducedType;
2228 TemplateDeductionInfo Info(Deduce->getExprLoc());
2229 TemplateDeductionResult Result =
2230 DeduceAutoType(AutoTypeLoc: AllocTypeInfo->getTypeLoc(), Initializer: Deduce, Result&: DeducedType, Info);
2231 if (Result != TemplateDeductionResult::Success &&
2232 Result != TemplateDeductionResult::AlreadyDiagnosed)
2233 return ExprError(Diag(Loc: StartLoc, DiagID: diag::err_auto_new_deduction_failure)
2234 << AllocType << Deduce->getType() << TypeRange
2235 << Deduce->getSourceRange());
2236 if (DeducedType.isNull()) {
2237 assert(Result == TemplateDeductionResult::AlreadyDiagnosed);
2238 return ExprError();
2239 }
2240 AllocType = DeducedType;
2241 }
2242
2243 // Per C++0x [expr.new]p5, the type being constructed may be a
2244 // typedef of an array type.
2245 // Dependent case will be handled separately.
2246 if (!ArraySize && !AllocType->isDependentType()) {
2247 if (const ConstantArrayType *Array
2248 = Context.getAsConstantArrayType(T: AllocType)) {
2249 ArraySize = IntegerLiteral::Create(C: Context, V: Array->getSize(),
2250 type: Context.getSizeType(),
2251 l: TypeRange.getEnd());
2252 AllocType = Array->getElementType();
2253 }
2254 }
2255
2256 if (CheckAllocatedType(AllocType, Loc: TypeRange.getBegin(), R: TypeRange))
2257 return ExprError();
2258
2259 if (ArraySize && !checkArrayElementAlignment(EltTy: AllocType, Loc: TypeRange.getBegin()))
2260 return ExprError();
2261
2262 // In ARC, infer 'retaining' for the allocated
2263 if (getLangOpts().ObjCAutoRefCount &&
2264 AllocType.getObjCLifetime() == Qualifiers::OCL_None &&
2265 AllocType->isObjCLifetimeType()) {
2266 AllocType = Context.getLifetimeQualifiedType(type: AllocType,
2267 lifetime: AllocType->getObjCARCImplicitLifetime());
2268 }
2269
2270 QualType ResultType = Context.getPointerType(T: AllocType);
2271
2272 if (ArraySize && *ArraySize &&
2273 (*ArraySize)->getType()->isNonOverloadPlaceholderType()) {
2274 ExprResult result = CheckPlaceholderExpr(E: *ArraySize);
2275 if (result.isInvalid()) return ExprError();
2276 ArraySize = result.get();
2277 }
2278 // C++98 5.3.4p6: "The expression in a direct-new-declarator shall have
2279 // integral or enumeration type with a non-negative value."
2280 // C++11 [expr.new]p6: The expression [...] shall be of integral or unscoped
2281 // enumeration type, or a class type for which a single non-explicit
2282 // conversion function to integral or unscoped enumeration type exists.
2283 // C++1y [expr.new]p6: The expression [...] is implicitly converted to
2284 // std::size_t.
2285 std::optional<uint64_t> KnownArraySize;
2286 if (ArraySize && *ArraySize && !(*ArraySize)->isTypeDependent()) {
2287 ExprResult ConvertedSize;
2288 if (getLangOpts().CPlusPlus14) {
2289 assert(Context.getTargetInfo().getIntWidth() && "Builtin type of size 0?");
2290
2291 ConvertedSize = PerformImplicitConversion(
2292 From: *ArraySize, ToType: Context.getSizeType(), Action: AssignmentAction::Converting);
2293
2294 if (!ConvertedSize.isInvalid() &&
2295 (*ArraySize)->getType()->getAs<RecordType>())
2296 // Diagnose the compatibility of this conversion.
2297 Diag(Loc: StartLoc, DiagID: diag::warn_cxx98_compat_array_size_conversion)
2298 << (*ArraySize)->getType() << 0 << "'size_t'";
2299 } else {
2300 class SizeConvertDiagnoser : public ICEConvertDiagnoser {
2301 protected:
2302 Expr *ArraySize;
2303
2304 public:
2305 SizeConvertDiagnoser(Expr *ArraySize)
2306 : ICEConvertDiagnoser(/*AllowScopedEnumerations*/false, false, false),
2307 ArraySize(ArraySize) {}
2308
2309 SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
2310 QualType T) override {
2311 return S.Diag(Loc, DiagID: diag::err_array_size_not_integral)
2312 << S.getLangOpts().CPlusPlus11 << T;
2313 }
2314
2315 SemaDiagnosticBuilder diagnoseIncomplete(
2316 Sema &S, SourceLocation Loc, QualType T) override {
2317 return S.Diag(Loc, DiagID: diag::err_array_size_incomplete_type)
2318 << T << ArraySize->getSourceRange();
2319 }
2320
2321 SemaDiagnosticBuilder diagnoseExplicitConv(
2322 Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override {
2323 return S.Diag(Loc, DiagID: diag::err_array_size_explicit_conversion) << T << ConvTy;
2324 }
2325
2326 SemaDiagnosticBuilder noteExplicitConv(
2327 Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override {
2328 return S.Diag(Loc: Conv->getLocation(), DiagID: diag::note_array_size_conversion)
2329 << ConvTy->isEnumeralType() << ConvTy;
2330 }
2331
2332 SemaDiagnosticBuilder diagnoseAmbiguous(
2333 Sema &S, SourceLocation Loc, QualType T) override {
2334 return S.Diag(Loc, DiagID: diag::err_array_size_ambiguous_conversion) << T;
2335 }
2336
2337 SemaDiagnosticBuilder noteAmbiguous(
2338 Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override {
2339 return S.Diag(Loc: Conv->getLocation(), DiagID: diag::note_array_size_conversion)
2340 << ConvTy->isEnumeralType() << ConvTy;
2341 }
2342
2343 SemaDiagnosticBuilder diagnoseConversion(Sema &S, SourceLocation Loc,
2344 QualType T,
2345 QualType ConvTy) override {
2346 return S.Diag(Loc,
2347 DiagID: S.getLangOpts().CPlusPlus11
2348 ? diag::warn_cxx98_compat_array_size_conversion
2349 : diag::ext_array_size_conversion)
2350 << T << ConvTy->isEnumeralType() << ConvTy;
2351 }
2352 } SizeDiagnoser(*ArraySize);
2353
2354 ConvertedSize = PerformContextualImplicitConversion(Loc: StartLoc, FromE: *ArraySize,
2355 Converter&: SizeDiagnoser);
2356 }
2357 if (ConvertedSize.isInvalid())
2358 return ExprError();
2359
2360 ArraySize = ConvertedSize.get();
2361 QualType SizeType = (*ArraySize)->getType();
2362
2363 if (!SizeType->isIntegralOrUnscopedEnumerationType())
2364 return ExprError();
2365
2366 // C++98 [expr.new]p7:
2367 // The expression in a direct-new-declarator shall have integral type
2368 // with a non-negative value.
2369 //
2370 // Let's see if this is a constant < 0. If so, we reject it out of hand,
2371 // per CWG1464. Otherwise, if it's not a constant, we must have an
2372 // unparenthesized array type.
2373
2374 // We've already performed any required implicit conversion to integer or
2375 // unscoped enumeration type.
2376 // FIXME: Per CWG1464, we are required to check the value prior to
2377 // converting to size_t. This will never find a negative array size in
2378 // C++14 onwards, because Value is always unsigned here!
2379 if (std::optional<llvm::APSInt> Value =
2380 (*ArraySize)->getIntegerConstantExpr(Ctx: Context)) {
2381 if (Value->isSigned() && Value->isNegative()) {
2382 return ExprError(Diag(Loc: (*ArraySize)->getBeginLoc(),
2383 DiagID: diag::err_typecheck_negative_array_size)
2384 << (*ArraySize)->getSourceRange());
2385 }
2386
2387 if (!AllocType->isDependentType()) {
2388 unsigned ActiveSizeBits =
2389 ConstantArrayType::getNumAddressingBits(Context, ElementType: AllocType, NumElements: *Value);
2390 if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context))
2391 return ExprError(
2392 Diag(Loc: (*ArraySize)->getBeginLoc(), DiagID: diag::err_array_too_large)
2393 << toString(I: *Value, Radix: 10) << (*ArraySize)->getSourceRange());
2394 }
2395
2396 KnownArraySize = Value->getZExtValue();
2397 } else if (TypeIdParens.isValid()) {
2398 // Can't have dynamic array size when the type-id is in parentheses.
2399 Diag(Loc: (*ArraySize)->getBeginLoc(), DiagID: diag::ext_new_paren_array_nonconst)
2400 << (*ArraySize)->getSourceRange()
2401 << FixItHint::CreateRemoval(RemoveRange: TypeIdParens.getBegin())
2402 << FixItHint::CreateRemoval(RemoveRange: TypeIdParens.getEnd());
2403
2404 TypeIdParens = SourceRange();
2405 }
2406
2407 // Note that we do *not* convert the argument in any way. It can
2408 // be signed, larger than size_t, whatever.
2409 }
2410
2411 FunctionDecl *OperatorNew = nullptr;
2412 FunctionDecl *OperatorDelete = nullptr;
2413 unsigned Alignment =
2414 AllocType->isDependentType() ? 0 : Context.getTypeAlign(T: AllocType);
2415 unsigned NewAlignment = Context.getTargetInfo().getNewAlign();
2416 ImplicitAllocationParameters IAP = {
2417 AllocType, ShouldUseTypeAwareOperatorNewOrDelete(),
2418 alignedAllocationModeFromBool(IsAligned: getLangOpts().AlignedAllocation &&
2419 Alignment > NewAlignment)};
2420
2421 if (CheckArgsForPlaceholders(args: PlacementArgs))
2422 return ExprError();
2423
2424 AllocationFunctionScope Scope = UseGlobal ? AllocationFunctionScope::Global
2425 : AllocationFunctionScope::Both;
2426 SourceRange AllocationParameterRange = Range;
2427 if (PlacementLParen.isValid() && PlacementRParen.isValid())
2428 AllocationParameterRange = SourceRange(PlacementLParen, PlacementRParen);
2429 if (!AllocType->isDependentType() &&
2430 !Expr::hasAnyTypeDependentArguments(Exprs: PlacementArgs) &&
2431 FindAllocationFunctions(StartLoc, Range: AllocationParameterRange, NewScope: Scope, DeleteScope: Scope,
2432 AllocType, IsArray: ArraySize.has_value(), IAP,
2433 PlaceArgs: PlacementArgs, OperatorNew, OperatorDelete))
2434 return ExprError();
2435
2436 // If this is an array allocation, compute whether the usual array
2437 // deallocation function for the type has a size_t parameter.
2438 bool UsualArrayDeleteWantsSize = false;
2439 if (ArraySize && !AllocType->isDependentType())
2440 UsualArrayDeleteWantsSize = doesUsualArrayDeleteWantSize(
2441 S&: *this, loc: StartLoc, PassType: IAP.PassTypeIdentity, allocType: AllocType);
2442
2443 SmallVector<Expr *, 8> AllPlaceArgs;
2444 if (OperatorNew) {
2445 auto *Proto = OperatorNew->getType()->castAs<FunctionProtoType>();
2446 VariadicCallType CallType = Proto->isVariadic()
2447 ? VariadicCallType::Function
2448 : VariadicCallType::DoesNotApply;
2449
2450 // We've already converted the placement args, just fill in any default
2451 // arguments. Skip the first parameter because we don't have a corresponding
2452 // argument. Skip the second parameter too if we're passing in the
2453 // alignment; we've already filled it in.
2454 unsigned NumImplicitArgs = 1;
2455 if (isTypeAwareAllocation(Mode: IAP.PassTypeIdentity)) {
2456 assert(OperatorNew->isTypeAwareOperatorNewOrDelete());
2457 NumImplicitArgs++;
2458 }
2459 if (isAlignedAllocation(Mode: IAP.PassAlignment))
2460 NumImplicitArgs++;
2461 if (GatherArgumentsForCall(CallLoc: AllocationParameterRange.getBegin(), FDecl: OperatorNew,
2462 Proto, FirstParam: NumImplicitArgs, Args: PlacementArgs,
2463 AllArgs&: AllPlaceArgs, CallType))
2464 return ExprError();
2465
2466 if (!AllPlaceArgs.empty())
2467 PlacementArgs = AllPlaceArgs;
2468
2469 // We would like to perform some checking on the given `operator new` call,
2470 // but the PlacementArgs does not contain the implicit arguments,
2471 // namely allocation size and maybe allocation alignment,
2472 // so we need to conjure them.
2473
2474 QualType SizeTy = Context.getSizeType();
2475 unsigned SizeTyWidth = Context.getTypeSize(T: SizeTy);
2476
2477 llvm::APInt SingleEltSize(
2478 SizeTyWidth, Context.getTypeSizeInChars(T: AllocType).getQuantity());
2479
2480 // How many bytes do we want to allocate here?
2481 std::optional<llvm::APInt> AllocationSize;
2482 if (!ArraySize && !AllocType->isDependentType()) {
2483 // For non-array operator new, we only want to allocate one element.
2484 AllocationSize = SingleEltSize;
2485 } else if (KnownArraySize && !AllocType->isDependentType()) {
2486 // For array operator new, only deal with static array size case.
2487 bool Overflow;
2488 AllocationSize = llvm::APInt(SizeTyWidth, *KnownArraySize)
2489 .umul_ov(RHS: SingleEltSize, Overflow);
2490 (void)Overflow;
2491 assert(
2492 !Overflow &&
2493 "Expected that all the overflows would have been handled already.");
2494 }
2495
2496 IntegerLiteral AllocationSizeLiteral(
2497 Context, AllocationSize.value_or(u: llvm::APInt::getZero(numBits: SizeTyWidth)),
2498 SizeTy, StartLoc);
2499 // Otherwise, if we failed to constant-fold the allocation size, we'll
2500 // just give up and pass-in something opaque, that isn't a null pointer.
2501 OpaqueValueExpr OpaqueAllocationSize(StartLoc, SizeTy, VK_PRValue,
2502 OK_Ordinary, /*SourceExpr=*/nullptr);
2503
2504 // Let's synthesize the alignment argument in case we will need it.
2505 // Since we *really* want to allocate these on stack, this is slightly ugly
2506 // because there might not be a `std::align_val_t` type.
2507 EnumDecl *StdAlignValT = getStdAlignValT();
2508 QualType AlignValT =
2509 StdAlignValT ? Context.getTypeDeclType(Decl: StdAlignValT) : SizeTy;
2510 IntegerLiteral AlignmentLiteral(
2511 Context,
2512 llvm::APInt(Context.getTypeSize(T: SizeTy),
2513 Alignment / Context.getCharWidth()),
2514 SizeTy, StartLoc);
2515 ImplicitCastExpr DesiredAlignment(ImplicitCastExpr::OnStack, AlignValT,
2516 CK_IntegralCast, &AlignmentLiteral,
2517 VK_PRValue, FPOptionsOverride());
2518
2519 // Adjust placement args by prepending conjured size and alignment exprs.
2520 llvm::SmallVector<Expr *, 8> CallArgs;
2521 CallArgs.reserve(N: NumImplicitArgs + PlacementArgs.size());
2522 CallArgs.emplace_back(Args: AllocationSize
2523 ? static_cast<Expr *>(&AllocationSizeLiteral)
2524 : &OpaqueAllocationSize);
2525 if (isAlignedAllocation(Mode: IAP.PassAlignment))
2526 CallArgs.emplace_back(Args: &DesiredAlignment);
2527 llvm::append_range(C&: CallArgs, R&: PlacementArgs);
2528
2529 DiagnoseSentinelCalls(D: OperatorNew, Loc: PlacementLParen, Args: CallArgs);
2530
2531 checkCall(FDecl: OperatorNew, Proto, /*ThisArg=*/nullptr, Args: CallArgs,
2532 /*IsMemberFunction=*/false, Loc: StartLoc, Range, CallType);
2533
2534 // Warn if the type is over-aligned and is being allocated by (unaligned)
2535 // global operator new.
2536 if (PlacementArgs.empty() && !isAlignedAllocation(Mode: IAP.PassAlignment) &&
2537 (OperatorNew->isImplicit() ||
2538 (OperatorNew->getBeginLoc().isValid() &&
2539 getSourceManager().isInSystemHeader(Loc: OperatorNew->getBeginLoc())))) {
2540 if (Alignment > NewAlignment)
2541 Diag(Loc: StartLoc, DiagID: diag::warn_overaligned_type)
2542 << AllocType
2543 << unsigned(Alignment / Context.getCharWidth())
2544 << unsigned(NewAlignment / Context.getCharWidth());
2545 }
2546 }
2547
2548 // Array 'new' can't have any initializers except empty parentheses.
2549 // Initializer lists are also allowed, in C++11. Rely on the parser for the
2550 // dialect distinction.
2551 if (ArraySize && !isLegalArrayNewInitializer(Style: InitStyle, Init: Initializer,
2552 IsCPlusPlus20: getLangOpts().CPlusPlus20)) {
2553 SourceRange InitRange(Exprs.front()->getBeginLoc(),
2554 Exprs.back()->getEndLoc());
2555 Diag(Loc: StartLoc, DiagID: diag::err_new_array_init_args) << InitRange;
2556 return ExprError();
2557 }
2558
2559 // If we can perform the initialization, and we've not already done so,
2560 // do it now.
2561 if (!AllocType->isDependentType() &&
2562 !Expr::hasAnyTypeDependentArguments(Exprs)) {
2563 // The type we initialize is the complete type, including the array bound.
2564 QualType InitType;
2565 if (KnownArraySize)
2566 InitType = Context.getConstantArrayType(
2567 EltTy: AllocType,
2568 ArySize: llvm::APInt(Context.getTypeSize(T: Context.getSizeType()),
2569 *KnownArraySize),
2570 SizeExpr: *ArraySize, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
2571 else if (ArraySize)
2572 InitType = Context.getIncompleteArrayType(EltTy: AllocType,
2573 ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
2574 else
2575 InitType = AllocType;
2576
2577 InitializedEntity Entity
2578 = InitializedEntity::InitializeNew(NewLoc: StartLoc, Type: InitType);
2579 InitializationSequence InitSeq(*this, Entity, Kind, Exprs);
2580 ExprResult FullInit = InitSeq.Perform(S&: *this, Entity, Kind, Args: Exprs);
2581 if (FullInit.isInvalid())
2582 return ExprError();
2583
2584 // FullInit is our initializer; strip off CXXBindTemporaryExprs, because
2585 // we don't want the initialized object to be destructed.
2586 // FIXME: We should not create these in the first place.
2587 if (CXXBindTemporaryExpr *Binder =
2588 dyn_cast_or_null<CXXBindTemporaryExpr>(Val: FullInit.get()))
2589 FullInit = Binder->getSubExpr();
2590
2591 Initializer = FullInit.get();
2592
2593 // FIXME: If we have a KnownArraySize, check that the array bound of the
2594 // initializer is no greater than that constant value.
2595
2596 if (ArraySize && !*ArraySize) {
2597 auto *CAT = Context.getAsConstantArrayType(T: Initializer->getType());
2598 if (CAT) {
2599 // FIXME: Track that the array size was inferred rather than explicitly
2600 // specified.
2601 ArraySize = IntegerLiteral::Create(
2602 C: Context, V: CAT->getSize(), type: Context.getSizeType(), l: TypeRange.getEnd());
2603 } else {
2604 Diag(Loc: TypeRange.getEnd(), DiagID: diag::err_new_array_size_unknown_from_init)
2605 << Initializer->getSourceRange();
2606 }
2607 }
2608 }
2609
2610 // Mark the new and delete operators as referenced.
2611 if (OperatorNew) {
2612 if (DiagnoseUseOfDecl(D: OperatorNew, Locs: StartLoc))
2613 return ExprError();
2614 MarkFunctionReferenced(Loc: StartLoc, Func: OperatorNew);
2615 }
2616 if (OperatorDelete) {
2617 if (DiagnoseUseOfDecl(D: OperatorDelete, Locs: StartLoc))
2618 return ExprError();
2619 MarkFunctionReferenced(Loc: StartLoc, Func: OperatorDelete);
2620 }
2621
2622 return CXXNewExpr::Create(Ctx: Context, IsGlobalNew: UseGlobal, OperatorNew, OperatorDelete,
2623 IAP, UsualArrayDeleteWantsSize, PlacementArgs,
2624 TypeIdParens, ArraySize, InitializationStyle: InitStyle, Initializer,
2625 Ty: ResultType, AllocatedTypeInfo: AllocTypeInfo, Range, DirectInitRange);
2626}
2627
2628bool Sema::CheckAllocatedType(QualType AllocType, SourceLocation Loc,
2629 SourceRange R) {
2630 // C++ 5.3.4p1: "[The] type shall be a complete object type, but not an
2631 // abstract class type or array thereof.
2632 if (AllocType->isFunctionType())
2633 return Diag(Loc, DiagID: diag::err_bad_new_type)
2634 << AllocType << 0 << R;
2635 else if (AllocType->isReferenceType())
2636 return Diag(Loc, DiagID: diag::err_bad_new_type)
2637 << AllocType << 1 << R;
2638 else if (!AllocType->isDependentType() &&
2639 RequireCompleteSizedType(
2640 Loc, T: AllocType, DiagID: diag::err_new_incomplete_or_sizeless_type, Args: R))
2641 return true;
2642 else if (RequireNonAbstractType(Loc, T: AllocType,
2643 DiagID: diag::err_allocation_of_abstract_type))
2644 return true;
2645 else if (AllocType->isVariablyModifiedType())
2646 return Diag(Loc, DiagID: diag::err_variably_modified_new_type)
2647 << AllocType;
2648 else if (AllocType.getAddressSpace() != LangAS::Default &&
2649 !getLangOpts().OpenCLCPlusPlus)
2650 return Diag(Loc, DiagID: diag::err_address_space_qualified_new)
2651 << AllocType.getUnqualifiedType()
2652 << AllocType.getQualifiers().getAddressSpaceAttributePrintValue();
2653 else if (getLangOpts().ObjCAutoRefCount) {
2654 if (const ArrayType *AT = Context.getAsArrayType(T: AllocType)) {
2655 QualType BaseAllocType = Context.getBaseElementType(VAT: AT);
2656 if (BaseAllocType.getObjCLifetime() == Qualifiers::OCL_None &&
2657 BaseAllocType->isObjCLifetimeType())
2658 return Diag(Loc, DiagID: diag::err_arc_new_array_without_ownership)
2659 << BaseAllocType;
2660 }
2661 }
2662
2663 return false;
2664}
2665
// Which flavor of allocation function a resolution pass should consider:
// only type-aware candidates (Typed) or only non-type-aware ones (Untyped).
enum class ResolveMode { Typed, Untyped };
/// Perform one overload-resolution pass for an allocation function over the
/// declarations in \p R, considering only candidates of the flavor selected
/// by \p Mode (type-aware vs. not).
///
/// Implements two retry fallbacks by self-recursion: the C++17 [expr.new]p13
/// retry without the alignment argument, and the MSVC-compatibility fallback
/// from operator new[] to global operator new.
///
/// \param Args the synthesized argument list; mutated when the alignment
///        argument is dropped for the retry.
/// \param PassAlignment set to AlignedAllocationMode::No if resolution only
///        succeeded after removing the std::align_val_t argument.
/// \param Operator receives the selected function; left null when a Typed
///        pass simply found no match (not treated as an error).
/// \param AlignedCandidates on the unaligned retry, the candidate set from
///        the original aligned attempt, used to emit better notes.
/// \returns true on failure; diagnostics are emitted only if \p Diagnose.
static bool resolveAllocationOverloadInterior(
    Sema &S, LookupResult &R, SourceRange Range, ResolveMode Mode,
    SmallVectorImpl<Expr *> &Args, AlignedAllocationMode &PassAlignment,
    FunctionDecl *&Operator, OverloadCandidateSet *AlignedCandidates,
    Expr *AlignArg, bool Diagnose) {
  // Type-aware operators take a leading std::type_identity argument; every
  // index into Args below is shifted past it by this offset.
  unsigned NonTypeArgumentOffset = 0;
  if (Mode == ResolveMode::Typed) {
    ++NonTypeArgumentOffset;
  }

  OverloadCandidateSet Candidates(R.getNameLoc(),
                                  OverloadCandidateSet::CSK_Normal);
  for (LookupResult::iterator Alloc = R.begin(), AllocEnd = R.end();
       Alloc != AllocEnd; ++Alloc) {
    // Even member operator new/delete are implicitly treated as
    // static, so don't use AddMemberCandidate.
    NamedDecl *D = (*Alloc)->getUnderlyingDecl();
    // Only consider candidates of the flavor this pass is resolving.
    bool IsTypeAware = D->getAsFunction()->isTypeAwareOperatorNewOrDelete();
    if (IsTypeAware == (Mode != ResolveMode::Typed))
      continue;

    if (FunctionTemplateDecl *FnTemplate = dyn_cast<FunctionTemplateDecl>(Val: D)) {
      S.AddTemplateOverloadCandidate(FunctionTemplate: FnTemplate, FoundDecl: Alloc.getPair(),
                                     /*ExplicitTemplateArgs=*/nullptr, Args,
                                     CandidateSet&: Candidates,
                                     /*SuppressUserConversions=*/false);
      continue;
    }

    FunctionDecl *Fn = cast<FunctionDecl>(Val: D);
    S.AddOverloadCandidate(Function: Fn, FoundDecl: Alloc.getPair(), Args, CandidateSet&: Candidates,
                           /*SuppressUserConversions=*/false);
  }

  // Do the resolution.
  OverloadCandidateSet::iterator Best;
  switch (Candidates.BestViableFunction(S, Loc: R.getNameLoc(), Best)) {
  case OR_Success: {
    // Got one!
    FunctionDecl *FnDecl = Best->Function;
    if (S.CheckAllocationAccess(OperatorLoc: R.getNameLoc(), PlacementRange: Range, NamingClass: R.getNamingClass(),
                                FoundDecl: Best->FoundDecl) == Sema::AR_inaccessible)
      return true;

    Operator = FnDecl;
    return false;
  }

  case OR_No_Viable_Function:
    // C++17 [expr.new]p13:
    //   If no matching function is found and the allocated object type has
    //   new-extended alignment, the alignment argument is removed from the
    //   argument list, and overload resolution is performed again.
    if (isAlignedAllocation(Mode: PassAlignment)) {
      PassAlignment = AlignedAllocationMode::No;
      // Remember the removed alignment argument so the retry can still list
      // the aligned candidates in its diagnostics.
      AlignArg = Args[NonTypeArgumentOffset + 1];
      Args.erase(CI: Args.begin() + NonTypeArgumentOffset + 1);
      return resolveAllocationOverloadInterior(S, R, Range, Mode, Args,
                                               PassAlignment, Operator,
                                               AlignedCandidates: &Candidates, AlignArg, Diagnose);
    }

    // MSVC will fall back on trying to find a matching global operator new
    // if operator new[] cannot be found. Also, MSVC will leak by not
    // generating a call to operator delete or operator delete[], but we
    // will not replicate that bug.
    // FIXME: Find out how this interacts with the std::align_val_t fallback
    // once MSVC implements it.
    if (R.getLookupName().getCXXOverloadedOperator() == OO_Array_New &&
        S.Context.getLangOpts().MSVCCompat && Mode != ResolveMode::Typed) {
      R.clear();
      R.setLookupName(S.Context.DeclarationNames.getCXXOperatorName(Op: OO_New));
      S.LookupQualifiedName(R, LookupCtx: S.Context.getTranslationUnitDecl());
      // FIXME: This will give bad diagnostics pointing at the wrong functions.
      return resolveAllocationOverloadInterior(S, R, Range, Mode, Args,
                                               PassAlignment, Operator,
                                               /*Candidates=*/AlignedCandidates: nullptr,
                                               /*AlignArg=*/nullptr, Diagnose);
    }
    if (Mode == ResolveMode::Typed) {
      // If we can't find a matching type aware operator we don't consider this
      // a failure.
      Operator = nullptr;
      return false;
    }
    if (Diagnose) {
      // If this is an allocation of the form 'new (p) X' for some object
      // pointer p (or an expression that will decay to such a pointer),
      // diagnose the missing inclusion of <new>.
      if (!R.isClassLookup() && Args.size() == 2 &&
          (Args[1]->getType()->isObjectPointerType() ||
           Args[1]->getType()->isArrayType())) {
        S.Diag(Loc: R.getNameLoc(), DiagID: diag::err_need_header_before_placement_new)
            << R.getLookupName() << Range;
        // Listing the candidates is unlikely to be useful; skip it.
        return true;
      }

      // Finish checking all candidates before we note any. This checking can
      // produce additional diagnostics so can't be interleaved with our
      // emission of notes.
      //
      // For an aligned allocation, separately check the aligned and unaligned
      // candidates with their respective argument lists.
      SmallVector<OverloadCandidate*, 32> Cands;
      SmallVector<OverloadCandidate*, 32> AlignedCands;
      llvm::SmallVector<Expr*, 4> AlignedArgs;
      if (AlignedCandidates) {
        // A candidate counts as "aligned" if the parameter right after the
        // size (and type-identity, if present) is std::align_val_t.
        auto IsAligned = [NonTypeArgumentOffset](OverloadCandidate &C) {
          auto AlignArgOffset = NonTypeArgumentOffset + 1;
          return C.Function->getNumParams() > AlignArgOffset &&
                 C.Function->getParamDecl(i: AlignArgOffset)
                     ->getType()
                     ->isAlignValT();
        };
        auto IsUnaligned = [&](OverloadCandidate &C) { return !IsAligned(C); };

        // Reconstruct the original aligned argument list by re-inserting the
        // saved alignment argument after the size.
        AlignedArgs.reserve(N: Args.size() + NonTypeArgumentOffset + 1);
        for (unsigned Idx = 0; Idx < NonTypeArgumentOffset + 1; ++Idx)
          AlignedArgs.push_back(Elt: Args[Idx]);
        AlignedArgs.push_back(Elt: AlignArg);
        AlignedArgs.append(in_start: Args.begin() + NonTypeArgumentOffset + 1,
                           in_end: Args.end());
        AlignedCands = AlignedCandidates->CompleteCandidates(
            S, OCD: OCD_AllCandidates, Args: AlignedArgs, OpLoc: R.getNameLoc(), Filter: IsAligned);

        Cands = Candidates.CompleteCandidates(S, OCD: OCD_AllCandidates, Args,
                                              OpLoc: R.getNameLoc(), Filter: IsUnaligned);
      } else {
        Cands = Candidates.CompleteCandidates(S, OCD: OCD_AllCandidates, Args,
                                              OpLoc: R.getNameLoc());
      }

      S.Diag(Loc: R.getNameLoc(), DiagID: diag::err_ovl_no_viable_function_in_call)
          << R.getLookupName() << Range;
      if (AlignedCandidates)
        AlignedCandidates->NoteCandidates(S, Args: AlignedArgs, Cands: AlignedCands, Opc: "",
                                          OpLoc: R.getNameLoc());
      Candidates.NoteCandidates(S, Args, Cands, Opc: "", OpLoc: R.getNameLoc());
    }
    return true;

  case OR_Ambiguous:
    if (Diagnose) {
      Candidates.NoteCandidates(
          PA: PartialDiagnosticAt(R.getNameLoc(),
                               S.PDiag(DiagID: diag::err_ovl_ambiguous_call)
                                   << R.getLookupName() << Range),
          S, OCD: OCD_AmbiguousCandidates, Args);
    }
    return true;

  case OR_Deleted: {
    if (Diagnose)
      S.DiagnoseUseOfDeletedFunction(Loc: R.getNameLoc(), Range, Name: R.getLookupName(),
                                     CandidateSet&: Candidates, Fn: Best->Function, Args);
    return true;
  }
  }
  llvm_unreachable("Unreachable, bad result from BestViableFunction");
}
2828
// How LookupGlobalDeallocationFunctions should filter its results: keep only
// non-type-aware declarations (Untyped), or keep both type-aware and
// non-type-aware declarations (OptionallyTyped).
enum class DeallocLookupMode { Untyped, OptionallyTyped };
2830
2831static void LookupGlobalDeallocationFunctions(Sema &S, SourceLocation Loc,
2832 LookupResult &FoundDelete,
2833 DeallocLookupMode Mode,
2834 DeclarationName Name) {
2835 S.LookupQualifiedName(R&: FoundDelete, LookupCtx: S.Context.getTranslationUnitDecl());
2836 if (Mode != DeallocLookupMode::OptionallyTyped) {
2837 // We're going to remove either the typed or the non-typed
2838 bool RemoveTypedDecl = Mode == DeallocLookupMode::Untyped;
2839 LookupResult::Filter Filter = FoundDelete.makeFilter();
2840 while (Filter.hasNext()) {
2841 FunctionDecl *FD = Filter.next()->getUnderlyingDecl()->getAsFunction();
2842 if (FD->isTypeAwareOperatorNewOrDelete() == RemoveTypedDecl)
2843 Filter.erase();
2844 }
2845 Filter.done();
2846 }
2847}
2848
2849static bool resolveAllocationOverload(
2850 Sema &S, LookupResult &R, SourceRange Range, SmallVectorImpl<Expr *> &Args,
2851 ImplicitAllocationParameters &IAP, FunctionDecl *&Operator,
2852 OverloadCandidateSet *AlignedCandidates, Expr *AlignArg, bool Diagnose) {
2853 Operator = nullptr;
2854 if (isTypeAwareAllocation(Mode: IAP.PassTypeIdentity)) {
2855 assert(S.isStdTypeIdentity(Args[0]->getType(), nullptr));
2856 // The internal overload resolution work mutates the argument list
2857 // in accordance with the spec. We may want to change that in future,
2858 // but for now we deal with this by making a copy of the non-type-identity
2859 // arguments.
2860 SmallVector<Expr *> UntypedParameters;
2861 UntypedParameters.reserve(N: Args.size() - 1);
2862 UntypedParameters.push_back(Elt: Args[1]);
2863 // Type aware allocation implicitly includes the alignment parameter so
2864 // only include it in the untyped parameter list if alignment was explicitly
2865 // requested
2866 if (isAlignedAllocation(Mode: IAP.PassAlignment))
2867 UntypedParameters.push_back(Elt: Args[2]);
2868 UntypedParameters.append(in_start: Args.begin() + 3, in_end: Args.end());
2869
2870 AlignedAllocationMode InitialAlignmentMode = IAP.PassAlignment;
2871 IAP.PassAlignment = AlignedAllocationMode::Yes;
2872 if (resolveAllocationOverloadInterior(
2873 S, R, Range, Mode: ResolveMode::Typed, Args, PassAlignment&: IAP.PassAlignment, Operator,
2874 AlignedCandidates, AlignArg, Diagnose))
2875 return true;
2876 if (Operator)
2877 return false;
2878
2879 // If we got to this point we could not find a matching typed operator
2880 // so we update the IAP flags, and revert to our stored copy of the
2881 // type-identity-less argument list.
2882 IAP.PassTypeIdentity = TypeAwareAllocationMode::No;
2883 IAP.PassAlignment = InitialAlignmentMode;
2884 Args = std::move(UntypedParameters);
2885 }
2886 assert(!S.isStdTypeIdentity(Args[0]->getType(), nullptr));
2887 return resolveAllocationOverloadInterior(
2888 S, R, Range, Mode: ResolveMode::Untyped, Args, PassAlignment&: IAP.PassAlignment, Operator,
2889 AlignedCandidates, AlignArg, Diagnose);
2890}
2891
2892bool Sema::FindAllocationFunctions(
2893 SourceLocation StartLoc, SourceRange Range,
2894 AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope,
2895 QualType AllocType, bool IsArray, ImplicitAllocationParameters &IAP,
2896 MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew,
2897 FunctionDecl *&OperatorDelete, bool Diagnose) {
2898 // --- Choosing an allocation function ---
2899 // C++ 5.3.4p8 - 14 & 18
2900 // 1) If looking in AllocationFunctionScope::Global scope for allocation
2901 // functions, only look in
2902 // the global scope. Else, if AllocationFunctionScope::Class, only look in
2903 // the scope of the allocated class. If AllocationFunctionScope::Both, look
2904 // in both.
2905 // 2) If an array size is given, look for operator new[], else look for
2906 // operator new.
2907 // 3) The first argument is always size_t. Append the arguments from the
2908 // placement form.
2909
2910 SmallVector<Expr*, 8> AllocArgs;
2911 AllocArgs.reserve(N: IAP.getNumImplicitArgs() + PlaceArgs.size());
2912
2913 // C++ [expr.new]p8:
2914 // If the allocated type is a non-array type, the allocation
2915 // function's name is operator new and the deallocation function's
2916 // name is operator delete. If the allocated type is an array
2917 // type, the allocation function's name is operator new[] and the
2918 // deallocation function's name is operator delete[].
2919 DeclarationName NewName = Context.DeclarationNames.getCXXOperatorName(
2920 Op: IsArray ? OO_Array_New : OO_New);
2921
2922 QualType AllocElemType = Context.getBaseElementType(QT: AllocType);
2923
2924 // We don't care about the actual value of these arguments.
2925 // FIXME: Should the Sema create the expression and embed it in the syntax
2926 // tree? Or should the consumer just recalculate the value?
2927 // FIXME: Using a dummy value will interact poorly with attribute enable_if.
2928
2929 // We use size_t as a stand in so that we can construct the init
2930 // expr on the stack
2931 QualType TypeIdentity = Context.getSizeType();
2932 if (isTypeAwareAllocation(Mode: IAP.PassTypeIdentity)) {
2933 QualType SpecializedTypeIdentity =
2934 tryBuildStdTypeIdentity(Type: IAP.Type, Loc: StartLoc);
2935 if (!SpecializedTypeIdentity.isNull()) {
2936 TypeIdentity = SpecializedTypeIdentity;
2937 if (RequireCompleteType(Loc: StartLoc, T: TypeIdentity,
2938 DiagID: diag::err_incomplete_type))
2939 return true;
2940 } else
2941 IAP.PassTypeIdentity = TypeAwareAllocationMode::No;
2942 }
2943 TypeAwareAllocationMode OriginalTypeAwareState = IAP.PassTypeIdentity;
2944
2945 CXXScalarValueInitExpr TypeIdentityParam(TypeIdentity, nullptr, StartLoc);
2946 if (isTypeAwareAllocation(Mode: IAP.PassTypeIdentity))
2947 AllocArgs.push_back(Elt: &TypeIdentityParam);
2948
2949 QualType SizeTy = Context.getSizeType();
2950 unsigned SizeTyWidth = Context.getTypeSize(T: SizeTy);
2951 IntegerLiteral Size(Context, llvm::APInt::getZero(numBits: SizeTyWidth), SizeTy,
2952 SourceLocation());
2953 AllocArgs.push_back(Elt: &Size);
2954
2955 QualType AlignValT = Context.VoidTy;
2956 bool IncludeAlignParam = isAlignedAllocation(Mode: IAP.PassAlignment) ||
2957 isTypeAwareAllocation(Mode: IAP.PassTypeIdentity);
2958 if (IncludeAlignParam) {
2959 DeclareGlobalNewDelete();
2960 AlignValT = Context.getTypeDeclType(Decl: getStdAlignValT());
2961 }
2962 CXXScalarValueInitExpr Align(AlignValT, nullptr, SourceLocation());
2963 if (IncludeAlignParam)
2964 AllocArgs.push_back(Elt: &Align);
2965
2966 llvm::append_range(C&: AllocArgs, R&: PlaceArgs);
2967
2968 // Find the allocation function.
2969 {
2970 LookupResult R(*this, NewName, StartLoc, LookupOrdinaryName);
2971
2972 // C++1z [expr.new]p9:
2973 // If the new-expression begins with a unary :: operator, the allocation
2974 // function's name is looked up in the global scope. Otherwise, if the
2975 // allocated type is a class type T or array thereof, the allocation
2976 // function's name is looked up in the scope of T.
2977 if (AllocElemType->isRecordType() &&
2978 NewScope != AllocationFunctionScope::Global)
2979 LookupQualifiedName(R, LookupCtx: AllocElemType->getAsCXXRecordDecl());
2980
2981 // We can see ambiguity here if the allocation function is found in
2982 // multiple base classes.
2983 if (R.isAmbiguous())
2984 return true;
2985
2986 // If this lookup fails to find the name, or if the allocated type is not
2987 // a class type, the allocation function's name is looked up in the
2988 // global scope.
2989 if (R.empty()) {
2990 if (NewScope == AllocationFunctionScope::Class)
2991 return true;
2992
2993 LookupQualifiedName(R, LookupCtx: Context.getTranslationUnitDecl());
2994 }
2995
2996 if (getLangOpts().OpenCLCPlusPlus && R.empty()) {
2997 if (PlaceArgs.empty()) {
2998 Diag(Loc: StartLoc, DiagID: diag::err_openclcxx_not_supported) << "default new";
2999 } else {
3000 Diag(Loc: StartLoc, DiagID: diag::err_openclcxx_placement_new);
3001 }
3002 return true;
3003 }
3004
3005 assert(!R.empty() && "implicitly declared allocation functions not found");
3006 assert(!R.isAmbiguous() && "global allocation functions are ambiguous");
3007
3008 // We do our own custom access checks below.
3009 R.suppressDiagnostics();
3010
3011 if (resolveAllocationOverload(S&: *this, R, Range, Args&: AllocArgs, IAP, Operator&: OperatorNew,
3012 /*Candidates=*/AlignedCandidates: nullptr,
3013 /*AlignArg=*/nullptr, Diagnose))
3014 return true;
3015 }
3016
3017 // We don't need an operator delete if we're running under -fno-exceptions.
3018 if (!getLangOpts().Exceptions) {
3019 OperatorDelete = nullptr;
3020 return false;
3021 }
3022
3023 // Note, the name of OperatorNew might have been changed from array to
3024 // non-array by resolveAllocationOverload.
3025 DeclarationName DeleteName = Context.DeclarationNames.getCXXOperatorName(
3026 Op: OperatorNew->getDeclName().getCXXOverloadedOperator() == OO_Array_New
3027 ? OO_Array_Delete
3028 : OO_Delete);
3029
3030 // C++ [expr.new]p19:
3031 //
3032 // If the new-expression begins with a unary :: operator, the
3033 // deallocation function's name is looked up in the global
3034 // scope. Otherwise, if the allocated type is a class type T or an
3035 // array thereof, the deallocation function's name is looked up in
3036 // the scope of T. If this lookup fails to find the name, or if
3037 // the allocated type is not a class type or array thereof, the
3038 // deallocation function's name is looked up in the global scope.
3039 LookupResult FoundDelete(*this, DeleteName, StartLoc, LookupOrdinaryName);
3040 if (AllocElemType->isRecordType() &&
3041 DeleteScope != AllocationFunctionScope::Global) {
3042 auto *RD =
3043 cast<CXXRecordDecl>(Val: AllocElemType->castAs<RecordType>()->getDecl());
3044 LookupQualifiedName(R&: FoundDelete, LookupCtx: RD);
3045 }
3046 if (FoundDelete.isAmbiguous())
3047 return true; // FIXME: clean up expressions?
3048
3049 // Filter out any destroying operator deletes. We can't possibly call such a
3050 // function in this context, because we're handling the case where the object
3051 // was not successfully constructed.
3052 // FIXME: This is not covered by the language rules yet.
3053 {
3054 LookupResult::Filter Filter = FoundDelete.makeFilter();
3055 while (Filter.hasNext()) {
3056 auto *FD = dyn_cast<FunctionDecl>(Val: Filter.next()->getUnderlyingDecl());
3057 if (FD && FD->isDestroyingOperatorDelete())
3058 Filter.erase();
3059 }
3060 Filter.done();
3061 }
3062
3063 auto GetRedeclContext = [](Decl *D) {
3064 return D->getDeclContext()->getRedeclContext();
3065 };
3066
3067 DeclContext *OperatorNewContext = GetRedeclContext(OperatorNew);
3068
3069 bool FoundGlobalDelete = FoundDelete.empty();
3070 bool IsClassScopedTypeAwareNew =
3071 isTypeAwareAllocation(Mode: IAP.PassTypeIdentity) &&
3072 OperatorNewContext->isRecord();
3073 auto DiagnoseMissingTypeAwareCleanupOperator = [&](bool IsPlacementOperator) {
3074 assert(isTypeAwareAllocation(IAP.PassTypeIdentity));
3075 if (Diagnose) {
3076 Diag(Loc: StartLoc, DiagID: diag::err_mismatching_type_aware_cleanup_deallocator)
3077 << OperatorNew->getDeclName() << IsPlacementOperator << DeleteName;
3078 Diag(Loc: OperatorNew->getLocation(), DiagID: diag::note_type_aware_operator_declared)
3079 << OperatorNew->isTypeAwareOperatorNewOrDelete()
3080 << OperatorNew->getDeclName() << OperatorNewContext;
3081 }
3082 };
3083 if (IsClassScopedTypeAwareNew && FoundDelete.empty()) {
3084 DiagnoseMissingTypeAwareCleanupOperator(/*isPlacementNew=*/false);
3085 return true;
3086 }
3087 if (FoundDelete.empty()) {
3088 FoundDelete.clear(Kind: LookupOrdinaryName);
3089
3090 if (DeleteScope == AllocationFunctionScope::Class)
3091 return true;
3092
3093 DeclareGlobalNewDelete();
3094 DeallocLookupMode LookupMode = isTypeAwareAllocation(Mode: OriginalTypeAwareState)
3095 ? DeallocLookupMode::OptionallyTyped
3096 : DeallocLookupMode::Untyped;
3097 LookupGlobalDeallocationFunctions(S&: *this, Loc: StartLoc, FoundDelete, Mode: LookupMode,
3098 Name: DeleteName);
3099 }
3100
3101 FoundDelete.suppressDiagnostics();
3102
3103 SmallVector<std::pair<DeclAccessPair,FunctionDecl*>, 2> Matches;
3104
3105 // Whether we're looking for a placement operator delete is dictated
3106 // by whether we selected a placement operator new, not by whether
3107 // we had explicit placement arguments. This matters for things like
3108 // struct A { void *operator new(size_t, int = 0); ... };
3109 // A *a = new A()
3110 //
3111 // We don't have any definition for what a "placement allocation function"
3112 // is, but we assume it's any allocation function whose
3113 // parameter-declaration-clause is anything other than (size_t).
3114 //
3115 // FIXME: Should (size_t, std::align_val_t) also be considered non-placement?
3116 // This affects whether an exception from the constructor of an overaligned
3117 // type uses the sized or non-sized form of aligned operator delete.
3118
3119 unsigned NonPlacementNewArgCount = 1; // size parameter
3120 if (isTypeAwareAllocation(Mode: IAP.PassTypeIdentity))
3121 NonPlacementNewArgCount =
3122 /* type-identity */ 1 + /* size */ 1 + /* alignment */ 1;
3123 bool isPlacementNew = !PlaceArgs.empty() ||
3124 OperatorNew->param_size() != NonPlacementNewArgCount ||
3125 OperatorNew->isVariadic();
3126
3127 if (isPlacementNew) {
3128 // C++ [expr.new]p20:
3129 // A declaration of a placement deallocation function matches the
3130 // declaration of a placement allocation function if it has the
3131 // same number of parameters and, after parameter transformations
3132 // (8.3.5), all parameter types except the first are
3133 // identical. [...]
3134 //
3135 // To perform this comparison, we compute the function type that
3136 // the deallocation function should have, and use that type both
3137 // for template argument deduction and for comparison purposes.
3138 QualType ExpectedFunctionType;
3139 {
3140 auto *Proto = OperatorNew->getType()->castAs<FunctionProtoType>();
3141
3142 SmallVector<QualType, 6> ArgTypes;
3143 int InitialParamOffset = 0;
3144 if (isTypeAwareAllocation(Mode: IAP.PassTypeIdentity)) {
3145 ArgTypes.push_back(Elt: TypeIdentity);
3146 InitialParamOffset = 1;
3147 }
3148 ArgTypes.push_back(Elt: Context.VoidPtrTy);
3149 for (unsigned I = ArgTypes.size() - InitialParamOffset,
3150 N = Proto->getNumParams();
3151 I < N; ++I)
3152 ArgTypes.push_back(Elt: Proto->getParamType(i: I));
3153
3154 FunctionProtoType::ExtProtoInfo EPI;
3155 // FIXME: This is not part of the standard's rule.
3156 EPI.Variadic = Proto->isVariadic();
3157
3158 ExpectedFunctionType
3159 = Context.getFunctionType(ResultTy: Context.VoidTy, Args: ArgTypes, EPI);
3160 }
3161
3162 for (LookupResult::iterator D = FoundDelete.begin(),
3163 DEnd = FoundDelete.end();
3164 D != DEnd; ++D) {
3165 FunctionDecl *Fn = nullptr;
3166 if (FunctionTemplateDecl *FnTmpl =
3167 dyn_cast<FunctionTemplateDecl>(Val: (*D)->getUnderlyingDecl())) {
3168 // Perform template argument deduction to try to match the
3169 // expected function type.
3170 TemplateDeductionInfo Info(StartLoc);
3171 if (DeduceTemplateArguments(FunctionTemplate: FnTmpl, ExplicitTemplateArgs: nullptr, ArgFunctionType: ExpectedFunctionType, Specialization&: Fn,
3172 Info) != TemplateDeductionResult::Success)
3173 continue;
3174 } else
3175 Fn = cast<FunctionDecl>(Val: (*D)->getUnderlyingDecl());
3176
3177 if (Context.hasSameType(T1: adjustCCAndNoReturn(ArgFunctionType: Fn->getType(),
3178 FunctionType: ExpectedFunctionType,
3179 /*AdjustExcpetionSpec*/AdjustExceptionSpec: true),
3180 T2: ExpectedFunctionType))
3181 Matches.push_back(Elt: std::make_pair(x: D.getPair(), y&: Fn));
3182 }
3183
3184 if (getLangOpts().CUDA)
3185 CUDA().EraseUnwantedMatches(Caller: getCurFunctionDecl(/*AllowLambda=*/true),
3186 Matches);
3187 if (Matches.empty() && isTypeAwareAllocation(Mode: IAP.PassTypeIdentity)) {
3188 DiagnoseMissingTypeAwareCleanupOperator(isPlacementNew);
3189 return true;
3190 }
3191 } else {
3192 // C++1y [expr.new]p22:
3193 // For a non-placement allocation function, the normal deallocation
3194 // function lookup is used
3195 //
3196 // Per [expr.delete]p10, this lookup prefers a member operator delete
3197 // without a size_t argument, but prefers a non-member operator delete
3198 // with a size_t where possible (which it always is in this case).
3199 llvm::SmallVector<UsualDeallocFnInfo, 4> BestDeallocFns;
3200 ImplicitDeallocationParameters IDP = {
3201 AllocElemType, OriginalTypeAwareState,
3202 alignedAllocationModeFromBool(
3203 IsAligned: hasNewExtendedAlignment(S&: *this, AllocType: AllocElemType)),
3204 sizedDeallocationModeFromBool(IsSized: FoundGlobalDelete)};
3205 UsualDeallocFnInfo Selected = resolveDeallocationOverload(
3206 S&: *this, R&: FoundDelete, IDP, Loc: StartLoc, BestFns: &BestDeallocFns);
3207 if (Selected && BestDeallocFns.empty())
3208 Matches.push_back(Elt: std::make_pair(x&: Selected.Found, y&: Selected.FD));
3209 else {
3210 // If we failed to select an operator, all remaining functions are viable
3211 // but ambiguous.
3212 for (auto Fn : BestDeallocFns)
3213 Matches.push_back(Elt: std::make_pair(x&: Fn.Found, y&: Fn.FD));
3214 }
3215 }
3216
3217 // C++ [expr.new]p20:
3218 // [...] If the lookup finds a single matching deallocation
3219 // function, that function will be called; otherwise, no
3220 // deallocation function will be called.
3221 if (Matches.size() == 1) {
3222 OperatorDelete = Matches[0].second;
3223 DeclContext *OperatorDeleteContext = GetRedeclContext(OperatorDelete);
3224 bool FoundTypeAwareOperator =
3225 OperatorDelete->isTypeAwareOperatorNewOrDelete() ||
3226 OperatorNew->isTypeAwareOperatorNewOrDelete();
3227 if (Diagnose && FoundTypeAwareOperator) {
3228 bool MismatchedTypeAwareness =
3229 OperatorDelete->isTypeAwareOperatorNewOrDelete() !=
3230 OperatorNew->isTypeAwareOperatorNewOrDelete();
3231 bool MismatchedContext = OperatorDeleteContext != OperatorNewContext;
3232 if (MismatchedTypeAwareness || MismatchedContext) {
3233 FunctionDecl *Operators[] = {OperatorDelete, OperatorNew};
3234 bool TypeAwareOperatorIndex =
3235 OperatorNew->isTypeAwareOperatorNewOrDelete();
3236 Diag(Loc: StartLoc, DiagID: diag::err_mismatching_type_aware_cleanup_deallocator)
3237 << Operators[TypeAwareOperatorIndex]->getDeclName()
3238 << isPlacementNew
3239 << Operators[!TypeAwareOperatorIndex]->getDeclName()
3240 << GetRedeclContext(Operators[TypeAwareOperatorIndex]);
3241 Diag(Loc: OperatorNew->getLocation(),
3242 DiagID: diag::note_type_aware_operator_declared)
3243 << OperatorNew->isTypeAwareOperatorNewOrDelete()
3244 << OperatorNew->getDeclName() << OperatorNewContext;
3245 Diag(Loc: OperatorDelete->getLocation(),
3246 DiagID: diag::note_type_aware_operator_declared)
3247 << OperatorDelete->isTypeAwareOperatorNewOrDelete()
3248 << OperatorDelete->getDeclName() << OperatorDeleteContext;
3249 }
3250 }
3251
3252 // C++1z [expr.new]p23:
3253 // If the lookup finds a usual deallocation function (3.7.4.2)
3254 // with a parameter of type std::size_t and that function, considered
3255 // as a placement deallocation function, would have been
3256 // selected as a match for the allocation function, the program
3257 // is ill-formed.
3258 if (getLangOpts().CPlusPlus11 && isPlacementNew &&
3259 isNonPlacementDeallocationFunction(S&: *this, FD: OperatorDelete)) {
3260 UsualDeallocFnInfo Info(*this,
3261 DeclAccessPair::make(D: OperatorDelete, AS: AS_public),
3262 AllocElemType, StartLoc);
3263 // Core issue, per mail to core reflector, 2016-10-09:
3264 // If this is a member operator delete, and there is a corresponding
3265 // non-sized member operator delete, this isn't /really/ a sized
3266 // deallocation function, it just happens to have a size_t parameter.
3267 bool IsSizedDelete = isSizedDeallocation(Mode: Info.IDP.PassSize);
3268 if (IsSizedDelete && !FoundGlobalDelete) {
3269 ImplicitDeallocationParameters SizeTestingIDP = {
3270 AllocElemType, Info.IDP.PassTypeIdentity, Info.IDP.PassAlignment,
3271 SizedDeallocationMode::No};
3272 auto NonSizedDelete = resolveDeallocationOverload(
3273 S&: *this, R&: FoundDelete, IDP: SizeTestingIDP, Loc: StartLoc);
3274 if (NonSizedDelete &&
3275 !isSizedDeallocation(Mode: NonSizedDelete.IDP.PassSize) &&
3276 NonSizedDelete.IDP.PassAlignment == Info.IDP.PassAlignment)
3277 IsSizedDelete = false;
3278 }
3279
3280 if (IsSizedDelete && !isTypeAwareAllocation(Mode: IAP.PassTypeIdentity)) {
3281 SourceRange R = PlaceArgs.empty()
3282 ? SourceRange()
3283 : SourceRange(PlaceArgs.front()->getBeginLoc(),
3284 PlaceArgs.back()->getEndLoc());
3285 Diag(Loc: StartLoc, DiagID: diag::err_placement_new_non_placement_delete) << R;
3286 if (!OperatorDelete->isImplicit())
3287 Diag(Loc: OperatorDelete->getLocation(), DiagID: diag::note_previous_decl)
3288 << DeleteName;
3289 }
3290 }
3291 if (CheckDeleteOperator(S&: *this, StartLoc, Range, Diagnose,
3292 NamingClass: FoundDelete.getNamingClass(), Decl: Matches[0].first,
3293 Operator: Matches[0].second))
3294 return true;
3295
3296 } else if (!Matches.empty()) {
3297 // We found multiple suitable operators. Per [expr.new]p20, that means we
3298 // call no 'operator delete' function, but we should at least warn the user.
3299 // FIXME: Suppress this warning if the construction cannot throw.
3300 Diag(Loc: StartLoc, DiagID: diag::warn_ambiguous_suitable_delete_function_found)
3301 << DeleteName << AllocElemType;
3302
3303 for (auto &Match : Matches)
3304 Diag(Loc: Match.second->getLocation(),
3305 DiagID: diag::note_member_declared_here) << DeleteName;
3306 }
3307
3308 return false;
3309}
3310
/// Declare the implicit global allocation and deallocation functions
/// (operator new, operator new[], operator delete, operator delete[],
/// plus the sized and aligned variants where the language mode provides
/// them), if they have not been declared in this translation unit yet.
void Sema::DeclareGlobalNewDelete() {
  // All variants are declared together on the first call, so a single flag
  // is enough to make this idempotent.
  if (GlobalNewDeleteDeclared)
    return;

  // The implicitly declared new and delete operators
  // are not supported in OpenCL.
  if (getLangOpts().OpenCLCPlusPlus)
    return;

  // C++ [basic.stc.dynamic.general]p2:
  //   The library provides default definitions for the global allocation
  //   and deallocation functions. Some global allocation and deallocation
  //   functions are replaceable ([new.delete]); these are attached to the
  //   global module ([module.unit]).
  // So, when inside a module unit, declare them inside an implicit global
  // module fragment (popped again at the end of this function).
  if (getLangOpts().CPlusPlusModules && getCurrentModule())
    PushGlobalModuleFragment(BeginLoc: SourceLocation());

  // C++ [basic.std.dynamic]p2:
  //   [...] The following allocation and deallocation functions (18.4) are
  //   implicitly declared in global scope in each translation unit of a
  //   program
  //
  //     C++03:
  //     void* operator new(std::size_t) throw(std::bad_alloc);
  //     void* operator new[](std::size_t) throw(std::bad_alloc);
  //     void operator delete(void*) throw();
  //     void operator delete[](void*) throw();
  //     C++11:
  //     void* operator new(std::size_t);
  //     void* operator new[](std::size_t);
  //     void operator delete(void*) noexcept;
  //     void operator delete[](void*) noexcept;
  //     C++1y:
  //     void* operator new(std::size_t);
  //     void* operator new[](std::size_t);
  //     void operator delete(void*) noexcept;
  //     void operator delete[](void*) noexcept;
  //     void operator delete(void*, std::size_t) noexcept;
  //     void operator delete[](void*, std::size_t) noexcept;
  //
  //   These implicit declarations introduce only the function names operator
  //   new, operator new[], operator delete, operator delete[].
  //
  // Here, we need to refer to std::bad_alloc, so we will implicitly declare
  // "std" or "bad_alloc" as necessary to form the exception specification.
  // However, we do not make these implicit declarations visible to name
  // lookup.
  if (!StdBadAlloc && !getLangOpts().CPlusPlus11) {
    // The "std::bad_alloc" class has not yet been declared, so build it
    // implicitly.
    StdBadAlloc = CXXRecordDecl::Create(
        C: Context, TK: TagTypeKind::Class, DC: getOrCreateStdNamespace(),
        StartLoc: SourceLocation(), IdLoc: SourceLocation(),
        Id: &PP.getIdentifierTable().get(Name: "bad_alloc"), PrevDecl: nullptr);
    getStdBadAlloc()->setImplicit(true);

    // The implicitly declared "std::bad_alloc" should live in the global
    // module fragment.
    if (TheGlobalModuleFragment) {
      getStdBadAlloc()->setModuleOwnershipKind(
          Decl::ModuleOwnershipKind::ReachableWhenImported);
      getStdBadAlloc()->setLocalOwningModule(TheGlobalModuleFragment);
    }
  }
  if (!StdAlignValT && getLangOpts().AlignedAllocation) {
    // The "std::align_val_t" enum class has not yet been declared, so build it
    // implicitly.
    auto *AlignValT = EnumDecl::Create(
        C&: Context, DC: getOrCreateStdNamespace(), StartLoc: SourceLocation(), IdLoc: SourceLocation(),
        Id: &PP.getIdentifierTable().get(Name: "align_val_t"), PrevDecl: nullptr, IsScoped: true, IsScopedUsingClassTag: true, IsFixed: true);

    // The implicitly declared "std::align_val_t" should live in the global
    // module fragment.
    if (TheGlobalModuleFragment) {
      AlignValT->setModuleOwnershipKind(
          Decl::ModuleOwnershipKind::ReachableWhenImported);
      AlignValT->setLocalOwningModule(TheGlobalModuleFragment);
    }

    // std::align_val_t is 'enum class align_val_t : std::size_t'.
    AlignValT->setIntegerType(Context.getSizeType());
    AlignValT->setPromotionType(Context.getSizeType());
    AlignValT->setImplicit(true);

    StdAlignValT = AlignValT;
  }

  // Record that the declarations exist before actually creating them.
  GlobalNewDeleteDeclared = true;

  QualType VoidPtr = Context.getPointerType(T: Context.VoidTy);
  QualType SizeT = Context.getSizeType();

  // Declares every applicable variant (plain, sized, aligned, sized+aligned)
  // of a single operator with the given return type and first parameter.
  auto DeclareGlobalAllocationFunctions = [&](OverloadedOperatorKind Kind,
                                              QualType Return, QualType Param) {
    llvm::SmallVector<QualType, 3> Params;
    Params.push_back(Elt: Param);

    // Create up to four variants of the function (sized/aligned).
    bool HasSizedVariant = getLangOpts().SizedDeallocation &&
                           (Kind == OO_Delete || Kind == OO_Array_Delete);
    bool HasAlignedVariant = getLangOpts().AlignedAllocation;

    int NumSizeVariants = (HasSizedVariant ? 2 : 1);
    int NumAlignVariants = (HasAlignedVariant ? 2 : 1);
    for (int Sized = 0; Sized < NumSizeVariants; ++Sized) {
      // The size_t parameter, once added, stays for the aligned variants too
      // (the aligned parameter is appended after it and popped each time).
      if (Sized)
        Params.push_back(Elt: SizeT);

      for (int Aligned = 0; Aligned < NumAlignVariants; ++Aligned) {
        if (Aligned)
          Params.push_back(Elt: Context.getTypeDeclType(Decl: getStdAlignValT()));

        DeclareGlobalAllocationFunction(
            Name: Context.DeclarationNames.getCXXOperatorName(Op: Kind), Return, Params);

        if (Aligned)
          Params.pop_back();
      }
    }
  };

  DeclareGlobalAllocationFunctions(OO_New, VoidPtr, SizeT);
  DeclareGlobalAllocationFunctions(OO_Array_New, VoidPtr, SizeT);
  DeclareGlobalAllocationFunctions(OO_Delete, Context.VoidTy, VoidPtr);
  DeclareGlobalAllocationFunctions(OO_Array_Delete, Context.VoidTy, VoidPtr);

  // Leave the global module fragment entered above, if any.
  if (getLangOpts().CPlusPlusModules && getCurrentModule())
    PopGlobalModuleFragment();
}
3439
/// DeclareGlobalAllocationFunction - Declares a single implicit global
/// allocation function if it doesn't already exist.
///
/// \param Name the operator being declared (one of operator new, new[],
///        delete, delete[]).
/// \param Return the return type of the variant (void* for new, void for
///        delete).
/// \param Params the canonical parameter types of the variant to declare.
void Sema::DeclareGlobalAllocationFunction(DeclarationName Name,
                                           QualType Return,
                                           ArrayRef<QualType> Params) {
  DeclContext *GlobalCtx = Context.getTranslationUnitDecl();

  // Check if this function is already declared: look for a non-template
  // function at global scope whose canonical, unqualified parameter types
  // match Params exactly.
  DeclContext::lookup_result R = GlobalCtx->lookup(Name);
  for (DeclContext::lookup_iterator Alloc = R.begin(), AllocEnd = R.end();
       Alloc != AllocEnd; ++Alloc) {
    // Only look at non-template functions, as it is the predefined,
    // non-templated allocation function we are trying to declare here.
    if (FunctionDecl *Func = dyn_cast<FunctionDecl>(Val: *Alloc)) {
      if (Func->getNumParams() == Params.size()) {
        llvm::SmallVector<QualType, 3> FuncParams;
        for (auto *P : Func->parameters())
          FuncParams.push_back(
              Elt: Context.getCanonicalType(T: P->getType().getUnqualifiedType()));
        if (llvm::ArrayRef(FuncParams) == Params) {
          // Make the function visible to name lookup, even if we found it in
          // an unimported module. It either is an implicitly-declared global
          // allocation function, or is suppressing that function.
          Func->setVisibleDespiteOwningModule();
          return;
        }
      }
    }
  }

  FunctionProtoType::ExtProtoInfo EPI(Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/false, /*IsBuiltin=*/true));

  // Select the exception specification: C++98 operator new gets
  // throw(std::bad_alloc); C++11 deallocation functions get noexcept,
  // pre-C++11 ones get throw().
  QualType BadAllocType;
  bool HasBadAllocExceptionSpec = Name.isAnyOperatorNew();
  if (HasBadAllocExceptionSpec) {
    if (!getLangOpts().CPlusPlus11) {
      // std::bad_alloc was implicitly built by DeclareGlobalNewDelete if the
      // <new> header hadn't declared it.
      BadAllocType = Context.getTypeDeclType(Decl: getStdBadAlloc());
      assert(StdBadAlloc && "Must have std::bad_alloc declared");
      EPI.ExceptionSpec.Type = EST_Dynamic;
      EPI.ExceptionSpec.Exceptions = llvm::ArrayRef(BadAllocType);
    }
    if (getLangOpts().NewInfallible) {
      // -fnew-infallible: override with an empty dynamic exception
      // specification, i.e. throw().
      EPI.ExceptionSpec.Type = EST_DynamicNone;
    }
  } else {
    EPI.ExceptionSpec =
        getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone;
  }

  // Build one implicit declaration with the computed type, optionally carrying
  // ExtraAttr (used below for the CUDA host/device split).
  auto CreateAllocationFunctionDecl = [&](Attr *ExtraAttr) {
    QualType FnType = Context.getFunctionType(ResultTy: Return, Args: Params, EPI);
    FunctionDecl *Alloc = FunctionDecl::Create(
        C&: Context, DC: GlobalCtx, StartLoc: SourceLocation(), NLoc: SourceLocation(), N: Name, T: FnType,
        /*TInfo=*/nullptr, SC: SC_None, UsesFPIntrin: getCurFPFeatures().isFPConstrained(), isInlineSpecified: false,
        hasWrittenPrototype: true);
    Alloc->setImplicit();
    // Global allocation functions should always be visible.
    Alloc->setVisibleDespiteOwningModule();

    if (HasBadAllocExceptionSpec && getLangOpts().NewInfallible &&
        !getLangOpts().CheckNew)
      Alloc->addAttr(
          A: ReturnsNonNullAttr::CreateImplicit(Ctx&: Context, Range: Alloc->getLocation()));

    // C++ [basic.stc.dynamic.general]p2:
    //   The library provides default definitions for the global allocation
    //   and deallocation functions. Some global allocation and deallocation
    //   functions are replaceable ([new.delete]); these are attached to the
    //   global module ([module.unit]).
    //
    // In the language wording, these functions are attached to the global
    // module all the time. But in the implementation, the global module
    // is only meaningful when we're in a module unit. So here we attach
    // these allocation functions to global module conditionally.
    if (TheGlobalModuleFragment) {
      Alloc->setModuleOwnershipKind(
          Decl::ModuleOwnershipKind::ReachableWhenImported);
      Alloc->setLocalOwningModule(TheGlobalModuleFragment);
    }

    // Honor -fvisibility-global-new-delete=<...>.
    if (LangOpts.hasGlobalAllocationFunctionVisibility())
      Alloc->addAttr(A: VisibilityAttr::CreateImplicit(
          Ctx&: Context, Visibility: LangOpts.hasHiddenGlobalAllocationFunctionVisibility()
                       ? VisibilityAttr::Hidden
                   : LangOpts.hasProtectedGlobalAllocationFunctionVisibility()
                       ? VisibilityAttr::Protected
                       : VisibilityAttr::Default));

    llvm::SmallVector<ParmVarDecl *, 3> ParamDecls;
    for (QualType T : Params) {
      ParamDecls.push_back(Elt: ParmVarDecl::Create(
          C&: Context, DC: Alloc, StartLoc: SourceLocation(), IdLoc: SourceLocation(), Id: nullptr, T,
          /*TInfo=*/nullptr, S: SC_None, DefArg: nullptr));
      ParamDecls.back()->setImplicit();
    }
    Alloc->setParams(ParamDecls);
    if (ExtraAttr)
      Alloc->addAttr(A: ExtraAttr);
    AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(FD: Alloc);
    Context.getTranslationUnitDecl()->addDecl(D: Alloc);
    IdResolver.tryAddTopLevelDecl(D: Alloc, Name);
  };

  if (!LangOpts.CUDA)
    CreateAllocationFunctionDecl(nullptr);
  else {
    // Host and device get their own declaration so each can be
    // defined or re-declared independently.
    CreateAllocationFunctionDecl(CUDAHostAttr::CreateImplicit(Ctx&: Context));
    CreateAllocationFunctionDecl(CUDADeviceAttr::CreateImplicit(Ctx&: Context));
  }
}
3553
3554FunctionDecl *
3555Sema::FindUsualDeallocationFunction(SourceLocation StartLoc,
3556 ImplicitDeallocationParameters IDP,
3557 DeclarationName Name) {
3558 DeclareGlobalNewDelete();
3559
3560 LookupResult FoundDelete(*this, Name, StartLoc, LookupOrdinaryName);
3561 LookupGlobalDeallocationFunctions(S&: *this, Loc: StartLoc, FoundDelete,
3562 Mode: DeallocLookupMode::OptionallyTyped, Name);
3563
3564 // FIXME: It's possible for this to result in ambiguity, through a
3565 // user-declared variadic operator delete or the enable_if attribute. We
3566 // should probably not consider those cases to be usual deallocation
3567 // functions. But for now we just make an arbitrary choice in that case.
3568 auto Result = resolveDeallocationOverload(S&: *this, R&: FoundDelete, IDP, Loc: StartLoc);
3569 if (!Result)
3570 return nullptr;
3571
3572 if (CheckDeleteOperator(S&: *this, StartLoc, Range: StartLoc, /*Diagnose=*/true,
3573 NamingClass: FoundDelete.getNamingClass(), Decl: Result.Found,
3574 Operator: Result.FD))
3575 return nullptr;
3576
3577 assert(Result.FD && "operator delete missing from global scope?");
3578 return Result.FD;
3579}
3580
3581FunctionDecl *Sema::FindDeallocationFunctionForDestructor(SourceLocation Loc,
3582 CXXRecordDecl *RD,
3583 bool Diagnose) {
3584 DeclarationName Name = Context.DeclarationNames.getCXXOperatorName(Op: OO_Delete);
3585
3586 FunctionDecl *OperatorDelete = nullptr;
3587 QualType DeallocType = Context.getRecordType(Decl: RD);
3588 ImplicitDeallocationParameters IDP = {
3589 DeallocType, ShouldUseTypeAwareOperatorNewOrDelete(),
3590 AlignedAllocationMode::No, SizedDeallocationMode::No};
3591
3592 if (FindDeallocationFunction(StartLoc: Loc, RD, Name, Operator&: OperatorDelete, IDP, Diagnose))
3593 return nullptr;
3594
3595 if (OperatorDelete)
3596 return OperatorDelete;
3597
3598 // If there's no class-specific operator delete, look up the global
3599 // non-array delete.
3600 IDP.PassAlignment = alignedAllocationModeFromBool(
3601 IsAligned: hasNewExtendedAlignment(S&: *this, AllocType: DeallocType));
3602 IDP.PassSize = SizedDeallocationMode::Yes;
3603 return FindUsualDeallocationFunction(StartLoc: Loc, IDP, Name);
3604}
3605
3606bool Sema::FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
3607 DeclarationName Name,
3608 FunctionDecl *&Operator,
3609 ImplicitDeallocationParameters IDP,
3610 bool Diagnose) {
3611 LookupResult Found(*this, Name, StartLoc, LookupOrdinaryName);
3612 // Try to find operator delete/operator delete[] in class scope.
3613 LookupQualifiedName(R&: Found, LookupCtx: RD);
3614
3615 if (Found.isAmbiguous())
3616 return true;
3617
3618 Found.suppressDiagnostics();
3619
3620 if (!isAlignedAllocation(Mode: IDP.PassAlignment) &&
3621 hasNewExtendedAlignment(S&: *this, AllocType: Context.getRecordType(Decl: RD)))
3622 IDP.PassAlignment = AlignedAllocationMode::Yes;
3623
3624 // C++17 [expr.delete]p10:
3625 // If the deallocation functions have class scope, the one without a
3626 // parameter of type std::size_t is selected.
3627 llvm::SmallVector<UsualDeallocFnInfo, 4> Matches;
3628 resolveDeallocationOverload(S&: *this, R&: Found, IDP, Loc: StartLoc, BestFns: &Matches);
3629
3630 // If we could find an overload, use it.
3631 if (Matches.size() == 1) {
3632 Operator = cast<CXXMethodDecl>(Val: Matches[0].FD);
3633 return CheckDeleteOperator(S&: *this, StartLoc, Range: StartLoc, Diagnose,
3634 NamingClass: Found.getNamingClass(), Decl: Matches[0].Found,
3635 Operator);
3636 }
3637
3638 // We found multiple suitable operators; complain about the ambiguity.
3639 // FIXME: The standard doesn't say to do this; it appears that the intent
3640 // is that this should never happen.
3641 if (!Matches.empty()) {
3642 if (Diagnose) {
3643 Diag(Loc: StartLoc, DiagID: diag::err_ambiguous_suitable_delete_member_function_found)
3644 << Name << RD;
3645 for (auto &Match : Matches)
3646 Diag(Loc: Match.FD->getLocation(), DiagID: diag::note_member_declared_here) << Name;
3647 }
3648 return true;
3649 }
3650
3651 // We did find operator delete/operator delete[] declarations, but
3652 // none of them were suitable.
3653 if (!Found.empty()) {
3654 if (Diagnose) {
3655 Diag(Loc: StartLoc, DiagID: diag::err_no_suitable_delete_member_function_found)
3656 << Name << RD;
3657
3658 for (NamedDecl *D : Found)
3659 Diag(Loc: D->getUnderlyingDecl()->getLocation(),
3660 DiagID: diag::note_member_declared_here) << Name;
3661 }
3662 return true;
3663 }
3664
3665 Operator = nullptr;
3666 return false;
3667}
3668
namespace {
/// Checks whether a delete-expression and the new-expression used for
/// initializing the deletee have the same array form.
class MismatchingNewDeleteDetector {
public:
  enum MismatchResult {
    /// Indicates that there is no mismatch or a mismatch cannot be proven.
    NoMismatch,
    /// Indicates that variable is initialized with mismatching form of \a new.
    VarInitMismatches,
    /// Indicates that member is initialized with mismatching form of \a new.
    MemberInitMismatches,
    /// Indicates that 1 or more constructors' definitions could not be
    /// analyzed, and they will be checked again at the end of translation unit.
    AnalyzeLater
  };

  /// \param EndOfTU True, if this is the final analysis at the end of
  /// translation unit. False, if this is the initial analysis at the point
  /// delete-expression was encountered.
  explicit MismatchingNewDeleteDetector(bool EndOfTU)
      : Field(nullptr), IsArrayForm(false), EndOfTU(EndOfTU),
        HasUndefinedConstructors(false) {}

  /// Checks whether pointee of a delete-expression is initialized with
  /// matching form of new-expression.
  ///
  /// If return value is \c VarInitMismatches or \c MemberInitMismatches at the
  /// point where delete-expression is encountered, then a warning will be
  /// issued immediately. If return value is \c AnalyzeLater at the point where
  /// delete-expression is seen, then member will be analyzed at the end of
  /// translation unit. \c AnalyzeLater is returned iff at least one constructor
  /// couldn't be analyzed. If at least one constructor initializes the member
  /// with matching type of new, the return value is \c NoMismatch.
  MismatchResult analyzeDeleteExpr(const CXXDeleteExpr *DE);
  /// Analyzes a class member.
  /// \param Field Class member to analyze.
  /// \param DeleteWasArrayForm Array form-ness of the delete-expression used
  /// for deleting the \p Field.
  MismatchResult analyzeField(FieldDecl *Field, bool DeleteWasArrayForm);
  // Member currently under analysis (set by analyzeField).
  FieldDecl *Field;
  /// List of mismatching new-expressions used for initialization of the pointee
  llvm::SmallVector<const CXXNewExpr *, 4> NewExprs;
  /// Indicates whether delete-expression was in array form.
  bool IsArrayForm;

private:
  const bool EndOfTU;
  /// Indicates that there is at least one constructor without body.
  bool HasUndefinedConstructors;
  /// Returns \c CXXNewExpr from given initialization expression.
  /// \param E Expression used for initializing pointee in delete-expression.
  /// E can be a single-element \c InitListExpr consisting of new-expression.
  const CXXNewExpr *getNewExprFromInitListOrExpr(const Expr *E);
  /// Returns whether member is initialized with mismatching form of
  /// \c new either by the member initializer or in-class initialization.
  ///
  /// If bodies of all constructors are not visible at the end of translation
  /// unit or at least one constructor initializes member with the matching
  /// form of \c new, mismatch cannot be proven, and this function will return
  /// \c NoMismatch.
  MismatchResult analyzeMemberExpr(const MemberExpr *ME);
  /// Returns whether variable is initialized with mismatching form of
  /// \c new.
  ///
  /// If variable is initialized with matching form of \c new or variable is not
  /// initialized with a \c new expression, this function will return true.
  /// If variable is initialized with mismatching form of \c new, returns false.
  /// \param D Variable to analyze.
  bool hasMatchingVarInit(const DeclRefExpr *D);
  /// Checks whether the constructor initializes pointee with mismatching
  /// form of \c new.
  ///
  /// Returns true, if member is initialized with matching form of \c new in
  /// member initializer list. Returns false, if member is initialized with the
  /// mismatching form of \c new in this constructor's initializer or given
  /// constructor isn't defined at the point where delete-expression is seen, or
  /// member isn't initialized by the constructor.
  bool hasMatchingNewInCtor(const CXXConstructorDecl *CD);
  /// Checks whether member is initialized with matching form of
  /// \c new in member initializer list.
  bool hasMatchingNewInCtorInit(const CXXCtorInitializer *CI);
  /// Checks whether member is initialized with mismatching form of \c new by
  /// in-class initializer.
  MismatchResult analyzeInClassInitializer();
};
}
3756
3757MismatchingNewDeleteDetector::MismatchResult
3758MismatchingNewDeleteDetector::analyzeDeleteExpr(const CXXDeleteExpr *DE) {
3759 NewExprs.clear();
3760 assert(DE && "Expected delete-expression");
3761 IsArrayForm = DE->isArrayForm();
3762 const Expr *E = DE->getArgument()->IgnoreParenImpCasts();
3763 if (const MemberExpr *ME = dyn_cast<const MemberExpr>(Val: E)) {
3764 return analyzeMemberExpr(ME);
3765 } else if (const DeclRefExpr *D = dyn_cast<const DeclRefExpr>(Val: E)) {
3766 if (!hasMatchingVarInit(D))
3767 return VarInitMismatches;
3768 }
3769 return NoMismatch;
3770}
3771
3772const CXXNewExpr *
3773MismatchingNewDeleteDetector::getNewExprFromInitListOrExpr(const Expr *E) {
3774 assert(E != nullptr && "Expected a valid initializer expression");
3775 E = E->IgnoreParenImpCasts();
3776 if (const InitListExpr *ILE = dyn_cast<const InitListExpr>(Val: E)) {
3777 if (ILE->getNumInits() == 1)
3778 E = dyn_cast<const CXXNewExpr>(Val: ILE->getInit(Init: 0)->IgnoreParenImpCasts());
3779 }
3780
3781 return dyn_cast_or_null<const CXXNewExpr>(Val: E);
3782}
3783
3784bool MismatchingNewDeleteDetector::hasMatchingNewInCtorInit(
3785 const CXXCtorInitializer *CI) {
3786 const CXXNewExpr *NE = nullptr;
3787 if (Field == CI->getMember() &&
3788 (NE = getNewExprFromInitListOrExpr(E: CI->getInit()))) {
3789 if (NE->isArray() == IsArrayForm)
3790 return true;
3791 else
3792 NewExprs.push_back(Elt: NE);
3793 }
3794 return false;
3795}
3796
3797bool MismatchingNewDeleteDetector::hasMatchingNewInCtor(
3798 const CXXConstructorDecl *CD) {
3799 if (CD->isImplicit())
3800 return false;
3801 const FunctionDecl *Definition = CD;
3802 if (!CD->isThisDeclarationADefinition() && !CD->isDefined(Definition)) {
3803 HasUndefinedConstructors = true;
3804 return EndOfTU;
3805 }
3806 for (const auto *CI : cast<const CXXConstructorDecl>(Val: Definition)->inits()) {
3807 if (hasMatchingNewInCtorInit(CI))
3808 return true;
3809 }
3810 return false;
3811}
3812
3813MismatchingNewDeleteDetector::MismatchResult
3814MismatchingNewDeleteDetector::analyzeInClassInitializer() {
3815 assert(Field != nullptr && "This should be called only for members");
3816 const Expr *InitExpr = Field->getInClassInitializer();
3817 if (!InitExpr)
3818 return EndOfTU ? NoMismatch : AnalyzeLater;
3819 if (const CXXNewExpr *NE = getNewExprFromInitListOrExpr(E: InitExpr)) {
3820 if (NE->isArray() != IsArrayForm) {
3821 NewExprs.push_back(Elt: NE);
3822 return MemberInitMismatches;
3823 }
3824 }
3825 return NoMismatch;
3826}
3827
3828MismatchingNewDeleteDetector::MismatchResult
3829MismatchingNewDeleteDetector::analyzeField(FieldDecl *Field,
3830 bool DeleteWasArrayForm) {
3831 assert(Field != nullptr && "Analysis requires a valid class member.");
3832 this->Field = Field;
3833 IsArrayForm = DeleteWasArrayForm;
3834 const CXXRecordDecl *RD = cast<const CXXRecordDecl>(Val: Field->getParent());
3835 for (const auto *CD : RD->ctors()) {
3836 if (hasMatchingNewInCtor(CD))
3837 return NoMismatch;
3838 }
3839 if (HasUndefinedConstructors)
3840 return EndOfTU ? NoMismatch : AnalyzeLater;
3841 if (!NewExprs.empty())
3842 return MemberInitMismatches;
3843 return Field->hasInClassInitializer() ? analyzeInClassInitializer()
3844 : NoMismatch;
3845}
3846
3847MismatchingNewDeleteDetector::MismatchResult
3848MismatchingNewDeleteDetector::analyzeMemberExpr(const MemberExpr *ME) {
3849 assert(ME != nullptr && "Expected a member expression");
3850 if (FieldDecl *F = dyn_cast<FieldDecl>(Val: ME->getMemberDecl()))
3851 return analyzeField(Field: F, DeleteWasArrayForm: IsArrayForm);
3852 return NoMismatch;
3853}
3854
3855bool MismatchingNewDeleteDetector::hasMatchingVarInit(const DeclRefExpr *D) {
3856 const CXXNewExpr *NE = nullptr;
3857 if (const VarDecl *VD = dyn_cast<const VarDecl>(Val: D->getDecl())) {
3858 if (VD->hasInit() && (NE = getNewExprFromInitListOrExpr(E: VD->getInit())) &&
3859 NE->isArray() != IsArrayForm) {
3860 NewExprs.push_back(Elt: NE);
3861 }
3862 }
3863 return NewExprs.empty();
3864}
3865
3866static void
3867DiagnoseMismatchedNewDelete(Sema &SemaRef, SourceLocation DeleteLoc,
3868 const MismatchingNewDeleteDetector &Detector) {
3869 SourceLocation EndOfDelete = SemaRef.getLocForEndOfToken(Loc: DeleteLoc);
3870 FixItHint H;
3871 if (!Detector.IsArrayForm)
3872 H = FixItHint::CreateInsertion(InsertionLoc: EndOfDelete, Code: "[]");
3873 else {
3874 SourceLocation RSquare = Lexer::findLocationAfterToken(
3875 loc: DeleteLoc, TKind: tok::l_square, SM: SemaRef.getSourceManager(),
3876 LangOpts: SemaRef.getLangOpts(), SkipTrailingWhitespaceAndNewLine: true);
3877 if (RSquare.isValid())
3878 H = FixItHint::CreateRemoval(RemoveRange: SourceRange(EndOfDelete, RSquare));
3879 }
3880 SemaRef.Diag(Loc: DeleteLoc, DiagID: diag::warn_mismatched_delete_new)
3881 << Detector.IsArrayForm << H;
3882
3883 for (const auto *NE : Detector.NewExprs)
3884 SemaRef.Diag(Loc: NE->getExprLoc(), DiagID: diag::note_allocated_here)
3885 << Detector.IsArrayForm;
3886}
3887
3888void Sema::AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE) {
3889 if (Diags.isIgnored(DiagID: diag::warn_mismatched_delete_new, Loc: SourceLocation()))
3890 return;
3891 MismatchingNewDeleteDetector Detector(/*EndOfTU=*/false);
3892 switch (Detector.analyzeDeleteExpr(DE)) {
3893 case MismatchingNewDeleteDetector::VarInitMismatches:
3894 case MismatchingNewDeleteDetector::MemberInitMismatches: {
3895 DiagnoseMismatchedNewDelete(SemaRef&: *this, DeleteLoc: DE->getBeginLoc(), Detector);
3896 break;
3897 }
3898 case MismatchingNewDeleteDetector::AnalyzeLater: {
3899 DeleteExprs[Detector.Field].push_back(
3900 Elt: std::make_pair(x: DE->getBeginLoc(), y: DE->isArrayForm()));
3901 break;
3902 }
3903 case MismatchingNewDeleteDetector::NoMismatch:
3904 break;
3905 }
3906}
3907
3908void Sema::AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
3909 bool DeleteWasArrayForm) {
3910 MismatchingNewDeleteDetector Detector(/*EndOfTU=*/true);
3911 switch (Detector.analyzeField(Field, DeleteWasArrayForm)) {
3912 case MismatchingNewDeleteDetector::VarInitMismatches:
3913 llvm_unreachable("This analysis should have been done for class members.");
3914 case MismatchingNewDeleteDetector::AnalyzeLater:
3915 llvm_unreachable("Analysis cannot be postponed any point beyond end of "
3916 "translation unit.");
3917 case MismatchingNewDeleteDetector::MemberInitMismatches:
3918 DiagnoseMismatchedNewDelete(SemaRef&: *this, DeleteLoc, Detector);
3919 break;
3920 case MismatchingNewDeleteDetector::NoMismatch:
3921 break;
3922 }
3923}
3924
/// Semantic analysis for a delete-expression: checks the operand, selects
/// the operator delete (class-specific or global), references the
/// destructor, and builds the CXXDeleteExpr. Returns ExprError() on any
/// hard error; for dependent operands most checking is deferred.
ExprResult
Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
                     bool ArrayForm, Expr *ExE) {
  // C++ [expr.delete]p1:
  //   The operand shall have a pointer type, or a class type having a single
  //   non-explicit conversion function to a pointer type. The result has type
  //   void.
  //
  // DR599 amends "pointer type" to "pointer to object type" in both cases.

  ExprResult Ex = ExE;
  FunctionDecl *OperatorDelete = nullptr;
  // ArrayForm may be corrected below (deleting an array lvalue); keep the
  // form as written for the AST node.
  bool ArrayFormAsWritten = ArrayForm;
  bool UsualArrayDeleteWantsSize = false;

  // All of the operand/operator checking below requires a non-dependent
  // type; for dependent operands we just build the node at the end.
  if (!Ex.get()->isTypeDependent()) {
    // Perform lvalue-to-rvalue cast, if needed.
    Ex = DefaultLvalueConversion(E: Ex.get());
    if (Ex.isInvalid())
      return ExprError();

    QualType Type = Ex.get()->getType();

    // Contextually converts a class-type operand to a pointer to
    // incomplete-or-object type, per [expr.delete]p1, diagnosing the
    // various failure modes.
    class DeleteConverter : public ContextualImplicitConverter {
    public:
      DeleteConverter() : ContextualImplicitConverter(false, true) {}

      bool match(QualType ConvType) override {
        // FIXME: If we have an operator T* and an operator void*, we must pick
        // the operator T*.
        if (const PointerType *ConvPtrType = ConvType->getAs<PointerType>())
          if (ConvPtrType->getPointeeType()->isIncompleteOrObjectType())
            return true;
        return false;
      }

      SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc,
                                            QualType T) override {
        return S.Diag(Loc, DiagID: diag::err_delete_operand) << T;
      }

      SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc,
                                               QualType T) override {
        return S.Diag(Loc, DiagID: diag::err_delete_incomplete_class_type) << T;
      }

      SemaDiagnosticBuilder diagnoseExplicitConv(Sema &S, SourceLocation Loc,
                                                 QualType T,
                                                 QualType ConvTy) override {
        return S.Diag(Loc, DiagID: diag::err_delete_explicit_conversion) << T << ConvTy;
      }

      SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv,
                                             QualType ConvTy) override {
        return S.Diag(Loc: Conv->getLocation(), DiagID: diag::note_delete_conversion)
            << ConvTy;
      }

      SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc,
                                              QualType T) override {
        return S.Diag(Loc, DiagID: diag::err_ambiguous_delete_operand) << T;
      }

      SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv,
                                          QualType ConvTy) override {
        return S.Diag(Loc: Conv->getLocation(), DiagID: diag::note_delete_conversion)
            << ConvTy;
      }

      SemaDiagnosticBuilder diagnoseConversion(Sema &S, SourceLocation Loc,
                                               QualType T,
                                               QualType ConvTy) override {
        llvm_unreachable("conversion functions are permitted");
      }
    } Converter;

    Ex = PerformContextualImplicitConversion(Loc: StartLoc, FromE: Ex.get(), Converter);
    if (Ex.isInvalid())
      return ExprError();
    Type = Ex.get()->getType();
    if (!Converter.match(ConvType: Type))
      // FIXME: PerformContextualImplicitConversion should return ExprError
      // itself in this case.
      return ExprError();

    QualType Pointee = Type->castAs<PointerType>()->getPointeeType();
    QualType PointeeElem = Context.getBaseElementType(QT: Pointee);

    // Deleting through a non-default address space pointer is only allowed
    // in OpenCL C++.
    if (Pointee.getAddressSpace() != LangAS::Default &&
        !getLangOpts().OpenCLCPlusPlus)
      return Diag(Loc: Ex.get()->getBeginLoc(),
                  DiagID: diag::err_address_space_qualified_delete)
             << Pointee.getUnqualifiedType()
             << Pointee.getQualifiers().getAddressSpaceAttributePrintValue();

    CXXRecordDecl *PointeeRD = nullptr;
    if (Pointee->isVoidType() && !isSFINAEContext()) {
      // The C++ standard bans deleting a pointer to a non-object type, which
      // effectively bans deletion of "void*". However, most compilers support
      // this, so we treat it as a warning unless we're in a SFINAE context.
      // But we still prohibit this since C++26.
      Diag(Loc: StartLoc, DiagID: LangOpts.CPlusPlus26 ? diag::err_delete_incomplete
                                          : diag::ext_delete_void_ptr_operand)
          << (LangOpts.CPlusPlus26 ? Pointee : Type)
          << Ex.get()->getSourceRange();
    } else if (Pointee->isFunctionType() || Pointee->isVoidType() ||
               Pointee->isSizelessType()) {
      return ExprError(Diag(Loc: StartLoc, DiagID: diag::err_delete_operand)
                       << Type << Ex.get()->getSourceRange());
    } else if (!Pointee->isDependentType()) {
      // FIXME: This can result in errors if the definition was imported from a
      // module but is hidden.
      // A complete (or enum) pointee lets us find the class for
      // destructor/operator-delete lookup below.
      if (Pointee->isEnumeralType() ||
          !RequireCompleteType(Loc: StartLoc, T: Pointee,
                               DiagID: LangOpts.CPlusPlus26
                                   ? diag::err_delete_incomplete
                                   : diag::warn_delete_incomplete,
                               Args: Ex.get())) {
        if (const RecordType *RT = PointeeElem->getAs<RecordType>())
          PointeeRD = cast<CXXRecordDecl>(Val: RT->getDecl());
      }
    }

    // "delete p" where p points at an array type: warn, then proceed as if
    // "delete[]" had been written.
    if (Pointee->isArrayType() && !ArrayForm) {
      Diag(Loc: StartLoc, DiagID: diag::warn_delete_array_type)
          << Type << Ex.get()->getSourceRange()
          << FixItHint::CreateInsertion(InsertionLoc: getLocForEndOfToken(Loc: StartLoc), Code: "[]");
      ArrayForm = true;
    }

    DeclarationName DeleteName = Context.DeclarationNames.getCXXOperatorName(
        Op: ArrayForm ? OO_Array_Delete : OO_Delete);

    if (PointeeRD) {
      // Class type: look for a class-specific operator delete unless the
      // user wrote "::delete".
      ImplicitDeallocationParameters IDP = {
          Pointee, ShouldUseTypeAwareOperatorNewOrDelete(),
          AlignedAllocationMode::No, SizedDeallocationMode::No};
      if (!UseGlobal &&
          FindDeallocationFunction(StartLoc, RD: PointeeRD, Name: DeleteName,
                                   Operator&: OperatorDelete, IDP))
        return ExprError();

      // If we're allocating an array of records, check whether the
      // usual operator delete[] has a size_t parameter.
      if (ArrayForm) {
        // If the user specifically asked to use the global allocator,
        // we'll need to do the lookup into the class.
        if (UseGlobal)
          UsualArrayDeleteWantsSize = doesUsualArrayDeleteWantSize(
              S&: *this, loc: StartLoc, PassType: IDP.PassTypeIdentity, allocType: PointeeElem);

        // Otherwise, the usual operator delete[] should be the
        // function we just found.
        else if (isa_and_nonnull<CXXMethodDecl>(Val: OperatorDelete)) {
          UsualDeallocFnInfo UDFI(
              *this, DeclAccessPair::make(D: OperatorDelete, AS: AS_public), Pointee,
              StartLoc);
          UsualArrayDeleteWantsSize = isSizedDeallocation(Mode: UDFI.IDP.PassSize);
        }
      }

      // Reference the destructor now if delete will actually invoke it.
      if (!PointeeRD->hasIrrelevantDestructor()) {
        if (CXXDestructorDecl *Dtor = LookupDestructor(Class: PointeeRD)) {
          if (Dtor->isCalledByDelete(OpDel: OperatorDelete)) {
            MarkFunctionReferenced(Loc: StartLoc,
                                   Func: const_cast<CXXDestructorDecl *>(Dtor));
            if (DiagnoseUseOfDecl(D: Dtor, Locs: StartLoc))
              return ExprError();
          }
        }
      }

      CheckVirtualDtorCall(dtor: PointeeRD->getDestructor(), Loc: StartLoc,
                           /*IsDelete=*/true, /*CallCanBeVirtual=*/true,
                           /*WarnOnNonAbstractTypes=*/!ArrayForm,
                           DtorLoc: SourceLocation());
    }

    if (!OperatorDelete) {
      // No class-specific operator delete was found (or ::delete was
      // written): fall back to the usual global deallocation function.
      if (getLangOpts().OpenCLCPlusPlus) {
        Diag(Loc: StartLoc, DiagID: diag::err_openclcxx_not_supported) << "default delete";
        return ExprError();
      }

      bool IsComplete = isCompleteType(Loc: StartLoc, T: Pointee);
      bool CanProvideSize =
          IsComplete && (!ArrayForm || UsualArrayDeleteWantsSize ||
                         Pointee.isDestructedType());
      bool Overaligned = hasNewExtendedAlignment(S&: *this, AllocType: Pointee);

      // Look for a global declaration.
      ImplicitDeallocationParameters IDP = {
          Pointee, ShouldUseTypeAwareOperatorNewOrDelete(),
          alignedAllocationModeFromBool(IsAligned: Overaligned),
          sizedDeallocationModeFromBool(IsSized: CanProvideSize)};
      OperatorDelete = FindUsualDeallocationFunction(StartLoc, IDP, Name: DeleteName);
      if (!OperatorDelete)
        return ExprError();
    }

    if (OperatorDelete->isInvalidDecl())
      return ExprError();

    MarkFunctionReferenced(Loc: StartLoc, Func: OperatorDelete);

    // Check access and ambiguity of destructor if we're going to call it.
    // Note that this is required even for a virtual delete.
    bool IsVirtualDelete = false;
    if (PointeeRD) {
      if (CXXDestructorDecl *Dtor = LookupDestructor(Class: PointeeRD)) {
        if (Dtor->isCalledByDelete(OpDel: OperatorDelete))
          CheckDestructorAccess(Loc: Ex.get()->getExprLoc(), Dtor,
                                PDiag: PDiag(DiagID: diag::err_access_dtor) << PointeeElem);
        IsVirtualDelete = Dtor->isVirtual();
      }
    }

    DiagnoseUseOfDecl(D: OperatorDelete, Locs: StartLoc);

    // Type-aware operator delete takes a type-identity tag as its first
    // parameter, shifting the address parameter to index 1.
    unsigned AddressParamIdx = 0;
    if (OperatorDelete->isTypeAwareOperatorNewOrDelete()) {
      QualType TypeIdentity = OperatorDelete->getParamDecl(i: 0)->getType();
      if (RequireCompleteType(Loc: StartLoc, T: TypeIdentity,
                              DiagID: diag::err_incomplete_type))
        return ExprError();
      AddressParamIdx = 1;
    }

    // Convert the operand to the type of the first parameter of operator
    // delete. This is only necessary if we selected a destroying operator
    // delete that we are going to call (non-virtually); converting to void*
    // is trivial and left to AST consumers to handle.
    QualType ParamType =
        OperatorDelete->getParamDecl(i: AddressParamIdx)->getType();
    if (!IsVirtualDelete && !ParamType->getPointeeType()->isVoidType()) {
      Qualifiers Qs = Pointee.getQualifiers();
      if (Qs.hasCVRQualifiers()) {
        // Qualifiers are irrelevant to this conversion; we're only looking
        // for access and ambiguity.
        Qs.removeCVRQualifiers();
        QualType Unqual = Context.getPointerType(
            T: Context.getQualifiedType(T: Pointee.getUnqualifiedType(), Qs));
        Ex = ImpCastExprToType(E: Ex.get(), Type: Unqual, CK: CK_NoOp);
      }
      Ex = PerformImplicitConversion(From: Ex.get(), ToType: ParamType,
                                     Action: AssignmentAction::Passing);
      if (Ex.isInvalid())
        return ExprError();
    }
  }

  CXXDeleteExpr *Result = new (Context) CXXDeleteExpr(
      Context.VoidTy, UseGlobal, ArrayForm, ArrayFormAsWritten,
      UsualArrayDeleteWantsSize, OperatorDelete, Ex.get(), StartLoc);
  // Warn if this delete's form doesn't match how the pointee was new'ed.
  AnalyzeDeleteExprMismatch(DE: Result);
  return Result;
}
4182
/// Resolve the replaceable global operator new/delete that a
/// __builtin_operator_new / __builtin_operator_delete call should invoke.
///
/// Performs ordinary overload resolution over the global-scope allocation
/// functions using the call's arguments. On success, stores the selected
/// function in \p Operator and returns false; on failure, emits the
/// appropriate diagnostics and returns true.
static bool resolveBuiltinNewDeleteOverload(Sema &S, CallExpr *TheCall,
                                            bool IsDelete,
                                            FunctionDecl *&Operator) {

  DeclarationName NewName = S.Context.DeclarationNames.getCXXOperatorName(
      Op: IsDelete ? OO_Delete : OO_New);

  // Candidates are looked up at translation-unit scope only; the caller
  // has already ensured the implicit declarations exist.
  LookupResult R(S, NewName, TheCall->getBeginLoc(), Sema::LookupOrdinaryName);
  S.LookupQualifiedName(R, LookupCtx: S.Context.getTranslationUnitDecl());
  assert(!R.empty() && "implicitly declared allocation functions not found");
  assert(!R.isAmbiguous() && "global allocation functions are ambiguous");

  // We do our own custom access checks below.
  R.suppressDiagnostics();

  SmallVector<Expr *, 8> Args(TheCall->arguments());
  OverloadCandidateSet Candidates(R.getNameLoc(),
                                  OverloadCandidateSet::CSK_Normal);
  for (LookupResult::iterator FnOvl = R.begin(), FnOvlEnd = R.end();
       FnOvl != FnOvlEnd; ++FnOvl) {
    // Even member operator new/delete are implicitly treated as
    // static, so don't use AddMemberCandidate.
    NamedDecl *D = (*FnOvl)->getUnderlyingDecl();

    if (FunctionTemplateDecl *FnTemplate = dyn_cast<FunctionTemplateDecl>(Val: D)) {
      S.AddTemplateOverloadCandidate(FunctionTemplate: FnTemplate, FoundDecl: FnOvl.getPair(),
                                     /*ExplicitTemplateArgs=*/nullptr, Args,
                                     CandidateSet&: Candidates,
                                     /*SuppressUserConversions=*/false);
      continue;
    }

    FunctionDecl *Fn = cast<FunctionDecl>(Val: D);
    S.AddOverloadCandidate(Function: Fn, FoundDecl: FnOvl.getPair(), Args, CandidateSet&: Candidates,
                           /*SuppressUserConversions=*/false);
  }

  SourceRange Range = TheCall->getSourceRange();

  // Do the resolution.
  OverloadCandidateSet::iterator Best;
  switch (Candidates.BestViableFunction(S, Loc: R.getNameLoc(), Best)) {
  case OR_Success: {
    // Got one!
    FunctionDecl *FnDecl = Best->Function;
    assert(R.getNamingClass() == nullptr &&
           "class members should not be considered");

    // The builtin may only bind to a usual (replaceable) allocation
    // function; anything else is an error.
    if (!FnDecl->isReplaceableGlobalAllocationFunction()) {
      S.Diag(Loc: R.getNameLoc(), DiagID: diag::err_builtin_operator_new_delete_not_usual)
          << (IsDelete ? 1 : 0) << Range;
      S.Diag(Loc: FnDecl->getLocation(), DiagID: diag::note_non_usual_function_declared_here)
          << R.getLookupName() << FnDecl->getSourceRange();
      return true;
    }

    Operator = FnDecl;
    return false;
  }

  case OR_No_Viable_Function:
    Candidates.NoteCandidates(
        PA: PartialDiagnosticAt(R.getNameLoc(),
                             S.PDiag(DiagID: diag::err_ovl_no_viable_function_in_call)
                                 << R.getLookupName() << Range),
        S, OCD: OCD_AllCandidates, Args);
    return true;

  case OR_Ambiguous:
    Candidates.NoteCandidates(
        PA: PartialDiagnosticAt(R.getNameLoc(),
                             S.PDiag(DiagID: diag::err_ovl_ambiguous_call)
                                 << R.getLookupName() << Range),
        S, OCD: OCD_AmbiguousCandidates, Args);
    return true;

  case OR_Deleted:
    S.DiagnoseUseOfDeletedFunction(Loc: R.getNameLoc(), Range, Name: R.getLookupName(),
                                   CandidateSet&: Candidates, Fn: Best->Function, Args);
    return true;
  }
  llvm_unreachable("Unreachable, bad result from BestViableFunction");
}
4266
/// Finish semantic analysis of a __builtin_operator_new or
/// __builtin_operator_delete call: resolve the underlying global allocation
/// function, convert the arguments to its parameter types, and rewrite the
/// call's result/callee types in place.
ExprResult Sema::BuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
                                                    bool IsDelete) {
  CallExpr *TheCall = cast<CallExpr>(Val: TheCallResult.get());
  // These builtins are C++-only.
  if (!getLangOpts().CPlusPlus) {
    Diag(Loc: TheCall->getExprLoc(), DiagID: diag::err_builtin_requires_language)
        << (IsDelete ? "__builtin_operator_delete" : "__builtin_operator_new")
        << "C++";
    return ExprError();
  }
  // CodeGen assumes it can find the global new and delete to call,
  // so ensure that they are declared.
  DeclareGlobalNewDelete();

  FunctionDecl *OperatorNewOrDelete = nullptr;
  if (resolveBuiltinNewDeleteOverload(S&: *this, TheCall, IsDelete,
                                      Operator&: OperatorNewOrDelete))
    return ExprError();
  assert(OperatorNewOrDelete && "should be found");

  DiagnoseUseOfDecl(D: OperatorNewOrDelete, Locs: TheCall->getExprLoc());
  MarkFunctionReferenced(Loc: TheCall->getExprLoc(), Func: OperatorNewOrDelete);

  // Rewrite the call to have the resolved function's return type and
  // copy-initialize each argument as if passed to that function.
  TheCall->setType(OperatorNewOrDelete->getReturnType());
  for (unsigned i = 0; i != TheCall->getNumArgs(); ++i) {
    QualType ParamTy = OperatorNewOrDelete->getParamDecl(i)->getType();
    InitializedEntity Entity =
        InitializedEntity::InitializeParameter(Context, Type: ParamTy, Consumed: false);
    ExprResult Arg = PerformCopyInitialization(
        Entity, EqualLoc: TheCall->getArg(Arg: i)->getBeginLoc(), Init: TheCall->getArg(Arg: i));
    if (Arg.isInvalid())
      return ExprError();
    TheCall->setArg(Arg: i, ArgExpr: Arg.get());
  }
  // The callee was parsed as a builtin; retype it as the resolved function.
  auto Callee = dyn_cast<ImplicitCastExpr>(Val: TheCall->getCallee());
  assert(Callee && Callee->getCastKind() == CK_BuiltinFnToFnPtr &&
         "Callee expected to be implicit cast to a builtin function pointer");
  Callee->setType(OperatorNewOrDelete->getType());

  return TheCallResult;
}
4307
/// Warn about deleting (or explicitly destroying) a polymorphic object
/// through a base class whose destructor is not virtual — undefined
/// behavior if the dynamic type differs ([expr.delete]p3).
///
/// \param dtor the destructor of the static type, may be null.
/// \param IsDelete true for delete-expressions, false for explicit
///        destructor calls (which additionally get a fix-it note).
/// \param CallCanBeVirtual false suppresses the check entirely (the call
///        site cannot dispatch virtually).
/// \param WarnOnNonAbstractTypes whether to warn for non-abstract classes
///        (suppressed for array delete by the caller).
/// \param DtorLoc location used by the fix-it note in the !IsDelete case.
void Sema::CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
                                bool IsDelete, bool CallCanBeVirtual,
                                bool WarnOnNonAbstractTypes,
                                SourceLocation DtorLoc) {
  // Nothing to do if the destructor is already virtual, the call can't be
  // virtual anyway, or no call will actually occur (unevaluated context).
  if (!dtor || dtor->isVirtual() || !CallCanBeVirtual || isUnevaluatedContext())
    return;

  // C++ [expr.delete]p3:
  //   In the first alternative (delete object), if the static type of the
  //   object to be deleted is different from its dynamic type, the static
  //   type shall be a base class of the dynamic type of the object to be
  //   deleted and the static type shall have a virtual destructor or the
  //   behavior is undefined.
  //
  const CXXRecordDecl *PointeeRD = dtor->getParent();
  // Note: a final class cannot be derived from, no issue there
  if (!PointeeRD->isPolymorphic() || PointeeRD->hasAttr<FinalAttr>())
    return;

  // If the superclass is in a system header, there's nothing that can be done.
  // The `delete` (where we emit the warning) can be in a system header,
  // what matters for this warning is where the deleted type is defined.
  if (getSourceManager().isInSystemHeader(Loc: PointeeRD->getLocation()))
    return;

  QualType ClassType = dtor->getFunctionObjectParameterType();
  if (PointeeRD->isAbstract()) {
    // If the class is abstract, we warn by default, because we're
    // sure the code has undefined behavior.
    Diag(Loc, DiagID: diag::warn_delete_abstract_non_virtual_dtor) << (IsDelete ? 0 : 1)
                                                         << ClassType;
  } else if (WarnOnNonAbstractTypes) {
    // Otherwise, if this is not an array delete, it's a bit suspect,
    // but not necessarily wrong.
    Diag(Loc, DiagID: diag::warn_delete_non_virtual_dtor) << (IsDelete ? 0 : 1)
                                                << ClassType;
  }
  if (!IsDelete) {
    // For explicit destructor calls, suggest qualifying the call to make
    // the non-virtual dispatch explicit.
    std::string TypeStr;
    ClassType.getAsStringInternal(Str&: TypeStr, Policy: getPrintingPolicy());
    Diag(Loc: DtorLoc, DiagID: diag::note_delete_non_virtual)
        << FixItHint::CreateInsertion(InsertionLoc: DtorLoc, Code: TypeStr + "::");
  }
}
4352
4353Sema::ConditionResult Sema::ActOnConditionVariable(Decl *ConditionVar,
4354 SourceLocation StmtLoc,
4355 ConditionKind CK) {
4356 ExprResult E =
4357 CheckConditionVariable(ConditionVar: cast<VarDecl>(Val: ConditionVar), StmtLoc, CK);
4358 if (E.isInvalid())
4359 return ConditionError();
4360 return ConditionResult(*this, ConditionVar, MakeFullExpr(Arg: E.get(), CC: StmtLoc),
4361 CK == ConditionKind::ConstexprIf);
4362}
4363
4364ExprResult Sema::CheckConditionVariable(VarDecl *ConditionVar,
4365 SourceLocation StmtLoc,
4366 ConditionKind CK) {
4367 if (ConditionVar->isInvalidDecl())
4368 return ExprError();
4369
4370 QualType T = ConditionVar->getType();
4371
4372 // C++ [stmt.select]p2:
4373 // The declarator shall not specify a function or an array.
4374 if (T->isFunctionType())
4375 return ExprError(Diag(Loc: ConditionVar->getLocation(),
4376 DiagID: diag::err_invalid_use_of_function_type)
4377 << ConditionVar->getSourceRange());
4378 else if (T->isArrayType())
4379 return ExprError(Diag(Loc: ConditionVar->getLocation(),
4380 DiagID: diag::err_invalid_use_of_array_type)
4381 << ConditionVar->getSourceRange());
4382
4383 ExprResult Condition = BuildDeclRefExpr(
4384 D: ConditionVar, Ty: ConditionVar->getType().getNonReferenceType(), VK: VK_LValue,
4385 Loc: ConditionVar->getLocation());
4386
4387 switch (CK) {
4388 case ConditionKind::Boolean:
4389 return CheckBooleanCondition(Loc: StmtLoc, E: Condition.get());
4390
4391 case ConditionKind::ConstexprIf:
4392 return CheckBooleanCondition(Loc: StmtLoc, E: Condition.get(), IsConstexpr: true);
4393
4394 case ConditionKind::Switch:
4395 return CheckSwitchCondition(SwitchLoc: StmtLoc, Cond: Condition.get());
4396 }
4397
4398 llvm_unreachable("unexpected condition kind");
4399}
4400
4401ExprResult Sema::CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr) {
4402 // C++11 6.4p4:
4403 // The value of a condition that is an initialized declaration in a statement
4404 // other than a switch statement is the value of the declared variable
4405 // implicitly converted to type bool. If that conversion is ill-formed, the
4406 // program is ill-formed.
4407 // The value of a condition that is an expression is the value of the
4408 // expression, implicitly converted to bool.
4409 //
4410 // C++23 8.5.2p2
4411 // If the if statement is of the form if constexpr, the value of the condition
4412 // is contextually converted to bool and the converted expression shall be
4413 // a constant expression.
4414 //
4415
4416 ExprResult E = PerformContextuallyConvertToBool(From: CondExpr);
4417 if (!IsConstexpr || E.isInvalid() || E.get()->isValueDependent())
4418 return E;
4419
4420 // FIXME: Return this value to the caller so they don't need to recompute it.
4421 llvm::APSInt Cond;
4422 E = VerifyIntegerConstantExpression(
4423 E: E.get(), Result: &Cond,
4424 DiagID: diag::err_constexpr_if_condition_expression_is_not_constant);
4425 return E;
4426}
4427
4428bool
4429Sema::IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType) {
4430 // Look inside the implicit cast, if it exists.
4431 if (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(Val: From))
4432 From = Cast->getSubExpr();
4433
4434 // A string literal (2.13.4) that is not a wide string literal can
4435 // be converted to an rvalue of type "pointer to char"; a wide
4436 // string literal can be converted to an rvalue of type "pointer
4437 // to wchar_t" (C++ 4.2p2).
4438 if (StringLiteral *StrLit = dyn_cast<StringLiteral>(Val: From->IgnoreParens()))
4439 if (const PointerType *ToPtrType = ToType->getAs<PointerType>())
4440 if (const BuiltinType *ToPointeeType
4441 = ToPtrType->getPointeeType()->getAs<BuiltinType>()) {
4442 // This conversion is considered only when there is an
4443 // explicit appropriate pointer target type (C++ 4.2p2).
4444 if (!ToPtrType->getPointeeType().hasQualifiers()) {
4445 switch (StrLit->getKind()) {
4446 case StringLiteralKind::UTF8:
4447 case StringLiteralKind::UTF16:
4448 case StringLiteralKind::UTF32:
4449 // We don't allow UTF literals to be implicitly converted
4450 break;
4451 case StringLiteralKind::Ordinary:
4452 case StringLiteralKind::Binary:
4453 return (ToPointeeType->getKind() == BuiltinType::Char_U ||
4454 ToPointeeType->getKind() == BuiltinType::Char_S);
4455 case StringLiteralKind::Wide:
4456 return Context.typesAreCompatible(T1: Context.getWideCharType(),
4457 T2: QualType(ToPointeeType, 0));
4458 case StringLiteralKind::Unevaluated:
4459 assert(false && "Unevaluated string literal in expression");
4460 break;
4461 }
4462 }
4463 }
4464
4465 return false;
4466}
4467
/// Build the expression that applies a user-defined conversion to \p From,
/// either through a converting constructor (CK_ConstructorConversion) or a
/// conversion function (CK_UserDefinedConversion), performing the
/// associated access and use checks.
static ExprResult BuildCXXCastArgument(Sema &S,
                                       SourceLocation CastLoc,
                                       QualType Ty,
                                       CastKind Kind,
                                       CXXMethodDecl *Method,
                                       DeclAccessPair FoundDecl,
                                       bool HadMultipleCandidates,
                                       Expr *From) {
  switch (Kind) {
  default: llvm_unreachable("Unhandled cast kind!");
  case CK_ConstructorConversion: {
    // Convert by constructing a Ty from From.
    CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(Val: Method);
    SmallVector<Expr*, 8> ConstructorArgs;

    if (S.RequireNonAbstractType(Loc: CastLoc, T: Ty,
                                 DiagID: diag::err_allocation_of_abstract_type))
      return ExprError();

    if (S.CompleteConstructorCall(Constructor, DeclInitType: Ty, ArgsPtr: From, Loc: CastLoc,
                                  ConvertedArgs&: ConstructorArgs))
      return ExprError();

    S.CheckConstructorAccess(Loc: CastLoc, D: Constructor, FoundDecl,
                             Entity: InitializedEntity::InitializeTemporary(Type: Ty));
    if (S.DiagnoseUseOfDecl(D: Method, Locs: CastLoc))
      return ExprError();

    ExprResult Result = S.BuildCXXConstructExpr(
        ConstructLoc: CastLoc, DeclInitType: Ty, FoundDecl, Constructor: cast<CXXConstructorDecl>(Val: Method),
        Exprs: ConstructorArgs, HadMultipleCandidates,
        /*ListInit*/ IsListInitialization: false, /*StdInitListInit*/ IsStdInitListInitialization: false, /*ZeroInit*/ RequiresZeroInit: false,
        ConstructKind: CXXConstructionKind::Complete, ParenRange: SourceRange());
    if (Result.isInvalid())
      return ExprError();

    // The constructed temporary may need its destructor bound.
    return S.MaybeBindToTemporary(E: Result.getAs<Expr>());
  }

  case CK_UserDefinedConversion: {
    // Convert by calling a conversion function on From.
    assert(!From->getType()->isPointerType() && "Arg can't have pointer type!");

    S.CheckMemberOperatorAccess(Loc: CastLoc, ObjectExpr: From, /*arg*/ ArgExpr: nullptr, FoundDecl);
    if (S.DiagnoseUseOfDecl(D: Method, Locs: CastLoc))
      return ExprError();

    // Create an implicit call expr that calls it.
    CXXConversionDecl *Conv = cast<CXXConversionDecl>(Val: Method);
    ExprResult Result = S.BuildCXXMemberCallExpr(Exp: From, FoundDecl, Method: Conv,
                                                 HadMultipleCandidates);
    if (Result.isInvalid())
      return ExprError();
    // Record usage of conversion in an implicit cast.
    Result = ImplicitCastExpr::Create(Context: S.Context, T: Result.get()->getType(),
                                      Kind: CK_UserDefinedConversion, Operand: Result.get(),
                                      BasePath: nullptr, Cat: Result.get()->getValueKind(),
                                      FPO: S.CurFPFeatureOverrides());

    return S.MaybeBindToTemporary(E: Result.get());
  }
  }
}
4529
/// Apply a previously-computed implicit conversion sequence to \p From,
/// producing the converted expression of type \p ToType. Dispatches on the
/// sequence kind: standard, user-defined (conversion function or
/// converting constructor, with its "before" and "after" standard
/// conversions), ambiguous, or bad.
ExprResult
Sema::PerformImplicitConversion(Expr *From, QualType ToType,
                                const ImplicitConversionSequence &ICS,
                                AssignmentAction Action,
                                CheckedConversionKind CCK) {
  // C++ [over.match.oper]p7: [...] operands of class type are converted [...]
  if (CCK == CheckedConversionKind::ForBuiltinOverloadedOp &&
      !From->getType()->isRecordType())
    return From;

  switch (ICS.getKind()) {
  case ImplicitConversionSequence::StandardConversion: {
    // Delegate to the StandardConversionSequence overload.
    ExprResult Res = PerformImplicitConversion(From, ToType, SCS: ICS.Standard,
                                               Action, CCK);
    if (Res.isInvalid())
      return ExprError();
    From = Res.get();
    break;
  }

  case ImplicitConversionSequence::UserDefinedConversion: {

    FunctionDecl *FD = ICS.UserDefined.ConversionFunction;
    CastKind CastKind;
    QualType BeforeToType;
    assert(FD && "no conversion function for user-defined conversion seq");
    if (const CXXConversionDecl *Conv = dyn_cast<CXXConversionDecl>(Val: FD)) {
      CastKind = CK_UserDefinedConversion;

      // If the user-defined conversion is specified by a conversion function,
      // the initial standard conversion sequence converts the source type to
      // the implicit object parameter of the conversion function.
      BeforeToType = Context.getTagDeclType(Decl: Conv->getParent());
    } else {
      const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(Val: FD);
      CastKind = CK_ConstructorConversion;
      // Do no conversion if dealing with ... for the first conversion.
      if (!ICS.UserDefined.EllipsisConversion) {
        // If the user-defined conversion is specified by a constructor, the
        // initial standard conversion sequence converts the source type to
        // the type required by the argument of the constructor
        BeforeToType = Ctor->getParamDecl(i: 0)->getType().getNonReferenceType();
      }
    }
    // Watch out for ellipsis conversion.
    if (!ICS.UserDefined.EllipsisConversion) {
      // Apply the "before" standard conversion sequence.
      ExprResult Res = PerformImplicitConversion(
          From, ToType: BeforeToType, SCS: ICS.UserDefined.Before,
          Action: AssignmentAction::Converting, CCK);
      if (Res.isInvalid())
        return ExprError();
      From = Res.get();
    }

    // Build the actual call to the conversion function or constructor.
    ExprResult CastArg = BuildCXXCastArgument(
        S&: *this, CastLoc: From->getBeginLoc(), Ty: ToType.getNonReferenceType(), Kind: CastKind,
        Method: cast<CXXMethodDecl>(Val: FD), FoundDecl: ICS.UserDefined.FoundConversionFunction,
        HadMultipleCandidates: ICS.UserDefined.HadMultipleCandidates, From);

    if (CastArg.isInvalid())
      return ExprError();

    From = CastArg.get();

    // C++ [over.match.oper]p7:
    //   [...] the second standard conversion sequence of a user-defined
    //   conversion sequence is not applied.
    if (CCK == CheckedConversionKind::ForBuiltinOverloadedOp)
      return From;

    // Apply the "after" standard conversion sequence.
    return PerformImplicitConversion(From, ToType, SCS: ICS.UserDefined.After,
                                     Action: AssignmentAction::Converting, CCK);
  }

  case ImplicitConversionSequence::AmbiguousConversion:
    ICS.DiagnoseAmbiguousConversion(S&: *this, CaretLoc: From->getExprLoc(),
                                    PDiag: PDiag(DiagID: diag::err_typecheck_ambiguous_condition)
                                        << From->getSourceRange());
    return ExprError();

  case ImplicitConversionSequence::EllipsisConversion:
  case ImplicitConversionSequence::StaticObjectArgumentConversion:
    llvm_unreachable("bad conversion");

  case ImplicitConversionSequence::BadConversion:
    // Re-run assignment checking purely to produce a diagnostic; a
    // "Compatible" verdict here would contradict the bad ICS, so force it
    // to Incompatible.
    AssignConvertType ConvTy =
        CheckAssignmentConstraints(Loc: From->getExprLoc(), LHSType: ToType, RHSType: From->getType());
    bool Diagnosed = DiagnoseAssignmentResult(
        ConvTy: ConvTy == AssignConvertType::Compatible
            ? AssignConvertType::Incompatible
            : ConvTy,
        Loc: From->getExprLoc(), DstType: ToType, SrcType: From->getType(), SrcExpr: From, Action);
    assert(Diagnosed && "failed to diagnose bad conversion"); (void)Diagnosed;
    return ExprError();
  }

  // Everything went well.
  return From;
}
4629
4630// adjustVectorType - Compute the intermediate cast type casting elements of the
4631// from type to the elements of the to type without resizing the vector.
4632static QualType adjustVectorType(ASTContext &Context, QualType FromTy,
4633 QualType ToType, QualType *ElTy = nullptr) {
4634 QualType ElType = ToType;
4635 if (auto *ToVec = ToType->getAs<VectorType>())
4636 ElType = ToVec->getElementType();
4637
4638 if (ElTy)
4639 *ElTy = ElType;
4640 if (!FromTy->isVectorType())
4641 return ElType;
4642 auto *FromVec = FromTy->castAs<VectorType>();
4643 return Context.getExtVectorType(VectorType: ElType, NumElts: FromVec->getNumElements());
4644}
4645
/// Apply the standard conversion sequence SCS to the expression From,
/// returning the converted expression or ExprError on failure. ToType is the
/// overall destination type, Action describes the context (assigning,
/// passing, returning, ...) for diagnostics, and CCK records whether this
/// conversion is part of an explicit cast. The sequence is applied in three
/// phases (SCS.First / SCS.Second / SCS.Third), with an optional HLSL
/// dimension adjustment (SCS.Dimension) between the second and third phases.
ExprResult
Sema::PerformImplicitConversion(Expr *From, QualType ToType,
                                const StandardConversionSequence& SCS,
                                AssignmentAction Action,
                                CheckedConversionKind CCK) {
  bool CStyle = (CCK == CheckedConversionKind::CStyleCast ||
                 CCK == CheckedConversionKind::FunctionalCast);

  // Overall FIXME: we are recomputing too many types here and doing far too
  // much extra work. What this means is that we need to keep track of more
  // information that is computed when we try the implicit conversion initially,
  // so that we don't need to recompute anything here.
  QualType FromType = From->getType();

  // If the sequence is realized by a copy constructor, the whole conversion
  // is a single constructor call (possibly preceded by a derived-to-base
  // adjustment handled via CompleteConstructorCall).
  if (SCS.CopyConstructor) {
    // FIXME: When can ToType be a reference type?
    assert(!ToType->isReferenceType());
    if (SCS.Second == ICK_Derived_To_Base) {
      SmallVector<Expr*, 8> ConstructorArgs;
      if (CompleteConstructorCall(
              Constructor: cast<CXXConstructorDecl>(Val: SCS.CopyConstructor), DeclInitType: ToType, ArgsPtr: From,
              /*FIXME:ConstructLoc*/ Loc: SourceLocation(), ConvertedArgs&: ConstructorArgs))
        return ExprError();
      return BuildCXXConstructExpr(
          /*FIXME:ConstructLoc*/ ConstructLoc: SourceLocation(), DeclInitType: ToType,
          FoundDecl: SCS.FoundCopyConstructor, Constructor: SCS.CopyConstructor, Exprs: ConstructorArgs,
          /*HadMultipleCandidates*/ false,
          /*ListInit*/ IsListInitialization: false, /*StdInitListInit*/ IsStdInitListInitialization: false, /*ZeroInit*/ RequiresZeroInit: false,
          ConstructKind: CXXConstructionKind::Complete, ParenRange: SourceRange());
    }
    return BuildCXXConstructExpr(
        /*FIXME:ConstructLoc*/ ConstructLoc: SourceLocation(), DeclInitType: ToType,
        FoundDecl: SCS.FoundCopyConstructor, Constructor: SCS.CopyConstructor, Exprs: From,
        /*HadMultipleCandidates*/ false,
        /*ListInit*/ IsListInitialization: false, /*StdInitListInit*/ IsStdInitListInitialization: false, /*ZeroInit*/ RequiresZeroInit: false,
        ConstructKind: CXXConstructionKind::Complete, ParenRange: SourceRange());
  }

  // Resolve overloaded function references.
  if (Context.hasSameType(T1: FromType, T2: Context.OverloadTy)) {
    DeclAccessPair Found;
    FunctionDecl *Fn = ResolveAddressOfOverloadedFunction(AddressOfExpr: From, TargetType: ToType,
                                                          Complain: true, Found);
    if (!Fn)
      return ExprError();

    if (DiagnoseUseOfDecl(D: Fn, Locs: From->getBeginLoc()))
      return ExprError();

    ExprResult Res = FixOverloadedFunctionReference(E: From, FoundDecl: Found, Fn);
    if (Res.isInvalid())
      return ExprError();

    // We might get back another placeholder expression if we resolved to a
    // builtin.
    Res = CheckPlaceholderExpr(E: Res.get());
    if (Res.isInvalid())
      return ExprError();

    From = Res.get();
    FromType = From->getType();
  }

  // If we're converting to an atomic type, first convert to the corresponding
  // non-atomic type.
  QualType ToAtomicType;
  if (const AtomicType *ToAtomic = ToType->getAs<AtomicType>()) {
    ToAtomicType = ToType;
    ToType = ToAtomic->getValueType();
  }

  // Remember the pre-conversion type for diagnostics emitted at the end
  // (unaligned-drop and nullability warnings).
  QualType InitialFromType = FromType;
  // Perform the first implicit conversion.
  switch (SCS.First) {
  case ICK_Identity:
    // No conversion required, except unwrapping an atomic source operand.
    if (const AtomicType *FromAtomic = FromType->getAs<AtomicType>()) {
      FromType = FromAtomic->getValueType().getUnqualifiedType();
      From = ImplicitCastExpr::Create(Context, T: FromType, Kind: CK_AtomicToNonAtomic,
                                      Operand: From, /*BasePath=*/nullptr, Cat: VK_PRValue,
                                      FPO: FPOptionsOverride());
    }
    break;

  case ICK_Lvalue_To_Rvalue: {
    assert(From->getObjectKind() != OK_ObjCProperty);
    ExprResult FromRes = DefaultLvalueConversion(E: From);
    if (FromRes.isInvalid())
      return ExprError();

    From = FromRes.get();
    FromType = From->getType();
    break;
  }

  case ICK_Array_To_Pointer:
    FromType = Context.getArrayDecayedType(T: FromType);
    From = ImpCastExprToType(E: From, Type: FromType, CK: CK_ArrayToPointerDecay, VK: VK_PRValue,
                             /*BasePath=*/nullptr, CCK)
               .get();
    break;

  case ICK_HLSL_Array_RValue:
    // HLSL arrays convert between "array parameter" and constant-array forms
    // depending on which direction the conversion goes.
    if (ToType->isArrayParameterType()) {
      FromType = Context.getArrayParameterType(Ty: FromType);
    } else if (FromType->isArrayParameterType()) {
      const ArrayParameterType *APT = cast<ArrayParameterType>(Val&: FromType);
      FromType = APT->getConstantArrayType(Ctx: Context);
    }
    From = ImpCastExprToType(E: From, Type: FromType, CK: CK_HLSLArrayRValue, VK: VK_PRValue,
                             /*BasePath=*/nullptr, CCK)
               .get();
    break;

  case ICK_Function_To_Pointer:
    FromType = Context.getPointerType(T: FromType);
    From = ImpCastExprToType(E: From, Type: FromType, CK: CK_FunctionToPointerDecay,
                             VK: VK_PRValue, /*BasePath=*/nullptr, CCK)
               .get();
    break;

  default:
    llvm_unreachable("Improper first standard conversion");
  }

  // Perform the second implicit conversion
  switch (SCS.Second) {
  case ICK_Identity:
    // C++ [except.spec]p5:
    //   [For] assignment to and initialization of pointers to functions,
    //   pointers to member functions, and references to functions: the
    //   target entity shall allow at least the exceptions allowed by the
    //   source value in the assignment or initialization.
    switch (Action) {
    case AssignmentAction::Assigning:
    case AssignmentAction::Initializing:
      // Note, function argument passing and returning are initialization.
    case AssignmentAction::Passing:
    case AssignmentAction::Returning:
    case AssignmentAction::Sending:
    case AssignmentAction::Passing_CFAudited:
      if (CheckExceptionSpecCompatibility(From, ToType))
        return ExprError();
      break;

    case AssignmentAction::Casting:
    case AssignmentAction::Converting:
      // Casts and implicit conversions are not initialization, so are not
      // checked for exception specification mismatches.
      break;
    }
    // Nothing else to do.
    break;

  case ICK_Integral_Promotion:
  case ICK_Integral_Conversion: {
    // For vector operands, step through an intermediate vector type with the
    // destination element type but the source vector length.
    QualType ElTy = ToType;
    QualType StepTy = ToType;
    if (FromType->isVectorType() || ToType->isVectorType())
      StepTy = adjustVectorType(Context, FromTy: FromType, ToType, ElTy: &ElTy);
    if (ElTy->isBooleanType()) {
      assert(FromType->castAs<EnumType>()->getDecl()->isFixed() &&
             SCS.Second == ICK_Integral_Promotion &&
             "only enums with fixed underlying type can promote to bool");
      From = ImpCastExprToType(E: From, Type: StepTy, CK: CK_IntegralToBoolean, VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK)
                 .get();
    } else {
      From = ImpCastExprToType(E: From, Type: StepTy, CK: CK_IntegralCast, VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK)
                 .get();
    }
    break;
  }

  case ICK_Floating_Promotion:
  case ICK_Floating_Conversion: {
    QualType StepTy = ToType;
    if (FromType->isVectorType() || ToType->isVectorType())
      StepTy = adjustVectorType(Context, FromTy: FromType, ToType);
    From = ImpCastExprToType(E: From, Type: StepTy, CK: CK_FloatingCast, VK: VK_PRValue,
                             /*BasePath=*/nullptr, CCK)
               .get();
    break;
  }

  case ICK_Complex_Promotion:
  case ICK_Complex_Conversion: {
    // Pick the cast kind from the floating/integral nature of the two
    // complex element types.
    QualType FromEl = From->getType()->castAs<ComplexType>()->getElementType();
    QualType ToEl = ToType->castAs<ComplexType>()->getElementType();
    CastKind CK;
    if (FromEl->isRealFloatingType()) {
      if (ToEl->isRealFloatingType())
        CK = CK_FloatingComplexCast;
      else
        CK = CK_FloatingComplexToIntegralComplex;
    } else if (ToEl->isRealFloatingType()) {
      CK = CK_IntegralComplexToFloatingComplex;
    } else {
      CK = CK_IntegralComplexCast;
    }
    From = ImpCastExprToType(E: From, Type: ToType, CK, VK: VK_PRValue, /*BasePath=*/nullptr,
                             CCK)
               .get();
    break;
  }

  case ICK_Floating_Integral: {
    QualType ElTy = ToType;
    QualType StepTy = ToType;
    if (FromType->isVectorType() || ToType->isVectorType())
      StepTy = adjustVectorType(Context, FromTy: FromType, ToType, ElTy: &ElTy);
    if (ElTy->isRealFloatingType())
      From = ImpCastExprToType(E: From, Type: StepTy, CK: CK_IntegralToFloating, VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK)
                 .get();
    else
      From = ImpCastExprToType(E: From, Type: StepTy, CK: CK_FloatingToIntegral, VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK)
                 .get();
    break;
  }

  case ICK_Fixed_Point_Conversion:
    assert((FromType->isFixedPointType() || ToType->isFixedPointType()) &&
           "Attempting implicit fixed point conversion without a fixed "
           "point operand");
    if (FromType->isFloatingType())
      From = ImpCastExprToType(E: From, Type: ToType, CK: CK_FloatingToFixedPoint,
                               VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK).get();
    else if (ToType->isFloatingType())
      From = ImpCastExprToType(E: From, Type: ToType, CK: CK_FixedPointToFloating,
                               VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK).get();
    else if (FromType->isIntegralType(Ctx: Context))
      From = ImpCastExprToType(E: From, Type: ToType, CK: CK_IntegralToFixedPoint,
                               VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK).get();
    else if (ToType->isIntegralType(Ctx: Context))
      From = ImpCastExprToType(E: From, Type: ToType, CK: CK_FixedPointToIntegral,
                               VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK).get();
    else if (ToType->isBooleanType())
      From = ImpCastExprToType(E: From, Type: ToType, CK: CK_FixedPointToBoolean,
                               VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK).get();
    else
      From = ImpCastExprToType(E: From, Type: ToType, CK: CK_FixedPointCast,
                               VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK).get();
    break;

  case ICK_Compatible_Conversion:
    From = ImpCastExprToType(E: From, Type: ToType, CK: CK_NoOp, VK: From->getValueKind(),
                             /*BasePath=*/nullptr, CCK).get();
    break;

  case ICK_Writeback_Conversion:
  case ICK_Pointer_Conversion: {
    if (SCS.IncompatibleObjC && Action != AssignmentAction::Casting) {
      // Diagnose incompatible Objective-C conversions
      if (Action == AssignmentAction::Initializing ||
          Action == AssignmentAction::Assigning)
        Diag(Loc: From->getBeginLoc(),
             DiagID: diag::ext_typecheck_convert_incompatible_pointer)
            << ToType << From->getType() << Action << From->getSourceRange()
            << 0;
      else
        Diag(Loc: From->getBeginLoc(),
             DiagID: diag::ext_typecheck_convert_incompatible_pointer)
            << From->getType() << ToType << Action << From->getSourceRange()
            << 0;

      if (From->getType()->isObjCObjectPointerType() &&
          ToType->isObjCObjectPointerType())
        ObjC().EmitRelatedResultTypeNote(E: From);
    } else if (getLangOpts().allowsNonTrivialObjCLifetimeQualifiers() &&
               !ObjC().CheckObjCARCUnavailableWeakConversion(castType: ToType,
                                                             ExprType: From->getType())) {
      if (Action == AssignmentAction::Initializing)
        Diag(Loc: From->getBeginLoc(), DiagID: diag::err_arc_weak_unavailable_assign);
      else
        Diag(Loc: From->getBeginLoc(), DiagID: diag::err_arc_convesion_of_weak_unavailable)
            << (Action == AssignmentAction::Casting) << From->getType()
            << ToType << From->getSourceRange();
    }

    // Defer address space conversion to the third conversion.
    QualType FromPteeType = From->getType()->getPointeeType();
    QualType ToPteeType = ToType->getPointeeType();
    QualType NewToType = ToType;
    if (!FromPteeType.isNull() && !ToPteeType.isNull() &&
        FromPteeType.getAddressSpace() != ToPteeType.getAddressSpace()) {
      NewToType = Context.removeAddrSpaceQualType(T: ToPteeType);
      NewToType = Context.getAddrSpaceQualType(T: NewToType,
                                               AddressSpace: FromPteeType.getAddressSpace());
      // Rewrap the adjusted pointee in the same flavor of pointer type.
      if (ToType->isObjCObjectPointerType())
        NewToType = Context.getObjCObjectPointerType(OIT: NewToType);
      else if (ToType->isBlockPointerType())
        NewToType = Context.getBlockPointerType(T: NewToType);
      else
        NewToType = Context.getPointerType(T: NewToType);
    }

    CastKind Kind;
    CXXCastPath BasePath;
    if (CheckPointerConversion(From, ToType: NewToType, Kind, BasePath, IgnoreBaseAccess: CStyle))
      return ExprError();

    // Make sure we extend blocks if necessary.
    // FIXME: doing this here is really ugly.
    if (Kind == CK_BlockPointerToObjCPointerCast) {
      ExprResult E = From;
      (void)ObjC().PrepareCastToObjCObjectPointer(E);
      From = E.get();
    }
    if (getLangOpts().allowsNonTrivialObjCLifetimeQualifiers())
      ObjC().CheckObjCConversion(castRange: SourceRange(), castType: NewToType, op&: From, CCK);
    From = ImpCastExprToType(E: From, Type: NewToType, CK: Kind, VK: VK_PRValue, BasePath: &BasePath, CCK)
               .get();
    break;
  }

  case ICK_Pointer_Member: {
    CastKind Kind;
    CXXCastPath BasePath;
    switch (CheckMemberPointerConversion(
        FromType: From->getType(), ToPtrType: ToType->castAs<MemberPointerType>(), Kind, BasePath,
        CheckLoc: From->getExprLoc(), OpRange: From->getSourceRange(), IgnoreBaseAccess: CStyle,
        Direction: MemberPointerConversionDirection::Downcast)) {
    case MemberPointerConversionResult::Success:
      assert((Kind != CK_NullToMemberPointer ||
              From->isNullPointerConstant(Context,
                                          Expr::NPC_ValueDependentIsNull)) &&
             "Expr must be null pointer constant!");
      break;
    case MemberPointerConversionResult::Inaccessible:
      break;
    case MemberPointerConversionResult::DifferentPointee:
      llvm_unreachable("unexpected result");
    case MemberPointerConversionResult::NotDerived:
      llvm_unreachable("Should not have been called if derivation isn't OK.");
    case MemberPointerConversionResult::Ambiguous:
    case MemberPointerConversionResult::Virtual:
      return ExprError();
    }
    if (CheckExceptionSpecCompatibility(From, ToType))
      return ExprError();

    From =
        ImpCastExprToType(E: From, Type: ToType, CK: Kind, VK: VK_PRValue, BasePath: &BasePath, CCK).get();
    break;
  }

  case ICK_Boolean_Conversion: {
    // Perform half-to-boolean conversion via float.
    if (From->getType()->isHalfType()) {
      From = ImpCastExprToType(E: From, Type: Context.FloatTy, CK: CK_FloatingCast).get();
      FromType = Context.FloatTy;
    }
    QualType ElTy = FromType;
    QualType StepTy = ToType;
    if (FromType->isVectorType())
      ElTy = FromType->castAs<VectorType>()->getElementType();
    if (getLangOpts().HLSL &&
        (FromType->isVectorType() || ToType->isVectorType()))
      StepTy = adjustVectorType(Context, FromTy: FromType, ToType);

    From = ImpCastExprToType(E: From, Type: StepTy, CK: ScalarTypeToBooleanCastKind(ScalarTy: ElTy),
                             VK: VK_PRValue,
                             /*BasePath=*/nullptr, CCK)
               .get();
    break;
  }

  case ICK_Derived_To_Base: {
    CXXCastPath BasePath;
    if (CheckDerivedToBaseConversion(
            Derived: From->getType(), Base: ToType.getNonReferenceType(), Loc: From->getBeginLoc(),
            Range: From->getSourceRange(), BasePath: &BasePath, IgnoreAccess: CStyle))
      return ExprError();

    From = ImpCastExprToType(E: From, Type: ToType.getNonReferenceType(),
                             CK: CK_DerivedToBase, VK: From->getValueKind(),
                             BasePath: &BasePath, CCK).get();
    break;
  }

  case ICK_Vector_Conversion:
    From = ImpCastExprToType(E: From, Type: ToType, CK: CK_BitCast, VK: VK_PRValue,
                             /*BasePath=*/nullptr, CCK)
               .get();
    break;

  case ICK_SVE_Vector_Conversion:
  case ICK_RVV_Vector_Conversion:
    From = ImpCastExprToType(E: From, Type: ToType, CK: CK_BitCast, VK: VK_PRValue,
                             /*BasePath=*/nullptr, CCK)
               .get();
    break;

  case ICK_Vector_Splat: {
    // Vector splat from any arithmetic type to a vector.
    Expr *Elem = prepareVectorSplat(VectorTy: ToType, SplattedExpr: From).get();
    From = ImpCastExprToType(E: Elem, Type: ToType, CK: CK_VectorSplat, VK: VK_PRValue,
                             /*BasePath=*/nullptr, CCK)
               .get();
    break;
  }

  case ICK_Complex_Real:
    // Case 1. x -> _Complex y
    if (const ComplexType *ToComplex = ToType->getAs<ComplexType>()) {
      QualType ElType = ToComplex->getElementType();
      bool isFloatingComplex = ElType->isRealFloatingType();

      // x -> y
      if (Context.hasSameUnqualifiedType(T1: ElType, T2: From->getType())) {
        // do nothing
      } else if (From->getType()->isRealFloatingType()) {
        From = ImpCastExprToType(E: From, Type: ElType,
                CK: isFloatingComplex ? CK_FloatingCast : CK_FloatingToIntegral).get();
      } else {
        assert(From->getType()->isIntegerType());
        From = ImpCastExprToType(E: From, Type: ElType,
                CK: isFloatingComplex ? CK_IntegralToFloating : CK_IntegralCast).get();
      }
      // y -> _Complex y
      From = ImpCastExprToType(E: From, Type: ToType,
                   CK: isFloatingComplex ? CK_FloatingRealToComplex
                                     : CK_IntegralRealToComplex).get();

      // Case 2. _Complex x -> y
    } else {
      auto *FromComplex = From->getType()->castAs<ComplexType>();
      QualType ElType = FromComplex->getElementType();
      bool isFloatingComplex = ElType->isRealFloatingType();

      // _Complex x -> x
      From = ImpCastExprToType(E: From, Type: ElType,
                               CK: isFloatingComplex ? CK_FloatingComplexToReal
                                                 : CK_IntegralComplexToReal,
                               VK: VK_PRValue, /*BasePath=*/nullptr, CCK)
                 .get();

      // x -> y
      if (Context.hasSameUnqualifiedType(T1: ElType, T2: ToType)) {
        // do nothing
      } else if (ToType->isRealFloatingType()) {
        From = ImpCastExprToType(E: From, Type: ToType,
                                 CK: isFloatingComplex ? CK_FloatingCast
                                                   : CK_IntegralToFloating,
                                 VK: VK_PRValue, /*BasePath=*/nullptr, CCK)
                   .get();
      } else {
        assert(ToType->isIntegerType());
        From = ImpCastExprToType(E: From, Type: ToType,
                                 CK: isFloatingComplex ? CK_FloatingToIntegral
                                                   : CK_IntegralCast,
                                 VK: VK_PRValue, /*BasePath=*/nullptr, CCK)
                   .get();
      }
    }
    break;

  case ICK_Block_Pointer_Conversion: {
    // An address-space change on the block pointee needs an explicit
    // address-space conversion; otherwise a bitcast suffices.
    LangAS AddrSpaceL =
        ToType->castAs<BlockPointerType>()->getPointeeType().getAddressSpace();
    LangAS AddrSpaceR =
        FromType->castAs<BlockPointerType>()->getPointeeType().getAddressSpace();
    assert(Qualifiers::isAddressSpaceSupersetOf(AddrSpaceL, AddrSpaceR,
                                                getASTContext()) &&
           "Invalid cast");
    CastKind Kind =
        AddrSpaceL != AddrSpaceR ? CK_AddressSpaceConversion : CK_BitCast;
    From = ImpCastExprToType(E: From, Type: ToType.getUnqualifiedType(), CK: Kind,
                             VK: VK_PRValue, /*BasePath=*/nullptr, CCK)
               .get();
    break;
  }

  case ICK_TransparentUnionConversion: {
    ExprResult FromRes = From;
    AssignConvertType ConvTy =
        CheckTransparentUnionArgumentConstraints(ArgType: ToType, RHS&: FromRes);
    if (FromRes.isInvalid())
      return ExprError();
    From = FromRes.get();
    assert((ConvTy == AssignConvertType::Compatible) &&
           "Improper transparent union conversion");
    (void)ConvTy;
    break;
  }

  case ICK_Zero_Event_Conversion:
  case ICK_Zero_Queue_Conversion:
    From = ImpCastExprToType(E: From, Type: ToType,
                             CK: CK_ZeroToOCLOpaqueType,
                             VK: From->getValueKind()).get();
    break;

  case ICK_Lvalue_To_Rvalue:
  case ICK_Array_To_Pointer:
  case ICK_Function_To_Pointer:
  case ICK_Function_Conversion:
  case ICK_Qualification:
  case ICK_Num_Conversion_Kinds:
  case ICK_C_Only_Conversion:
  case ICK_Incompatible_Pointer_Conversion:
  case ICK_HLSL_Array_RValue:
  case ICK_HLSL_Vector_Truncation:
  case ICK_HLSL_Vector_Splat:
    llvm_unreachable("Improper second standard conversion");
  }

  if (SCS.Dimension != ICK_Identity) {
    // If SCS.Element is not ICK_Identity the To and From types must be HLSL
    // vectors or matrices.

    // TODO: Support HLSL matrices.
    assert((!From->getType()->isMatrixType() && !ToType->isMatrixType()) &&
           "Dimension conversion for matrix types is not implemented yet.");
    assert((ToType->isVectorType() || ToType->isBuiltinType()) &&
           "Dimension conversion output must be vector or scalar type.");
    switch (SCS.Dimension) {
    case ICK_HLSL_Vector_Splat: {
      // Vector splat from any arithmetic type to a vector.
      Expr *Elem = prepareVectorSplat(VectorTy: ToType, SplattedExpr: From).get();
      From = ImpCastExprToType(E: Elem, Type: ToType, CK: CK_VectorSplat, VK: VK_PRValue,
                               /*BasePath=*/nullptr, CCK)
                 .get();
      break;
    }
    case ICK_HLSL_Vector_Truncation: {
      // Note: HLSL built-in vectors are ExtVectors. Since this truncates a
      // vector to a smaller vector or to a scalar, this can only operate on
      // arguments where the source type is an ExtVector and the destination
      // type is destination type is either an ExtVectorType or a builtin scalar
      // type.
      auto *FromVec = From->getType()->castAs<VectorType>();
      QualType TruncTy = FromVec->getElementType();
      if (auto *ToVec = ToType->getAs<VectorType>())
        TruncTy = Context.getExtVectorType(VectorType: TruncTy, NumElts: ToVec->getNumElements());
      From = ImpCastExprToType(E: From, Type: TruncTy, CK: CK_HLSLVectorTruncation,
                               VK: From->getValueKind())
                 .get();

      break;
    }
    case ICK_Identity:
    default:
      llvm_unreachable("Improper element standard conversion");
    }
  }

  // Perform the third implicit conversion (function conversion or
  // qualification adjustment).
  switch (SCS.Third) {
  case ICK_Identity:
    // Nothing to do.
    break;

  case ICK_Function_Conversion:
    // If both sides are functions (or pointers/references to them), there could
    // be incompatible exception declarations.
    if (CheckExceptionSpecCompatibility(From, ToType))
      return ExprError();

    From = ImpCastExprToType(E: From, Type: ToType, CK: CK_NoOp, VK: VK_PRValue,
                             /*BasePath=*/nullptr, CCK)
               .get();
    break;

  case ICK_Qualification: {
    ExprValueKind VK = From->getValueKind();
    CastKind CK = CK_NoOp;

    // Qualification conversions that change the address space (deferred from
    // the second phase above) need a real address-space conversion.
    if (ToType->isReferenceType() &&
        ToType->getPointeeType().getAddressSpace() !=
            From->getType().getAddressSpace())
      CK = CK_AddressSpaceConversion;

    if (ToType->isPointerType() &&
        ToType->getPointeeType().getAddressSpace() !=
            From->getType()->getPointeeType().getAddressSpace())
      CK = CK_AddressSpaceConversion;

    // Warn when an implicit conversion silently drops __unaligned.
    if (!isCast(CCK) &&
        !ToType->getPointeeType().getQualifiers().hasUnaligned() &&
        From->getType()->getPointeeType().getQualifiers().hasUnaligned()) {
      Diag(Loc: From->getBeginLoc(), DiagID: diag::warn_imp_cast_drops_unaligned)
          << InitialFromType << ToType;
    }

    From = ImpCastExprToType(E: From, Type: ToType.getNonLValueExprType(Context), CK, VK,
                             /*BasePath=*/nullptr, CCK)
               .get();

    if (SCS.DeprecatedStringLiteralToCharPtr &&
        !getLangOpts().WritableStrings) {
      Diag(Loc: From->getBeginLoc(),
           DiagID: getLangOpts().CPlusPlus11
               ? diag::ext_deprecated_string_literal_conversion
               : diag::warn_deprecated_string_literal_conversion)
          << ToType.getNonReferenceType();
    }

    break;
  }

  default:
    llvm_unreachable("Improper third standard conversion");
  }

  // If this conversion sequence involved a scalar -> atomic conversion, perform
  // that conversion now.
  if (!ToAtomicType.isNull()) {
    assert(Context.hasSameType(
        ToAtomicType->castAs<AtomicType>()->getValueType(), From->getType()));
    From = ImpCastExprToType(E: From, Type: ToAtomicType, CK: CK_NonAtomicToAtomic,
                             VK: VK_PRValue, BasePath: nullptr, CCK)
               .get();
  }

  // Materialize a temporary if we're implicitly converting to a reference
  // type. This is not required by the C++ rules but is necessary to maintain
  // AST invariants.
  if (ToType->isReferenceType() && From->isPRValue()) {
    ExprResult Res = TemporaryMaterializationConversion(E: From);
    if (Res.isInvalid())
      return ExprError();
    From = Res.get();
  }

  // If this conversion sequence succeeded and involved implicitly converting a
  // _Nullable type to a _Nonnull one, complain.
  if (!isCast(CCK))
    diagnoseNullableToNonnullConversion(DstType: ToType, SrcType: InitialFromType,
                                        Loc: From->getBeginLoc());

  return From;
}
5286
/// Type-check the operands of a pointer-to-member operator (".*" or "->*",
/// selected by isIndirect), returning the type of the resulting expression or
/// a null QualType after diagnosing an error. On success, VK is set to the
/// value category of the result per C++ [expr.mptr.oper]p6; LHS may be
/// rewritten with lvalue/temporary-materialization conversions and a
/// derived-to-base cast.
QualType Sema::CheckPointerToMemberOperands(ExprResult &LHS, ExprResult &RHS,
                                            ExprValueKind &VK,
                                            SourceLocation Loc,
                                            bool isIndirect) {
  assert(!LHS.get()->hasPlaceholderType() && !RHS.get()->hasPlaceholderType() &&
         "placeholders should have been weeded out by now");

  // The LHS undergoes lvalue conversions if this is ->*, and undergoes the
  // temporary materialization conversion otherwise.
  if (isIndirect)
    LHS = DefaultLvalueConversion(E: LHS.get());
  else if (LHS.get()->isPRValue())
    LHS = TemporaryMaterializationConversion(E: LHS.get());
  if (LHS.isInvalid())
    return QualType();

  // The RHS always undergoes lvalue conversions.
  RHS = DefaultLvalueConversion(E: RHS.get());
  if (RHS.isInvalid()) return QualType();

  const char *OpSpelling = isIndirect ? "->*" : ".*";
  // C++ 5.5p2
  //   The binary operator .* [p3: ->*] binds its second operand, which shall
  //   be of type "pointer to member of T" (where T is a completely-defined
  //   class type) [...]
  QualType RHSType = RHS.get()->getType();
  const MemberPointerType *MemPtr = RHSType->getAs<MemberPointerType>();
  if (!MemPtr) {
    Diag(Loc, DiagID: diag::err_bad_memptr_rhs)
        << OpSpelling << RHSType << RHS.get()->getSourceRange();
    return QualType();
  }

  CXXRecordDecl *RHSClass = MemPtr->getMostRecentCXXRecordDecl();

  // Note: C++ [expr.mptr.oper]p2-3 says that the class type into which the
  // member pointer points must be completely-defined. However, there is no
  // reason for this semantic distinction, and the rule is not enforced by
  // other compilers. Therefore, we do not check this property, as it is
  // likely to be considered a defect.

  // C++ 5.5p2
  //   [...] to its first operand, which shall be of class T or of a class of
  //   which T is an unambiguous and accessible base class. [p3: a pointer to
  //   such a class]
  QualType LHSType = LHS.get()->getType();
  if (isIndirect) {
    // For ->*, the LHS must be a pointer; peel it to the pointee class type.
    if (const PointerType *Ptr = LHSType->getAs<PointerType>())
      LHSType = Ptr->getPointeeType();
    else {
      Diag(Loc, DiagID: diag::err_bad_memptr_lhs)
          << OpSpelling << 1 << LHSType
          << FixItHint::CreateReplacement(RemoveRange: SourceRange(Loc), Code: ".*");
      return QualType();
    }
  }
  CXXRecordDecl *LHSClass = LHSType->getAsCXXRecordDecl();

  if (!declaresSameEntity(D1: LHSClass, D2: RHSClass)) {
    // If we want to check the hierarchy, we need a complete type.
    if (RequireCompleteType(Loc, T: LHSType, DiagID: diag::err_bad_memptr_lhs,
                            Args: OpSpelling, Args: (int)isIndirect)) {
      return QualType();
    }

    if (!IsDerivedFrom(Loc, Derived: LHSClass, Base: RHSClass)) {
      Diag(Loc, DiagID: diag::err_bad_memptr_lhs) << OpSpelling
        << (int)isIndirect << LHS.get()->getType();
      return QualType();
    }

    CXXCastPath BasePath;
    if (CheckDerivedToBaseConversion(
            Derived: LHSType, Base: QualType(RHSClass->getTypeForDecl(), 0), Loc,
            Range: SourceRange(LHS.get()->getBeginLoc(), RHS.get()->getEndLoc()),
            BasePath: &BasePath))
      return QualType();

    // Cast LHS to type of use.
    QualType UseType = Context.getQualifiedType(T: RHSClass->getTypeForDecl(),
                                                Qs: LHSType.getQualifiers());
    if (isIndirect)
      UseType = Context.getPointerType(T: UseType);
    ExprValueKind VK = isIndirect ? VK_PRValue : LHS.get()->getValueKind();
    LHS = ImpCastExprToType(E: LHS.get(), Type: UseType, CK: CK_DerivedToBase, VK,
                            BasePath: &BasePath);
  }

  if (isa<CXXScalarValueInitExpr>(Val: RHS.get()->IgnoreParens())) {
    // Diagnose use of pointer-to-member type which when used as
    // the functional cast in a pointer-to-member expression.
    Diag(Loc, DiagID: diag::err_pointer_to_member_type) << isIndirect;
    return QualType();
  }

  // C++ 5.5p2
  //   The result is an object or a function of the type specified by the
  //   second operand.
  // The cv qualifiers are the union of those in the pointer and the left side,
  // in accordance with 5.5p5 and 5.2.5.
  QualType Result = MemPtr->getPointeeType();
  Result = Context.getCVRQualifiedType(T: Result, CVR: LHSType.getCVRQualifiers());

  // C++0x [expr.mptr.oper]p6:
  //   In a .* expression whose object expression is an rvalue, the program is
  //   ill-formed if the second operand is a pointer to member function with
  //   ref-qualifier &. In a ->* expression or in a .* expression whose object
  //   expression is an lvalue, the program is ill-formed if the second operand
  //   is a pointer to member function with ref-qualifier &&.
  if (const FunctionProtoType *Proto = Result->getAs<FunctionProtoType>()) {
    switch (Proto->getRefQualifier()) {
    case RQ_None:
      // Do nothing
      break;

    case RQ_LValue:
      if (!isIndirect && !LHS.get()->Classify(Ctx&: Context).isLValue()) {
        // C++2a allows functions with ref-qualifier & if their cv-qualifier-seq
        // is (exactly) 'const'.
        if (Proto->isConst() && !Proto->isVolatile())
          Diag(Loc, DiagID: getLangOpts().CPlusPlus20
                        ? diag::warn_cxx17_compat_pointer_to_const_ref_member_on_rvalue
                        : diag::ext_pointer_to_const_ref_member_on_rvalue);
        else
          Diag(Loc, DiagID: diag::err_pointer_to_member_oper_value_classify)
              << RHSType << 1 << LHS.get()->getSourceRange();
      }
      break;

    case RQ_RValue:
      if (isIndirect || !LHS.get()->Classify(Ctx&: Context).isRValue())
        Diag(Loc, DiagID: diag::err_pointer_to_member_oper_value_classify)
            << RHSType << 0 << LHS.get()->getSourceRange();
      break;
    }
  }

  // C++ [expr.mptr.oper]p6:
  //   The result of a .* expression whose second operand is a pointer
  //   to a data member is of the same value category as its
  //   first operand. The result of a .* expression whose second
  //   operand is a pointer to a member function is a prvalue. The
  //   result of an ->* expression is an lvalue if its second operand
  //   is a pointer to data member and a prvalue otherwise.
  if (Result->isFunctionType()) {
    VK = VK_PRValue;
    return Context.BoundMemberTy;
  } else if (isIndirect) {
    VK = VK_LValue;
  } else {
    VK = LHS.get()->getValueKind();
  }

  return Result;
}
5442
/// Try to convert a type to another according to C++11 5.16p3.
///
/// This is part of the parameter validation for the ? operator. If either
/// value operand is a class type, the two operands are attempted to be
/// converted to each other. This function does the conversion in one direction.
/// It returns true if the program is ill-formed and has already been diagnosed
/// as such.
///
/// \param From the operand being converted.
/// \param To the operand whose type is the conversion target.
/// \param HaveConversion [out] set to true if a usable conversion was found.
/// \param ToType [out] the type \p From should be converted to.
static bool TryClassUnification(Sema &Self, Expr *From, Expr *To,
                                SourceLocation QuestionLoc,
                                bool &HaveConversion,
                                QualType &ToType) {
  HaveConversion = false;
  ToType = To->getType();

  InitializationKind Kind =
      InitializationKind::CreateCopy(InitLoc: To->getBeginLoc(), EqualLoc: SourceLocation());
  // C++11 5.16p3
  //   The process for determining whether an operand expression E1 of type T1
  //   can be converted to match an operand expression E2 of type T2 is defined
  //   as follows:
  //   -- If E2 is an lvalue: E1 can be converted to match E2 if E1 can be
  //      implicitly converted to type "lvalue reference to T2", subject to the
  //      constraint that in the conversion the reference must bind directly to
  //      an lvalue.
  //   -- If E2 is an xvalue: E1 can be converted to match E2 if E1 can be
  //      implicitly converted to the type "rvalue reference to R2", subject to
  //      the constraint that the reference must bind directly.
  if (To->isGLValue()) {
    // Form a reference type matching E2's type and value category, then check
    // whether E1 binds directly to it.
    QualType T = Self.Context.getReferenceQualifiedType(e: To);
    InitializedEntity Entity = InitializedEntity::InitializeTemporary(Type: T);

    InitializationSequence InitSeq(Self, Entity, Kind, From);
    if (InitSeq.isDirectReferenceBinding()) {
      ToType = T;
      HaveConversion = true;
      return false;
    }

    // An ambiguous reference binding is ill-formed; diagnose it now.
    if (InitSeq.isAmbiguous())
      return InitSeq.Diagnose(S&: Self, Entity, Kind, Args: From);
  }

  //   -- If E2 is an rvalue, or if the conversion above cannot be done:
  //      -- if E1 and E2 have class type, and the underlying class types are
  //         the same or one is a base class of the other:
  QualType FTy = From->getType();
  QualType TTy = To->getType();
  const RecordType *FRec = FTy->getAs<RecordType>();
  const RecordType *TRec = TTy->getAs<RecordType>();
  bool FDerivedFromT = FRec && TRec && FRec != TRec &&
                       Self.IsDerivedFrom(Loc: QuestionLoc, Derived: FTy, Base: TTy);
  if (FRec && TRec && (FRec == TRec || FDerivedFromT ||
                       Self.IsDerivedFrom(Loc: QuestionLoc, Derived: TTy, Base: FTy))) {
    //         E1 can be converted to match E2 if the class of T2 is the
    //         same type as, or a base class of, the class of T1, and
    //         [cv2 > cv1].
    if (FRec == TRec || FDerivedFromT) {
      if (TTy.isAtLeastAsQualifiedAs(other: FTy, Ctx: Self.getASTContext())) {
        InitializedEntity Entity = InitializedEntity::InitializeTemporary(Type: TTy);
        InitializationSequence InitSeq(Self, Entity, Kind, From);
        if (InitSeq) {
          HaveConversion = true;
          return false;
        }

        if (InitSeq.isAmbiguous())
          return InitSeq.Diagnose(S&: Self, Entity, Kind, Args: From);
      }
    }

    // The classes are related but no conversion exists in this direction.
    // That is not an error by itself; the caller tries the other direction.
    return false;
  }

  //   -- Otherwise: E1 can be converted to match E2 if E1 can be
  //      implicitly converted to the type that expression E2 would have
  //      if E2 were converted to an rvalue (or the type it has, if E2 is
  //      an rvalue).
  //
  // This actually refers very narrowly to the lvalue-to-rvalue conversion, not
  // to the array-to-pointer or function-to-pointer conversions.
  TTy = TTy.getNonLValueExprType(Context: Self.Context);

  InitializedEntity Entity = InitializedEntity::InitializeTemporary(Type: TTy);
  InitializationSequence InitSeq(Self, Entity, Kind, From);
  HaveConversion = !InitSeq.Failed();
  ToType = TTy;
  if (InitSeq.isAmbiguous())
    return InitSeq.Diagnose(S&: Self, Entity, Kind, Args: From);

  return false;
}
5534
/// Try to find a common type for two expressions according to C++0x 5.16p5.
///
/// This is part of the parameter validation for the ? operator. If either
/// value operand is a class type, overload resolution is used to find a
/// conversion to a common type.
///
/// \returns false on success (both \p LHS and \p RHS have been converted to
/// the common type); true if the program is ill-formed and a diagnostic has
/// been emitted.
static bool FindConditionalOverload(Sema &Self, ExprResult &LHS, ExprResult &RHS,
                                    SourceLocation QuestionLoc) {
  Expr *Args[2] = { LHS.get(), RHS.get() };
  OverloadCandidateSet CandidateSet(QuestionLoc,
                                    OverloadCandidateSet::CSK_Operator);
  Self.AddBuiltinOperatorCandidates(Op: OO_Conditional, OpLoc: QuestionLoc, Args,
                                    CandidateSet);

  OverloadCandidateSet::iterator Best;
  switch (CandidateSet.BestViableFunction(S&: Self, Loc: QuestionLoc, Best)) {
  case OR_Success: {
    // We found a match. Perform the conversions on the arguments and move on.
    ExprResult LHSRes = Self.PerformImplicitConversion(
        From: LHS.get(), ToType: Best->BuiltinParamTypes[0], ICS: Best->Conversions[0],
        Action: AssignmentAction::Converting);
    if (LHSRes.isInvalid())
      break; // Conversion failed; fall through to the `return true` below.
    LHS = LHSRes;

    ExprResult RHSRes = Self.PerformImplicitConversion(
        From: RHS.get(), ToType: Best->BuiltinParamTypes[1], ICS: Best->Conversions[1],
        Action: AssignmentAction::Converting);
    if (RHSRes.isInvalid())
      break; // Conversion failed; fall through to the `return true` below.
    RHS = RHSRes;
    if (Best->Function)
      Self.MarkFunctionReferenced(Loc: QuestionLoc, Func: Best->Function);
    return false;
  }

  case OR_No_Viable_Function:
    // Emit a better diagnostic if one of the expressions is a null pointer
    // constant and the other is a pointer type. In this case, the user most
    // likely forgot to take the address of the other expression.
    if (Self.DiagnoseConditionalForNull(LHSExpr: LHS.get(), RHSExpr: RHS.get(), QuestionLoc))
      return true;

    Self.Diag(Loc: QuestionLoc, DiagID: diag::err_typecheck_cond_incompatible_operands)
        << LHS.get()->getType() << RHS.get()->getType()
        << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
    return true;

  case OR_Ambiguous:
    Self.Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_ambiguous_ovl)
        << LHS.get()->getType() << RHS.get()->getType()
        << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
    // FIXME: Print the possible common types by printing the return types of
    // the viable candidates.
    break;

  case OR_Deleted:
    // The candidate set contains only built-in operator candidates, which can
    // never be deleted.
    llvm_unreachable("Conditional operator has only built-in overloads");
  }
  return true;
}
5596
5597/// Perform an "extended" implicit conversion as returned by
5598/// TryClassUnification.
5599static bool ConvertForConditional(Sema &Self, ExprResult &E, QualType T) {
5600 InitializedEntity Entity = InitializedEntity::InitializeTemporary(Type: T);
5601 InitializationKind Kind =
5602 InitializationKind::CreateCopy(InitLoc: E.get()->getBeginLoc(), EqualLoc: SourceLocation());
5603 Expr *Arg = E.get();
5604 InitializationSequence InitSeq(Self, Entity, Kind, Arg);
5605 ExprResult Result = InitSeq.Perform(S&: Self, Entity, Kind, Args: Arg);
5606 if (Result.isInvalid())
5607 return true;
5608
5609 E = Result;
5610 return false;
5611}
5612
5613// Check the condition operand of ?: to see if it is valid for the GCC
5614// extension.
5615static bool isValidVectorForConditionalCondition(ASTContext &Ctx,
5616 QualType CondTy) {
5617 if (!CondTy->isVectorType() && !CondTy->isExtVectorType())
5618 return false;
5619 const QualType EltTy =
5620 cast<VectorType>(Val: CondTy.getCanonicalType())->getElementType();
5621 assert(!EltTy->isEnumeralType() && "Vectors cant be enum types");
5622 return EltTy->isIntegralType(Ctx);
5623}
5624
5625static bool isValidSizelessVectorForConditionalCondition(ASTContext &Ctx,
5626 QualType CondTy) {
5627 if (!CondTy->isSveVLSBuiltinType())
5628 return false;
5629 const QualType EltTy =
5630 cast<BuiltinType>(Val: CondTy.getCanonicalType())->getSveEltType(Ctx);
5631 assert(!EltTy->isEnumeralType() && "Vectors cant be enum types");
5632 return EltTy->isIntegralType(Ctx);
5633}
5634
/// Compute the result type of a conditional expression whose condition is a
/// GCC-style vector. The result is a vector type that must match the
/// condition in element count and element size; scalar operands are splatted
/// to the result type. Returns a null QualType after diagnosing an error.
QualType Sema::CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
                                           ExprResult &RHS,
                                           SourceLocation QuestionLoc) {
  LHS = DefaultFunctionArrayLvalueConversion(E: LHS.get());
  RHS = DefaultFunctionArrayLvalueConversion(E: RHS.get());

  QualType CondType = Cond.get()->getType();
  const auto *CondVT = CondType->castAs<VectorType>();
  QualType CondElementTy = CondVT->getElementType();
  unsigned CondElementCount = CondVT->getNumElements();
  QualType LHSType = LHS.get()->getType();
  const auto *LHSVT = LHSType->getAs<VectorType>();
  QualType RHSType = RHS.get()->getType();
  const auto *RHSVT = RHSType->getAs<VectorType>();

  QualType ResultType;

  if (LHSVT && RHSVT) {
    // The operands' vector flavor (ext_vector_type vs. gcc vector) must match
    // the condition's.
    if (isa<ExtVectorType>(Val: CondVT) != isa<ExtVectorType>(Val: LHSVT)) {
      Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_vector_cond_result_mismatch)
          << /*isExtVector*/ isa<ExtVectorType>(Val: CondVT);
      return {};
    }

    // If both are vector types, they must be the same type.
    if (!Context.hasSameType(T1: LHSType, T2: RHSType)) {
      Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_vector_mismatched)
          << LHSType << RHSType;
      return {};
    }
    ResultType = Context.getCommonSugaredType(X: LHSType, Y: RHSType);
  } else if (LHSVT || RHSVT) {
    // Exactly one operand is a vector: reuse the binary-operator vector
    // checks to convert/splat the scalar side.
    ResultType = CheckVectorOperands(
        LHS, RHS, Loc: QuestionLoc, /*isCompAssign*/ IsCompAssign: false, /*AllowBothBool*/ true,
        /*AllowBoolConversions*/ AllowBoolConversion: false,
        /*AllowBoolOperation*/ true,
        /*ReportInvalid*/ true);
    if (ResultType.isNull())
      return {};
  } else {
    // Both are scalar.
    LHSType = LHSType.getUnqualifiedType();
    RHSType = RHSType.getUnqualifiedType();
    QualType ResultElementTy =
        Context.hasSameType(T1: LHSType, T2: RHSType)
            ? Context.getCommonSugaredType(X: LHSType, Y: RHSType)
            : UsualArithmeticConversions(LHS, RHS, Loc: QuestionLoc,
                                         ACK: ArithConvKind::Conditional);

    if (ResultElementTy->isEnumeralType()) {
      Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_vector_operand_type)
          << ResultElementTy;
      return {};
    }
    // Build the result vector with the condition's flavor and element count,
    // then splat both scalars to it.
    if (CondType->isExtVectorType())
      ResultType =
          Context.getExtVectorType(VectorType: ResultElementTy, NumElts: CondVT->getNumElements());
    else
      ResultType = Context.getVectorType(
          VectorType: ResultElementTy, NumElts: CondVT->getNumElements(), VecKind: VectorKind::Generic);

    LHS = ImpCastExprToType(E: LHS.get(), Type: ResultType, CK: CK_VectorSplat);
    RHS = ImpCastExprToType(E: RHS.get(), Type: ResultType, CK: CK_VectorSplat);
  }

  assert(!ResultType.isNull() && ResultType->isVectorType() &&
         (!CondType->isExtVectorType() || ResultType->isExtVectorType()) &&
         "Result should have been a vector type");
  auto *ResultVectorTy = ResultType->castAs<VectorType>();
  QualType ResultElementTy = ResultVectorTy->getElementType();
  unsigned ResultElementCount = ResultVectorTy->getNumElements();

  // The result must select element-for-element on the condition: same element
  // count ...
  if (ResultElementCount != CondElementCount) {
    Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_vector_size) << CondType
                                                          << ResultType;
    return {};
  }

  // ... and same element size.
  if (Context.getTypeSize(T: ResultElementTy) !=
      Context.getTypeSize(T: CondElementTy)) {
    Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_vector_element_size) << CondType
                                                                  << ResultType;
    return {};
  }

  return ResultType;
}
5723
/// As CheckVectorConditionalTypes, but for conditionals whose condition is an
/// SVE sizeless (scalable) vector: the result must be a sizeless vector with
/// the same element count and element size as the condition. Returns a null
/// QualType after diagnosing an error.
QualType Sema::CheckSizelessVectorConditionalTypes(ExprResult &Cond,
                                                   ExprResult &LHS,
                                                   ExprResult &RHS,
                                                   SourceLocation QuestionLoc) {
  LHS = DefaultFunctionArrayLvalueConversion(E: LHS.get());
  RHS = DefaultFunctionArrayLvalueConversion(E: RHS.get());

  QualType CondType = Cond.get()->getType();
  const auto *CondBT = CondType->castAs<BuiltinType>();
  QualType CondElementTy = CondBT->getSveEltType(Ctx: Context);
  llvm::ElementCount CondElementCount =
      Context.getBuiltinVectorTypeInfo(VecTy: CondBT).EC;

  QualType LHSType = LHS.get()->getType();
  const auto *LHSBT =
      LHSType->isSveVLSBuiltinType() ? LHSType->getAs<BuiltinType>() : nullptr;
  QualType RHSType = RHS.get()->getType();
  const auto *RHSBT =
      RHSType->isSveVLSBuiltinType() ? RHSType->getAs<BuiltinType>() : nullptr;

  QualType ResultType;

  if (LHSBT && RHSBT) {
    // If both are sizeless vector types, they must be the same type.
    if (!Context.hasSameType(T1: LHSType, T2: RHSType)) {
      Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_vector_mismatched)
          << LHSType << RHSType;
      return QualType();
    }
    ResultType = LHSType;
  } else if (LHSBT || RHSBT) {
    // Exactly one operand is a sizeless vector: reuse the binary-operator
    // checks to convert the scalar side.
    ResultType = CheckSizelessVectorOperands(LHS, RHS, Loc: QuestionLoc,
                                             /*IsCompAssign*/ false,
                                             OperationKind: ArithConvKind::Conditional);
    if (ResultType.isNull())
      return QualType();
  } else {
    // Both are scalar so splat
    QualType ResultElementTy;
    LHSType = LHSType.getCanonicalType().getUnqualifiedType();
    RHSType = RHSType.getCanonicalType().getUnqualifiedType();

    if (Context.hasSameType(T1: LHSType, T2: RHSType))
      ResultElementTy = LHSType;
    else
      ResultElementTy = UsualArithmeticConversions(LHS, RHS, Loc: QuestionLoc,
                                                   ACK: ArithConvKind::Conditional);

    if (ResultElementTy->isEnumeralType()) {
      Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_vector_operand_type)
          << ResultElementTy;
      return QualType();
    }

    // Build a scalable vector with the condition's (known-minimum) element
    // count, then splat both scalars to it.
    ResultType = Context.getScalableVectorType(
        EltTy: ResultElementTy, NumElts: CondElementCount.getKnownMinValue());

    LHS = ImpCastExprToType(E: LHS.get(), Type: ResultType, CK: CK_VectorSplat);
    RHS = ImpCastExprToType(E: RHS.get(), Type: ResultType, CK: CK_VectorSplat);
  }

  assert(!ResultType.isNull() && ResultType->isSveVLSBuiltinType() &&
         "Result should have been a vector type");
  auto *ResultBuiltinTy = ResultType->castAs<BuiltinType>();
  QualType ResultElementTy = ResultBuiltinTy->getSveEltType(Ctx: Context);
  llvm::ElementCount ResultElementCount =
      Context.getBuiltinVectorTypeInfo(VecTy: ResultBuiltinTy).EC;

  // The result must select element-for-element on the condition: same element
  // count ...
  if (ResultElementCount != CondElementCount) {
    Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_vector_size)
        << CondType << ResultType;
    return QualType();
  }

  // ... and same element size.
  if (Context.getTypeSize(T: ResultElementTy) !=
      Context.getTypeSize(T: CondElementTy)) {
    Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_vector_element_size)
        << CondType << ResultType;
    return QualType();
  }

  return ResultType;
}
5807
/// Check the operands of a C++ conditional (?:) expression and compute the
/// result type, following C++11 [expr.cond] step by step.
///
/// \param VK [out] value kind of the result; prvalue unless one of the
///        glvalue cases below applies.
/// \param OK [out] object kind of the result; OK_BitField when the selected
///        glvalue operand is a bit-field.
/// \returns the result type, or a null QualType after emitting a diagnostic.
QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
                                           ExprResult &RHS, ExprValueKind &VK,
                                           ExprObjectKind &OK,
                                           SourceLocation QuestionLoc) {
  // FIXME: Handle C99's complex types, block pointers and Obj-C++ interface
  // pointers.

  // Assume r-value.
  VK = VK_PRValue;
  OK = OK_Ordinary;

  // A vector (or SVE sizeless-vector) condition selects element-wise and is
  // routed to the dedicated helpers below instead of the scalar rules.
  bool IsVectorConditional =
      isValidVectorForConditionalCondition(Ctx&: Context, CondTy: Cond.get()->getType());

  bool IsSizelessVectorConditional =
      isValidSizelessVectorForConditionalCondition(Ctx&: Context,
                                                   CondTy: Cond.get()->getType());

  // C++11 [expr.cond]p1
  //   The first expression is contextually converted to bool.
  // (For the vector extension, the condition stays a vector and only gets the
  // usual lvalue conversions.)
  if (!Cond.get()->isTypeDependent()) {
    ExprResult CondRes = IsVectorConditional || IsSizelessVectorConditional
                             ? DefaultFunctionArrayLvalueConversion(E: Cond.get())
                             : CheckCXXBooleanCondition(CondExpr: Cond.get());
    if (CondRes.isInvalid())
      return QualType();
    Cond = CondRes;
  } else {
    // To implement C++, the first expression typically doesn't alter the result
    // type of the conditional, however the GCC compatible vector extension
    // changes the result type to be that of the conditional. Since we cannot
    // know if this is a vector extension here, delay the conversion of the
    // LHS/RHS below until later.
    return Context.DependentTy;
  }

  // Either of the arguments dependent?
  if (LHS.get()->isTypeDependent() || RHS.get()->isTypeDependent())
    return Context.DependentTy;

  // C++11 [expr.cond]p2
  //   If either the second or the third operand has type (cv) void, ...
  QualType LTy = LHS.get()->getType();
  QualType RTy = RHS.get()->getType();
  bool LVoid = LTy->isVoidType();
  bool RVoid = RTy->isVoidType();
  if (LVoid || RVoid) {
    // ... one of the following shall hold:
    //   -- The second or the third operand (but not both) is a (possibly
    //      parenthesized) throw-expression; the result is of the type
    //      and value category of the other.
    bool LThrow = isa<CXXThrowExpr>(Val: LHS.get()->IgnoreParenImpCasts());
    bool RThrow = isa<CXXThrowExpr>(Val: RHS.get()->IgnoreParenImpCasts());

    // Void expressions aren't legal in the vector-conditional expressions.
    if (IsVectorConditional) {
      SourceRange DiagLoc =
          LVoid ? LHS.get()->getSourceRange() : RHS.get()->getSourceRange();
      bool IsThrow = LVoid ? LThrow : RThrow;
      Diag(Loc: DiagLoc.getBegin(), DiagID: diag::err_conditional_vector_has_void)
          << DiagLoc << IsThrow;
      return QualType();
    }

    if (LThrow != RThrow) {
      Expr *NonThrow = LThrow ? RHS.get() : LHS.get();
      VK = NonThrow->getValueKind();
      // DR (no number yet): the result is a bit-field if the
      // non-throw-expression operand is a bit-field.
      OK = NonThrow->getObjectKind();
      return NonThrow->getType();
    }

    //   -- Both the second and third operands have type void; the result is of
    //      type void and is a prvalue.
    if (LVoid && RVoid)
      return Context.getCommonSugaredType(X: LTy, Y: RTy);

    // Neither holds, error.
    Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_void_nonvoid)
        << (LVoid ? RTy : LTy) << (LVoid ? 0 : 1)
        << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
    return QualType();
  }

  // Neither is void.
  if (IsVectorConditional)
    return CheckVectorConditionalTypes(Cond, LHS, RHS, QuestionLoc);

  if (IsSizelessVectorConditional)
    return CheckSizelessVectorConditionalTypes(Cond, LHS, RHS, QuestionLoc);

  // WebAssembly tables are not allowed as conditional LHS or RHS.
  if (LTy->isWebAssemblyTableType() || RTy->isWebAssemblyTableType()) {
    Diag(Loc: QuestionLoc, DiagID: diag::err_wasm_table_conditional_expression)
        << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
    return QualType();
  }

  // C++11 [expr.cond]p3
  //   Otherwise, if the second and third operand have different types, and
  //   either has (cv) class type [...] an attempt is made to convert each of
  //   those operands to the type of the other.
  if (!Context.hasSameType(T1: LTy, T2: RTy) &&
      (LTy->isRecordType() || RTy->isRecordType())) {
    // These return true if a single direction is already ambiguous.
    QualType L2RType, R2LType;
    bool HaveL2R, HaveR2L;
    if (TryClassUnification(Self&: *this, From: LHS.get(), To: RHS.get(), QuestionLoc, HaveConversion&: HaveL2R, ToType&: L2RType))
      return QualType();
    if (TryClassUnification(Self&: *this, From: RHS.get(), To: LHS.get(), QuestionLoc, HaveConversion&: HaveR2L, ToType&: R2LType))
      return QualType();

    // If both can be converted, [...] the program is ill-formed.
    if (HaveL2R && HaveR2L) {
      Diag(Loc: QuestionLoc, DiagID: diag::err_conditional_ambiguous)
          << LTy << RTy << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
      return QualType();
    }

    // If exactly one conversion is possible, that conversion is applied to
    // the chosen operand and the converted operands are used in place of the
    // original operands for the remainder of this section.
    if (HaveL2R) {
      if (ConvertForConditional(Self&: *this, E&: LHS, T: L2RType) || LHS.isInvalid())
        return QualType();
      LTy = LHS.get()->getType();
    } else if (HaveR2L) {
      if (ConvertForConditional(Self&: *this, E&: RHS, T: R2LType) || RHS.isInvalid())
        return QualType();
      RTy = RHS.get()->getType();
    }
  }

  // C++11 [expr.cond]p3
  //   if both are glvalues of the same value category and the same type except
  //   for cv-qualification, an attempt is made to convert each of those
  //   operands to the type of the other.
  // FIXME:
  //   Resolving a defect in P0012R1: we extend this to cover all cases where
  //   one of the operands is reference-compatible with the other, in order
  //   to support conditionals between functions differing in noexcept. This
  //   will similarly cover difference in array bounds after P0388R4.
  // FIXME: If LTy and RTy have a composite pointer type, should we convert to
  //   that instead?
  ExprValueKind LVK = LHS.get()->getValueKind();
  ExprValueKind RVK = RHS.get()->getValueKind();
  if (!Context.hasSameType(T1: LTy, T2: RTy) && LVK == RVK && LVK != VK_PRValue) {
    // DerivedToBase was already handled by the class-specific case above.
    // FIXME: Should we allow ObjC conversions here?
    const ReferenceConversions AllowedConversions =
        ReferenceConversions::Qualification |
        ReferenceConversions::NestedQualification |
        ReferenceConversions::Function;

    ReferenceConversions RefConv;
    if (CompareReferenceRelationship(Loc: QuestionLoc, T1: LTy, T2: RTy, Conv: &RefConv) ==
            Ref_Compatible &&
        !(RefConv & ~AllowedConversions) &&
        // [...] subject to the constraint that the reference must bind
        // directly [...]
        !RHS.get()->refersToBitField() && !RHS.get()->refersToVectorElement()) {
      RHS = ImpCastExprToType(E: RHS.get(), Type: LTy, CK: CK_NoOp, VK: RVK);
      RTy = RHS.get()->getType();
    } else if (CompareReferenceRelationship(Loc: QuestionLoc, T1: RTy, T2: LTy, Conv: &RefConv) ==
                   Ref_Compatible &&
               !(RefConv & ~AllowedConversions) &&
               !LHS.get()->refersToBitField() &&
               !LHS.get()->refersToVectorElement()) {
      LHS = ImpCastExprToType(E: LHS.get(), Type: RTy, CK: CK_NoOp, VK: LVK);
      LTy = LHS.get()->getType();
    }
  }

  // C++11 [expr.cond]p4
  //   If the second and third operands are glvalues of the same value
  //   category and have the same type, the result is of that type and
  //   value category and it is a bit-field if the second or the third
  //   operand is a bit-field, or if both are bit-fields.
  // We only extend this to bitfields, not to the crazy other kinds of
  // l-values.
  bool Same = Context.hasSameType(T1: LTy, T2: RTy);
  if (Same && LVK == RVK && LVK != VK_PRValue &&
      LHS.get()->isOrdinaryOrBitFieldObject() &&
      RHS.get()->isOrdinaryOrBitFieldObject()) {
    VK = LHS.get()->getValueKind();
    if (LHS.get()->getObjectKind() == OK_BitField ||
        RHS.get()->getObjectKind() == OK_BitField)
      OK = OK_BitField;
    return Context.getCommonSugaredType(X: LTy, Y: RTy);
  }

  // C++11 [expr.cond]p5
  //   Otherwise, the result is a prvalue. If the second and third operands
  //   do not have the same type, and either has (cv) class type, ...
  if (!Same && (LTy->isRecordType() || RTy->isRecordType())) {
    //   ... overload resolution is used to determine the conversions (if any)
    //   to be applied to the operands. If the overload resolution fails, the
    //   program is ill-formed.
    if (FindConditionalOverload(Self&: *this, LHS, RHS, QuestionLoc))
      return QualType();
  }

  // C++11 [expr.cond]p6
  //   Lvalue-to-rvalue, array-to-pointer, and function-to-pointer standard
  //   conversions are performed on the second and third operands.
  LHS = DefaultFunctionArrayLvalueConversion(E: LHS.get());
  RHS = DefaultFunctionArrayLvalueConversion(E: RHS.get());
  if (LHS.isInvalid() || RHS.isInvalid())
    return QualType();
  LTy = LHS.get()->getType();
  RTy = RHS.get()->getType();

  // After those conversions, one of the following shall hold:
  //   -- The second and third operands have the same type; the result
  //      is of that type. If the operands have class type, the result
  //      is a prvalue temporary of the result type, which is
  //      copy-initialized from either the second operand or the third
  //      operand depending on the value of the first operand.
  if (Context.hasSameType(T1: LTy, T2: RTy)) {
    if (LTy->isRecordType()) {
      // The operands have class type. Make a temporary copy.
      ExprResult LHSCopy = PerformCopyInitialization(
          Entity: InitializedEntity::InitializeTemporary(Type: LTy), EqualLoc: SourceLocation(), Init: LHS);
      if (LHSCopy.isInvalid())
        return QualType();

      ExprResult RHSCopy = PerformCopyInitialization(
          Entity: InitializedEntity::InitializeTemporary(Type: RTy), EqualLoc: SourceLocation(), Init: RHS);
      if (RHSCopy.isInvalid())
        return QualType();

      LHS = LHSCopy;
      RHS = RHSCopy;
    }
    return Context.getCommonSugaredType(X: LTy, Y: RTy);
  }

  // Extension: conditional operator involving vector types.
  if (LTy->isVectorType() || RTy->isVectorType())
    return CheckVectorOperands(LHS, RHS, Loc: QuestionLoc, /*isCompAssign*/ IsCompAssign: false,
                               /*AllowBothBool*/ true,
                               /*AllowBoolConversions*/ AllowBoolConversion: false,
                               /*AllowBoolOperation*/ false,
                               /*ReportInvalid*/ true);

  //   -- The second and third operands have arithmetic or enumeration type;
  //      the usual arithmetic conversions are performed to bring them to a
  //      common type, and the result is of that type.
  if (LTy->isArithmeticType() && RTy->isArithmeticType()) {
    QualType ResTy = UsualArithmeticConversions(LHS, RHS, Loc: QuestionLoc,
                                                ACK: ArithConvKind::Conditional);
    if (LHS.isInvalid() || RHS.isInvalid())
      return QualType();
    if (ResTy.isNull()) {
      Diag(Loc: QuestionLoc,
           DiagID: diag::err_typecheck_cond_incompatible_operands) << LTy << RTy
          << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
      return QualType();
    }

    LHS = ImpCastExprToType(E: LHS.get(), Type: ResTy, CK: PrepareScalarCast(src&: LHS, destType: ResTy));
    RHS = ImpCastExprToType(E: RHS.get(), Type: ResTy, CK: PrepareScalarCast(src&: RHS, destType: ResTy));

    return ResTy;
  }

  //   -- The second and third operands have pointer type, or one has pointer
  //      type and the other is a null pointer constant, or both are null
  //      pointer constants, at least one of which is non-integral; pointer
  //      conversions and qualification conversions are performed to bring them
  //      to their composite pointer type. The result is of the composite
  //      pointer type.
  //   -- The second and third operands have pointer to member type, or one has
  //      pointer to member type and the other is a null pointer constant;
  //      pointer to member conversions and qualification conversions are
  //      performed to bring them to a common type, whose cv-qualification
  //      shall match the cv-qualification of either the second or the third
  //      operand. The result is of the common type.
  QualType Composite = FindCompositePointerType(Loc: QuestionLoc, E1&: LHS, E2&: RHS);
  if (!Composite.isNull())
    return Composite;

  // Similarly, attempt to find composite type of two objective-c pointers.
  Composite = ObjC().FindCompositeObjCPointerType(LHS, RHS, QuestionLoc);
  if (LHS.isInvalid() || RHS.isInvalid())
    return QualType();
  if (!Composite.isNull())
    return Composite;

  // Check if we are using a null with a non-pointer type.
  if (DiagnoseConditionalForNull(LHSExpr: LHS.get(), RHSExpr: RHS.get(), QuestionLoc))
    return QualType();

  // No rule applied: the operands are incompatible.
  Diag(Loc: QuestionLoc, DiagID: diag::err_typecheck_cond_incompatible_operands)
      << LHS.get()->getType() << RHS.get()->getType()
      << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
  return QualType();
}
6107
6108QualType Sema::FindCompositePointerType(SourceLocation Loc,
6109 Expr *&E1, Expr *&E2,
6110 bool ConvertArgs) {
6111 assert(getLangOpts().CPlusPlus && "This function assumes C++");
6112
6113 // C++1z [expr]p14:
6114 // The composite pointer type of two operands p1 and p2 having types T1
6115 // and T2
6116 QualType T1 = E1->getType(), T2 = E2->getType();
6117
6118 // where at least one is a pointer or pointer to member type or
6119 // std::nullptr_t is:
6120 bool T1IsPointerLike = T1->isAnyPointerType() || T1->isMemberPointerType() ||
6121 T1->isNullPtrType();
6122 bool T2IsPointerLike = T2->isAnyPointerType() || T2->isMemberPointerType() ||
6123 T2->isNullPtrType();
6124 if (!T1IsPointerLike && !T2IsPointerLike)
6125 return QualType();
6126
6127 // - if both p1 and p2 are null pointer constants, std::nullptr_t;
6128 // This can't actually happen, following the standard, but we also use this
6129 // to implement the end of [expr.conv], which hits this case.
6130 //
6131 // - if either p1 or p2 is a null pointer constant, T2 or T1, respectively;
6132 if (T1IsPointerLike &&
6133 E2->isNullPointerConstant(Ctx&: Context, NPC: Expr::NPC_ValueDependentIsNull)) {
6134 if (ConvertArgs)
6135 E2 = ImpCastExprToType(E: E2, Type: T1, CK: T1->isMemberPointerType()
6136 ? CK_NullToMemberPointer
6137 : CK_NullToPointer).get();
6138 return T1;
6139 }
6140 if (T2IsPointerLike &&
6141 E1->isNullPointerConstant(Ctx&: Context, NPC: Expr::NPC_ValueDependentIsNull)) {
6142 if (ConvertArgs)
6143 E1 = ImpCastExprToType(E: E1, Type: T2, CK: T2->isMemberPointerType()
6144 ? CK_NullToMemberPointer
6145 : CK_NullToPointer).get();
6146 return T2;
6147 }
6148
6149 // Now both have to be pointers or member pointers.
6150 if (!T1IsPointerLike || !T2IsPointerLike)
6151 return QualType();
6152 assert(!T1->isNullPtrType() && !T2->isNullPtrType() &&
6153 "nullptr_t should be a null pointer constant");
6154
6155 struct Step {
6156 enum Kind { Pointer, ObjCPointer, MemberPointer, Array } K;
6157 // Qualifiers to apply under the step kind.
6158 Qualifiers Quals;
6159 /// The class for a pointer-to-member; a constant array type with a bound
6160 /// (if any) for an array.
6161 /// FIXME: Store Qualifier for pointer-to-member.
6162 const Type *ClassOrBound;
6163
6164 Step(Kind K, const Type *ClassOrBound = nullptr)
6165 : K(K), ClassOrBound(ClassOrBound) {}
6166 QualType rebuild(ASTContext &Ctx, QualType T) const {
6167 T = Ctx.getQualifiedType(T, Qs: Quals);
6168 switch (K) {
6169 case Pointer:
6170 return Ctx.getPointerType(T);
6171 case MemberPointer:
6172 return Ctx.getMemberPointerType(T, /*Qualifier=*/nullptr,
6173 Cls: ClassOrBound->getAsCXXRecordDecl());
6174 case ObjCPointer:
6175 return Ctx.getObjCObjectPointerType(OIT: T);
6176 case Array:
6177 if (auto *CAT = cast_or_null<ConstantArrayType>(Val: ClassOrBound))
6178 return Ctx.getConstantArrayType(EltTy: T, ArySize: CAT->getSize(), SizeExpr: nullptr,
6179 ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
6180 else
6181 return Ctx.getIncompleteArrayType(EltTy: T, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
6182 }
6183 llvm_unreachable("unknown step kind");
6184 }
6185 };
6186
6187 SmallVector<Step, 8> Steps;
6188
6189 // - if T1 is "pointer to cv1 C1" and T2 is "pointer to cv2 C2", where C1
6190 // is reference-related to C2 or C2 is reference-related to C1 (8.6.3),
6191 // the cv-combined type of T1 and T2 or the cv-combined type of T2 and T1,
6192 // respectively;
6193 // - if T1 is "pointer to member of C1 of type cv1 U1" and T2 is "pointer
6194 // to member of C2 of type cv2 U2" for some non-function type U, where
6195 // C1 is reference-related to C2 or C2 is reference-related to C1, the
6196 // cv-combined type of T2 and T1 or the cv-combined type of T1 and T2,
6197 // respectively;
6198 // - if T1 and T2 are similar types (4.5), the cv-combined type of T1 and
6199 // T2;
6200 //
6201 // Dismantle T1 and T2 to simultaneously determine whether they are similar
6202 // and to prepare to form the cv-combined type if so.
6203 QualType Composite1 = T1;
6204 QualType Composite2 = T2;
6205 unsigned NeedConstBefore = 0;
6206 while (true) {
6207 assert(!Composite1.isNull() && !Composite2.isNull());
6208
6209 Qualifiers Q1, Q2;
6210 Composite1 = Context.getUnqualifiedArrayType(T: Composite1, Quals&: Q1);
6211 Composite2 = Context.getUnqualifiedArrayType(T: Composite2, Quals&: Q2);
6212
6213 // Top-level qualifiers are ignored. Merge at all lower levels.
6214 if (!Steps.empty()) {
6215 // Find the qualifier union: (approximately) the unique minimal set of
6216 // qualifiers that is compatible with both types.
6217 Qualifiers Quals = Qualifiers::fromCVRUMask(CVRU: Q1.getCVRUQualifiers() |
6218 Q2.getCVRUQualifiers());
6219
6220 // Under one level of pointer or pointer-to-member, we can change to an
6221 // unambiguous compatible address space.
6222 if (Q1.getAddressSpace() == Q2.getAddressSpace()) {
6223 Quals.setAddressSpace(Q1.getAddressSpace());
6224 } else if (Steps.size() == 1) {
6225 bool MaybeQ1 = Q1.isAddressSpaceSupersetOf(other: Q2, Ctx: getASTContext());
6226 bool MaybeQ2 = Q2.isAddressSpaceSupersetOf(other: Q1, Ctx: getASTContext());
6227 if (MaybeQ1 == MaybeQ2) {
6228 // Exception for ptr size address spaces. Should be able to choose
6229 // either address space during comparison.
6230 if (isPtrSizeAddressSpace(AS: Q1.getAddressSpace()) ||
6231 isPtrSizeAddressSpace(AS: Q2.getAddressSpace()))
6232 MaybeQ1 = true;
6233 else
6234 return QualType(); // No unique best address space.
6235 }
6236 Quals.setAddressSpace(MaybeQ1 ? Q1.getAddressSpace()
6237 : Q2.getAddressSpace());
6238 } else {
6239 return QualType();
6240 }
6241
6242 // FIXME: In C, we merge __strong and none to __strong at the top level.
6243 if (Q1.getObjCGCAttr() == Q2.getObjCGCAttr())
6244 Quals.setObjCGCAttr(Q1.getObjCGCAttr());
6245 else if (T1->isVoidPointerType() || T2->isVoidPointerType())
6246 assert(Steps.size() == 1);
6247 else
6248 return QualType();
6249
6250 // Mismatched lifetime qualifiers never compatibly include each other.
6251 if (Q1.getObjCLifetime() == Q2.getObjCLifetime())
6252 Quals.setObjCLifetime(Q1.getObjCLifetime());
6253 else if (T1->isVoidPointerType() || T2->isVoidPointerType())
6254 assert(Steps.size() == 1);
6255 else
6256 return QualType();
6257
6258 if (Q1.getPointerAuth().isEquivalent(Other: Q2.getPointerAuth()))
6259 Quals.setPointerAuth(Q1.getPointerAuth());
6260 else
6261 return QualType();
6262
6263 Steps.back().Quals = Quals;
6264 if (Q1 != Quals || Q2 != Quals)
6265 NeedConstBefore = Steps.size() - 1;
6266 }
6267
6268 // FIXME: Can we unify the following with UnwrapSimilarTypes?
6269
6270 const ArrayType *Arr1, *Arr2;
6271 if ((Arr1 = Context.getAsArrayType(T: Composite1)) &&
6272 (Arr2 = Context.getAsArrayType(T: Composite2))) {
6273 auto *CAT1 = dyn_cast<ConstantArrayType>(Val: Arr1);
6274 auto *CAT2 = dyn_cast<ConstantArrayType>(Val: Arr2);
6275 if (CAT1 && CAT2 && CAT1->getSize() == CAT2->getSize()) {
6276 Composite1 = Arr1->getElementType();
6277 Composite2 = Arr2->getElementType();
6278 Steps.emplace_back(Args: Step::Array, Args&: CAT1);
6279 continue;
6280 }
6281 bool IAT1 = isa<IncompleteArrayType>(Val: Arr1);
6282 bool IAT2 = isa<IncompleteArrayType>(Val: Arr2);
6283 if ((IAT1 && IAT2) ||
6284 (getLangOpts().CPlusPlus20 && (IAT1 != IAT2) &&
6285 ((bool)CAT1 != (bool)CAT2) &&
6286 (Steps.empty() || Steps.back().K != Step::Array))) {
6287 // In C++20 onwards, we can unify an array of N T with an array of
6288 // a different or unknown bound. But we can't form an array whose
6289 // element type is an array of unknown bound by doing so.
6290 Composite1 = Arr1->getElementType();
6291 Composite2 = Arr2->getElementType();
6292 Steps.emplace_back(Args: Step::Array);
6293 if (CAT1 || CAT2)
6294 NeedConstBefore = Steps.size();
6295 continue;
6296 }
6297 }
6298
6299 const PointerType *Ptr1, *Ptr2;
6300 if ((Ptr1 = Composite1->getAs<PointerType>()) &&
6301 (Ptr2 = Composite2->getAs<PointerType>())) {
6302 Composite1 = Ptr1->getPointeeType();
6303 Composite2 = Ptr2->getPointeeType();
6304 Steps.emplace_back(Args: Step::Pointer);
6305 continue;
6306 }
6307
6308 const ObjCObjectPointerType *ObjPtr1, *ObjPtr2;
6309 if ((ObjPtr1 = Composite1->getAs<ObjCObjectPointerType>()) &&
6310 (ObjPtr2 = Composite2->getAs<ObjCObjectPointerType>())) {
6311 Composite1 = ObjPtr1->getPointeeType();
6312 Composite2 = ObjPtr2->getPointeeType();
6313 Steps.emplace_back(Args: Step::ObjCPointer);
6314 continue;
6315 }
6316
6317 const MemberPointerType *MemPtr1, *MemPtr2;
6318 if ((MemPtr1 = Composite1->getAs<MemberPointerType>()) &&
6319 (MemPtr2 = Composite2->getAs<MemberPointerType>())) {
6320 Composite1 = MemPtr1->getPointeeType();
6321 Composite2 = MemPtr2->getPointeeType();
6322
6323 // At the top level, we can perform a base-to-derived pointer-to-member
6324 // conversion:
6325 //
6326 // - [...] where C1 is reference-related to C2 or C2 is
6327 // reference-related to C1
6328 //
6329 // (Note that the only kinds of reference-relatedness in scope here are
6330 // "same type or derived from".) At any other level, the class must
6331 // exactly match.
6332 CXXRecordDecl *Cls = nullptr,
6333 *Cls1 = MemPtr1->getMostRecentCXXRecordDecl(),
6334 *Cls2 = MemPtr2->getMostRecentCXXRecordDecl();
6335 if (declaresSameEntity(D1: Cls1, D2: Cls2))
6336 Cls = Cls1;
6337 else if (Steps.empty())
6338 Cls = IsDerivedFrom(Loc, Derived: Cls1, Base: Cls2) ? Cls1
6339 : IsDerivedFrom(Loc, Derived: Cls2, Base: Cls1) ? Cls2
6340 : nullptr;
6341 if (!Cls)
6342 return QualType();
6343
6344 Steps.emplace_back(Args: Step::MemberPointer,
6345 Args: Context.getTypeDeclType(Decl: Cls).getTypePtr());
6346 continue;
6347 }
6348
6349 // Special case: at the top level, we can decompose an Objective-C pointer
6350 // and a 'cv void *'. Unify the qualifiers.
6351 if (Steps.empty() && ((Composite1->isVoidPointerType() &&
6352 Composite2->isObjCObjectPointerType()) ||
6353 (Composite1->isObjCObjectPointerType() &&
6354 Composite2->isVoidPointerType()))) {
6355 Composite1 = Composite1->getPointeeType();
6356 Composite2 = Composite2->getPointeeType();
6357 Steps.emplace_back(Args: Step::Pointer);
6358 continue;
6359 }
6360
6361 // FIXME: block pointer types?
6362
6363 // Cannot unwrap any more types.
6364 break;
6365 }
6366
6367 // - if T1 or T2 is "pointer to noexcept function" and the other type is
6368 // "pointer to function", where the function types are otherwise the same,
6369 // "pointer to function";
6370 // - if T1 or T2 is "pointer to member of C1 of type function", the other
6371 // type is "pointer to member of C2 of type noexcept function", and C1
6372 // is reference-related to C2 or C2 is reference-related to C1, where
6373 // the function types are otherwise the same, "pointer to member of C2 of
6374 // type function" or "pointer to member of C1 of type function",
6375 // respectively;
6376 //
6377 // We also support 'noreturn' here, so as a Clang extension we generalize the
6378 // above to:
6379 //
6380 // - [Clang] If T1 and T2 are both of type "pointer to function" or
6381 // "pointer to member function" and the pointee types can be unified
6382 // by a function pointer conversion, that conversion is applied
6383 // before checking the following rules.
6384 //
6385 // We've already unwrapped down to the function types, and we want to merge
6386 // rather than just convert, so do this ourselves rather than calling
6387 // IsFunctionConversion.
6388 //
6389 // FIXME: In order to match the standard wording as closely as possible, we
6390 // currently only do this under a single level of pointers. Ideally, we would
6391 // allow this in general, and set NeedConstBefore to the relevant depth on
6392 // the side(s) where we changed anything. If we permit that, we should also
6393 // consider this conversion when determining type similarity and model it as
6394 // a qualification conversion.
6395 if (Steps.size() == 1) {
6396 if (auto *FPT1 = Composite1->getAs<FunctionProtoType>()) {
6397 if (auto *FPT2 = Composite2->getAs<FunctionProtoType>()) {
6398 FunctionProtoType::ExtProtoInfo EPI1 = FPT1->getExtProtoInfo();
6399 FunctionProtoType::ExtProtoInfo EPI2 = FPT2->getExtProtoInfo();
6400
6401 // The result is noreturn if both operands are.
6402 bool Noreturn =
6403 EPI1.ExtInfo.getNoReturn() && EPI2.ExtInfo.getNoReturn();
6404 EPI1.ExtInfo = EPI1.ExtInfo.withNoReturn(noReturn: Noreturn);
6405 EPI2.ExtInfo = EPI2.ExtInfo.withNoReturn(noReturn: Noreturn);
6406
6407 bool CFIUncheckedCallee =
6408 EPI1.CFIUncheckedCallee || EPI2.CFIUncheckedCallee;
6409 EPI1.CFIUncheckedCallee = CFIUncheckedCallee;
6410 EPI2.CFIUncheckedCallee = CFIUncheckedCallee;
6411
6412 // The result is nothrow if both operands are.
6413 SmallVector<QualType, 8> ExceptionTypeStorage;
6414 EPI1.ExceptionSpec = EPI2.ExceptionSpec = Context.mergeExceptionSpecs(
6415 ESI1: EPI1.ExceptionSpec, ESI2: EPI2.ExceptionSpec, ExceptionTypeStorage,
6416 AcceptDependent: getLangOpts().CPlusPlus17);
6417
6418 Composite1 = Context.getFunctionType(ResultTy: FPT1->getReturnType(),
6419 Args: FPT1->getParamTypes(), EPI: EPI1);
6420 Composite2 = Context.getFunctionType(ResultTy: FPT2->getReturnType(),
6421 Args: FPT2->getParamTypes(), EPI: EPI2);
6422 }
6423 }
6424 }
6425
6426 // There are some more conversions we can perform under exactly one pointer.
6427 if (Steps.size() == 1 && Steps.front().K == Step::Pointer &&
6428 !Context.hasSameType(T1: Composite1, T2: Composite2)) {
6429 // - if T1 or T2 is "pointer to cv1 void" and the other type is
6430 // "pointer to cv2 T", where T is an object type or void,
6431 // "pointer to cv12 void", where cv12 is the union of cv1 and cv2;
6432 if (Composite1->isVoidType() && Composite2->isObjectType())
6433 Composite2 = Composite1;
6434 else if (Composite2->isVoidType() && Composite1->isObjectType())
6435 Composite1 = Composite2;
6436 // - if T1 is "pointer to cv1 C1" and T2 is "pointer to cv2 C2", where C1
6437 // is reference-related to C2 or C2 is reference-related to C1 (8.6.3),
6438 // the cv-combined type of T1 and T2 or the cv-combined type of T2 and
6439 // T1, respectively;
6440 //
6441 // The "similar type" handling covers all of this except for the "T1 is a
6442 // base class of T2" case in the definition of reference-related.
6443 else if (IsDerivedFrom(Loc, Derived: Composite1, Base: Composite2))
6444 Composite1 = Composite2;
6445 else if (IsDerivedFrom(Loc, Derived: Composite2, Base: Composite1))
6446 Composite2 = Composite1;
6447 }
6448
6449 // At this point, either the inner types are the same or we have failed to
6450 // find a composite pointer type.
6451 if (!Context.hasSameType(T1: Composite1, T2: Composite2))
6452 return QualType();
6453
6454 // Per C++ [conv.qual]p3, add 'const' to every level before the last
6455 // differing qualifier.
6456 for (unsigned I = 0; I != NeedConstBefore; ++I)
6457 Steps[I].Quals.addConst();
6458
6459 // Rebuild the composite type.
6460 QualType Composite = Context.getCommonSugaredType(X: Composite1, Y: Composite2);
6461 for (auto &S : llvm::reverse(C&: Steps))
6462 Composite = S.rebuild(Ctx&: Context, T: Composite);
6463
6464 if (ConvertArgs) {
6465 // Convert the expressions to the composite pointer type.
6466 InitializedEntity Entity =
6467 InitializedEntity::InitializeTemporary(Type: Composite);
6468 InitializationKind Kind =
6469 InitializationKind::CreateCopy(InitLoc: Loc, EqualLoc: SourceLocation());
6470
6471 InitializationSequence E1ToC(*this, Entity, Kind, E1);
6472 if (!E1ToC)
6473 return QualType();
6474
6475 InitializationSequence E2ToC(*this, Entity, Kind, E2);
6476 if (!E2ToC)
6477 return QualType();
6478
6479 // FIXME: Let the caller know if these fail to avoid duplicate diagnostics.
6480 ExprResult E1Result = E1ToC.Perform(S&: *this, Entity, Kind, Args: E1);
6481 if (E1Result.isInvalid())
6482 return QualType();
6483 E1 = E1Result.get();
6484
6485 ExprResult E2Result = E2ToC.Perform(S&: *this, Entity, Kind, Args: E2);
6486 if (E2Result.isInvalid())
6487 return QualType();
6488 E2 = E2Result.get();
6489 }
6490
6491 return Composite;
6492}
6493
/// Wrap a prvalue of class type (or array thereof) in a
/// CXXBindTemporaryExpr when its type has a non-trivial destructor, so the
/// temporary is destroyed at the end of the enclosing full-expression.
/// Under ARC, also inserts the consume/reclaim cast needed for retainable
/// results. Returns the (possibly wrapped) expression, or an error.
ExprResult Sema::MaybeBindToTemporary(Expr *E) {
  if (!E)
    return ExprError();

  assert(!isa<CXXBindTemporaryExpr>(E) && "Double-bound temporary?");

  // If the result is a glvalue, we shouldn't bind it.
  if (E->isGLValue())
    return E;

  // In ARC, calls that return a retainable type can return retained,
  // in which case we have to insert a consuming cast.
  if (getLangOpts().ObjCAutoRefCount &&
      E->getType()->isObjCRetainableType()) {

    // Whether the producing expression hands us a +1 (retained) reference.
    bool ReturnsRetained;

    // For actual calls, we compute this by examining the type of the
    // called value.
    if (CallExpr *Call = dyn_cast<CallExpr>(Val: E)) {
      Expr *Callee = Call->getCallee()->IgnoreParens();
      QualType T = Callee->getType();

      if (T == Context.BoundMemberTy) {
        // Handle pointer-to-members.
        if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Val: Callee))
          T = BinOp->getRHS()->getType();
        else if (MemberExpr *Mem = dyn_cast<MemberExpr>(Val: Callee))
          T = Mem->getMemberDecl()->getType();
      }

      // Strip one pointer / block-pointer / member-pointer level to reach
      // the underlying function type.
      if (const PointerType *Ptr = T->getAs<PointerType>())
        T = Ptr->getPointeeType();
      else if (const BlockPointerType *Ptr = T->getAs<BlockPointerType>())
        T = Ptr->getPointeeType();
      else if (const MemberPointerType *MemPtr = T->getAs<MemberPointerType>())
        T = MemPtr->getPointeeType();

      auto *FTy = T->castAs<FunctionType>();
      ReturnsRetained = FTy->getExtInfo().getProducesResult();

      // ActOnStmtExpr arranges things so that StmtExprs of retainable
      // type always produce a +1 object.
    } else if (isa<StmtExpr>(Val: E)) {
      ReturnsRetained = true;

      // We hit this case with the lambda conversion-to-block optimization;
      // we don't want any extra casts here.
    } else if (isa<CastExpr>(Val: E) &&
               isa<BlockExpr>(Val: cast<CastExpr>(Val: E)->getSubExpr())) {
      return E;

      // For message sends and property references, we try to find an
      // actual method. FIXME: we should infer retention by selector in
      // cases where we don't have an actual method.
    } else {
      ObjCMethodDecl *D = nullptr;
      if (ObjCMessageExpr *Send = dyn_cast<ObjCMessageExpr>(Val: E)) {
        D = Send->getMethodDecl();
      } else if (ObjCBoxedExpr *BoxedExpr = dyn_cast<ObjCBoxedExpr>(Val: E)) {
        D = BoxedExpr->getBoxingMethod();
      } else if (ObjCArrayLiteral *ArrayLit = dyn_cast<ObjCArrayLiteral>(Val: E)) {
        // Don't do reclaims if we're using the zero-element array
        // constant.
        if (ArrayLit->getNumElements() == 0 &&
            Context.getLangOpts().ObjCRuntime.hasEmptyCollections())
          return E;

        D = ArrayLit->getArrayWithObjectsMethod();
      } else if (ObjCDictionaryLiteral *DictLit
                                        = dyn_cast<ObjCDictionaryLiteral>(Val: E)) {
        // Don't do reclaims if we're using the zero-element dictionary
        // constant.
        if (DictLit->getNumElements() == 0 &&
            Context.getLangOpts().ObjCRuntime.hasEmptyCollections())
          return E;

        D = DictLit->getDictWithObjectsMethod();
      }

      ReturnsRetained = (D && D->hasAttr<NSReturnsRetainedAttr>());

      // Don't do reclaims on performSelector calls; despite their
      // return type, the invoked method doesn't necessarily actually
      // return an object.
      if (!ReturnsRetained &&
          D && D->getMethodFamily() == OMF_performSelector)
        return E;
    }

    // Don't reclaim an object of Class type.
    if (!ReturnsRetained && E->getType()->isObjCARCImplicitlyUnretainedType())
      return E;

    Cleanup.setExprNeedsCleanups(true);

    CastKind ck = (ReturnsRetained ? CK_ARCConsumeObject
                                   : CK_ARCReclaimReturnedObject);
    return ImplicitCastExpr::Create(Context, T: E->getType(), Kind: ck, Operand: E, BasePath: nullptr,
                                    Cat: VK_PRValue, FPO: FPOptionsOverride());
  }

  // In C, structs with non-trivial destruction semantics still require a
  // cleanup at the end of the full-expression.
  if (E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
    Cleanup.setExprNeedsCleanups(true);

  if (!getLangOpts().CPlusPlus)
    return E;

  // Search for the base element type (cf. ASTContext::getBaseElementType) with
  // a fast path for the common case that the type is directly a RecordType.
  const Type *T = Context.getCanonicalType(T: E->getType().getTypePtr());
  const RecordType *RT = nullptr;
  while (!RT) {
    switch (T->getTypeClass()) {
    case Type::Record:
      RT = cast<RecordType>(Val: T);
      break;
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::DependentSizedArray:
      T = cast<ArrayType>(Val: T)->getElementType().getTypePtr();
      break;
    default:
      // Not a record (or array of records): no temporary binding needed.
      return E;
    }
  }

  // That should be enough to guarantee that this type is complete, if we're
  // not processing a decltype expression.
  CXXRecordDecl *RD = cast<CXXRecordDecl>(Val: RT->getDecl());
  if (RD->isInvalidDecl() || RD->isDependentContext())
    return E;

  // Inside a decltype operand, destructor lookup is deferred: the delayed
  // binds are revisited in ActOnDecltypeExpression, where the top-level
  // temporary is exempted from destructor checks.
  bool IsDecltype = ExprEvalContexts.back().ExprContext ==
                        ExpressionEvaluationContextRecord::EK_Decltype;
  CXXDestructorDecl *Destructor = IsDecltype ? nullptr : LookupDestructor(Class: RD);

  if (Destructor) {
    MarkFunctionReferenced(Loc: E->getExprLoc(), Func: Destructor);
    CheckDestructorAccess(Loc: E->getExprLoc(), Dtor: Destructor,
                          PDiag: PDiag(DiagID: diag::err_access_dtor_temp)
                            << E->getType());
    if (DiagnoseUseOfDecl(D: Destructor, Locs: E->getExprLoc()))
      return ExprError();

    // If destructor is trivial, we can avoid the extra copy.
    if (Destructor->isTrivial())
      return E;

    // We need a cleanup, but we don't need to remember the temporary.
    Cleanup.setExprNeedsCleanups(true);
  }

  CXXTemporary *Temp = CXXTemporary::Create(C: Context, Destructor);
  CXXBindTemporaryExpr *Bind = CXXBindTemporaryExpr::Create(C: Context, Temp, SubExpr: E);

  // Record the bind so ActOnDecltypeExpression can run the delayed
  // destructor checks once the decltype operand is fully parsed.
  if (IsDecltype)
    ExprEvalContexts.back().DelayedDecltypeBinds.push_back(Elt: Bind);

  return Bind;
}
6656
6657ExprResult
6658Sema::MaybeCreateExprWithCleanups(ExprResult SubExpr) {
6659 if (SubExpr.isInvalid())
6660 return ExprError();
6661
6662 return MaybeCreateExprWithCleanups(SubExpr: SubExpr.get());
6663}
6664
/// Wrap \p SubExpr in an ExprWithCleanups if the current expression
/// evaluation context has pending cleanups; otherwise return it unchanged.
/// On wrapping, the context's pending-cleanup state is consumed.
Expr *Sema::MaybeCreateExprWithCleanups(Expr *SubExpr) {
  assert(SubExpr && "subexpression can't be null!");

  CleanupVarDeclMarking();

  // Cleanup objects recorded before this evaluation context began belong
  // to an enclosing full-expression, so only the tail of the list is ours.
  unsigned FirstCleanup = ExprEvalContexts.back().NumCleanupObjects;
  assert(ExprCleanupObjects.size() >= FirstCleanup);
  assert(Cleanup.exprNeedsCleanups() ||
         ExprCleanupObjects.size() == FirstCleanup);
  if (!Cleanup.exprNeedsCleanups())
    return SubExpr;

  auto Cleanups = llvm::ArrayRef(ExprCleanupObjects.begin() + FirstCleanup,
                                 ExprCleanupObjects.size() - FirstCleanup);

  auto *E = ExprWithCleanups::Create(
      C: Context, subexpr: SubExpr, CleanupsHaveSideEffects: Cleanup.cleanupsHaveSideEffects(), objects: Cleanups);
  // The ExprWithCleanups now owns the cleanups; reset the context's state.
  DiscardCleanupsInEvaluationContext();

  return E;
}
6686
6687Stmt *Sema::MaybeCreateStmtWithCleanups(Stmt *SubStmt) {
6688 assert(SubStmt && "sub-statement can't be null!");
6689
6690 CleanupVarDeclMarking();
6691
6692 if (!Cleanup.exprNeedsCleanups())
6693 return SubStmt;
6694
6695 // FIXME: In order to attach the temporaries, wrap the statement into
6696 // a StmtExpr; currently this is only used for asm statements.
6697 // This is hacky, either create a new CXXStmtWithTemporaries statement or
6698 // a new AsmStmtWithTemporaries.
6699 CompoundStmt *CompStmt =
6700 CompoundStmt::Create(C: Context, Stmts: SubStmt, FPFeatures: FPOptionsOverride(),
6701 LB: SourceLocation(), RB: SourceLocation());
6702 Expr *E = new (Context)
6703 StmtExpr(CompStmt, Context.VoidTy, SourceLocation(), SourceLocation(),
6704 /*FIXME TemplateDepth=*/0);
6705 return MaybeCreateExprWithCleanups(SubExpr: E);
6706}
6707
/// Finish analysis of a decltype-specifier's operand: strip the outermost
/// temporary binding (C++11 [expr.call]p11 introduces no temporary for
/// such a prvalue) and perform the return-type and destructor checks that
/// were delayed while the operand was being parsed.
ExprResult Sema::ActOnDecltypeExpression(Expr *E) {
  assert(ExprEvalContexts.back().ExprContext ==
             ExpressionEvaluationContextRecord::EK_Decltype &&
         "not in a decltype expression");

  ExprResult Result = CheckPlaceholderExpr(E);
  if (Result.isInvalid())
    return ExprError();
  E = Result.get();

  // C++11 [expr.call]p11:
  //   If a function call is a prvalue of object type,
  // -- if the function call is either
  // -- the operand of a decltype-specifier, or
  // -- the right operand of a comma operator that is the operand of a
  //    decltype-specifier,
  //   a temporary object is not introduced for the prvalue.

  // Recursively rebuild ParenExprs and comma expressions to strip out the
  // outermost CXXBindTemporaryExpr, if any.
  if (ParenExpr *PE = dyn_cast<ParenExpr>(Val: E)) {
    ExprResult SubExpr = ActOnDecltypeExpression(E: PE->getSubExpr());
    if (SubExpr.isInvalid())
      return ExprError();
    // Only rebuild the ParenExpr when the operand actually changed.
    if (SubExpr.get() == PE->getSubExpr())
      return E;
    return ActOnParenExpr(L: PE->getLParen(), R: PE->getRParen(), E: SubExpr.get());
  }
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Val: E)) {
    if (BO->getOpcode() == BO_Comma) {
      ExprResult RHS = ActOnDecltypeExpression(E: BO->getRHS());
      if (RHS.isInvalid())
        return ExprError();
      if (RHS.get() == BO->getRHS())
        return E;
      return BinaryOperator::Create(C: Context, lhs: BO->getLHS(), rhs: RHS.get(), opc: BO_Comma,
                                    ResTy: BO->getType(), VK: BO->getValueKind(),
                                    OK: BO->getObjectKind(), opLoc: BO->getOperatorLoc(),
                                    FPFeatures: BO->getFPFeatures());
    }
  }

  // Identify the outermost temporary binding and the call it wraps, if any;
  // both are exempted from the delayed checks below per [expr.call]p11.
  CXXBindTemporaryExpr *TopBind = dyn_cast<CXXBindTemporaryExpr>(Val: E);
  CallExpr *TopCall = TopBind ? dyn_cast<CallExpr>(Val: TopBind->getSubExpr())
                              : nullptr;
  if (TopCall)
    E = TopCall;
  else
    TopBind = nullptr;

  // Disable the special decltype handling now.
  ExprEvalContexts.back().ExprContext =
      ExpressionEvaluationContextRecord::EK_Other;

  Result = CheckUnevaluatedOperand(E);
  if (Result.isInvalid())
    return ExprError();
  E = Result.get();

  // In MS mode, don't perform any extra checking of call return types within a
  // decltype expression.
  if (getLangOpts().MSVCCompat)
    return E;

  // Perform the semantic checks we delayed until this point.
  for (unsigned I = 0, N = ExprEvalContexts.back().DelayedDecltypeCalls.size();
       I != N; ++I) {
    CallExpr *Call = ExprEvalContexts.back().DelayedDecltypeCalls[I];
    // Skip the top-level call: no temporary is introduced for it.
    if (Call == TopCall)
      continue;

    if (CheckCallReturnType(ReturnType: Call->getCallReturnType(Ctx: Context),
                            Loc: Call->getBeginLoc(), CE: Call, FD: Call->getDirectCallee()))
      return ExprError();
  }

  // Now all relevant types are complete, check the destructors are accessible
  // and non-deleted, and annotate them on the temporaries.
  for (unsigned I = 0, N = ExprEvalContexts.back().DelayedDecltypeBinds.size();
       I != N; ++I) {
    CXXBindTemporaryExpr *Bind =
        ExprEvalContexts.back().DelayedDecltypeBinds[I];
    // Skip the top-level bind: its temporary is exempt from destruction
    // checks per [expr.call]p11.
    if (Bind == TopBind)
      continue;

    CXXTemporary *Temp = Bind->getTemporary();

    CXXRecordDecl *RD =
        Bind->getType()->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
    CXXDestructorDecl *Destructor = LookupDestructor(Class: RD);
    Temp->setDestructor(Destructor);

    MarkFunctionReferenced(Loc: Bind->getExprLoc(), Func: Destructor);
    CheckDestructorAccess(Loc: Bind->getExprLoc(), Dtor: Destructor,
                          PDiag: PDiag(DiagID: diag::err_access_dtor_temp)
                            << Bind->getType());
    if (DiagnoseUseOfDecl(D: Destructor, Locs: Bind->getExprLoc()))
      return ExprError();

    // We need a cleanup, but we don't need to remember the temporary.
    Cleanup.setExprNeedsCleanups(true);
  }

  // Possibly strip off the top CXXBindTemporaryExpr.
  return E;
}
6814
6815/// Note a set of 'operator->' functions that were used for a member access.
6816static void noteOperatorArrows(Sema &S,
6817 ArrayRef<FunctionDecl *> OperatorArrows) {
6818 unsigned SkipStart = OperatorArrows.size(), SkipCount = 0;
6819 // FIXME: Make this configurable?
6820 unsigned Limit = 9;
6821 if (OperatorArrows.size() > Limit) {
6822 // Produce Limit-1 normal notes and one 'skipping' note.
6823 SkipStart = (Limit - 1) / 2 + (Limit - 1) % 2;
6824 SkipCount = OperatorArrows.size() - (Limit - 1);
6825 }
6826
6827 for (unsigned I = 0; I < OperatorArrows.size(); /**/) {
6828 if (I == SkipStart) {
6829 S.Diag(Loc: OperatorArrows[I]->getLocation(),
6830 DiagID: diag::note_operator_arrows_suppressed)
6831 << SkipCount;
6832 I += SkipCount;
6833 } else {
6834 S.Diag(Loc: OperatorArrows[I]->getLocation(), DiagID: diag::note_operator_arrow_here)
6835 << OperatorArrows[I]->getCallResultType();
6836 ++I;
6837 }
6838 }
6839}
6840
/// Begin analysis of a C++ member access ('.' or '->') before the member
/// name is parsed: applies any overloaded operator-> chain on class-type
/// bases, computes the type in which the member will be looked up
/// (\p ObjectType), and sets \p MayBePseudoDestructor when the upcoming
/// member name may be a pseudo-destructor-name.
ExprResult Sema::ActOnStartCXXMemberReference(Scope *S, Expr *Base,
                                              SourceLocation OpLoc,
                                              tok::TokenKind OpKind,
                                              ParsedType &ObjectType,
                                              bool &MayBePseudoDestructor) {
  // Since this might be a postfix expression, get rid of ParenListExprs.
  ExprResult Result = MaybeConvertParenListExprToParenExpr(S, ME: Base);
  if (Result.isInvalid()) return ExprError();
  Base = Result.get();

  Result = CheckPlaceholderExpr(E: Base);
  if (Result.isInvalid()) return ExprError();
  Base = Result.get();

  QualType BaseType = Base->getType();
  MayBePseudoDestructor = false;
  if (BaseType->isDependentType()) {
    // If we have a pointer to a dependent type and are using the -> operator,
    // the object type is the type that the pointer points to. We might still
    // have enough information about that type to do something useful.
    if (OpKind == tok::arrow)
      if (const PointerType *Ptr = BaseType->getAs<PointerType>())
        BaseType = Ptr->getPointeeType();

    ObjectType = ParsedType::make(P: BaseType);
    MayBePseudoDestructor = true;
    return Base;
  }

  // C++ [over.match.oper]p8:
  //   [...] When operator->returns, the operator-> is applied to the value
  //   returned, with the original second operand.
  if (OpKind == tok::arrow) {
    QualType StartingType = BaseType;
    bool NoArrowOperatorFound = false;
    bool FirstIteration = true;
    FunctionDecl *CurFD = dyn_cast<FunctionDecl>(Val: CurContext);
    // The set of types we've considered so far.
    llvm::SmallPtrSet<CanQualType,8> CTypes;
    SmallVector<FunctionDecl*, 8> OperatorArrows;
    CTypes.insert(Ptr: Context.getCanonicalType(T: BaseType));

    // Repeatedly apply operator-> while the base has class type; the loop
    // is bounded by the ArrowDepth language option and by cycle detection
    // on the set of types seen (CTypes).
    while (BaseType->isRecordType()) {
      if (OperatorArrows.size() >= getLangOpts().ArrowDepth) {
        Diag(Loc: OpLoc, DiagID: diag::err_operator_arrow_depth_exceeded)
          << StartingType << getLangOpts().ArrowDepth << Base->getSourceRange();
        noteOperatorArrows(S&: *this, OperatorArrows);
        Diag(Loc: OpLoc, DiagID: diag::note_operator_arrow_depth)
          << getLangOpts().ArrowDepth;
        return ExprError();
      }

      Result = BuildOverloadedArrowExpr(
          S, Base, OpLoc,
          // When in a template specialization and on the first loop iteration,
          // potentially give the default diagnostic (with the fixit in a
          // separate note) instead of having the error reported back to here
          // and giving a diagnostic with a fixit attached to the error itself.
          NoArrowOperatorFound: (FirstIteration && CurFD && CurFD->isFunctionTemplateSpecialization())
              ? nullptr
              : &NoArrowOperatorFound);
      if (Result.isInvalid()) {
        if (NoArrowOperatorFound) {
          if (FirstIteration) {
            // The user probably meant '.'; suggest it and recover as if
            // they had written that.
            Diag(Loc: OpLoc, DiagID: diag::err_typecheck_member_reference_suggestion)
              << BaseType << 1 << Base->getSourceRange()
              << FixItHint::CreateReplacement(RemoveRange: OpLoc, Code: ".");
            OpKind = tok::period;
            break;
          }
          Diag(Loc: OpLoc, DiagID: diag::err_typecheck_member_reference_arrow)
            << BaseType << Base->getSourceRange();
          CallExpr *CE = dyn_cast<CallExpr>(Val: Base);
          if (Decl *CD = (CE ? CE->getCalleeDecl() : nullptr)) {
            Diag(Loc: CD->getBeginLoc(),
                 DiagID: diag::note_member_reference_arrow_from_operator_arrow);
          }
        }
        return ExprError();
      }
      Base = Result.get();
      if (CXXOperatorCallExpr *OpCall = dyn_cast<CXXOperatorCallExpr>(Val: Base))
        OperatorArrows.push_back(Elt: OpCall->getDirectCallee());
      BaseType = Base->getType();
      CanQualType CBaseType = Context.getCanonicalType(T: BaseType);
      // Revisiting a type means the operator-> chain can never terminate.
      if (!CTypes.insert(Ptr: CBaseType).second) {
        Diag(Loc: OpLoc, DiagID: diag::err_operator_arrow_circular) << StartingType;
        noteOperatorArrows(S&: *this, OperatorArrows);
        return ExprError();
      }
      FirstIteration = false;
    }

    // The chain ended in a non-class type; apply the built-in '->'.
    if (OpKind == tok::arrow) {
      if (BaseType->isPointerType())
        BaseType = BaseType->getPointeeType();
      else if (auto *AT = Context.getAsArrayType(T: BaseType))
        BaseType = AT->getElementType();
    }
  }

  // Objective-C properties allow "." access on Objective-C pointer types,
  // so adjust the base type to the object type itself.
  if (BaseType->isObjCObjectPointerType())
    BaseType = BaseType->getPointeeType();

  // C++ [basic.lookup.classref]p2:
  //   [...] If the type of the object expression is of pointer to scalar
  //   type, the unqualified-id is looked up in the context of the complete
  //   postfix-expression.
  //
  // This also indicates that we could be parsing a pseudo-destructor-name.
  // Note that Objective-C class and object types can be pseudo-destructor
  // expressions or normal member (ivar or property) access expressions, and
  // it's legal for the type to be incomplete if this is a pseudo-destructor
  // call.  We'll do more incomplete-type checks later in the lookup process,
  // so just skip this check for ObjC types.
  if (!BaseType->isRecordType()) {
    ObjectType = ParsedType::make(P: BaseType);
    MayBePseudoDestructor = true;
    return Base;
  }

  // The object type must be complete (or dependent), or
  // C++11 [expr.prim.general]p3:
  //   Unlike the object expression in other contexts, *this is not required to
  //   be of complete type for purposes of class member access (5.2.5) outside
  //   the member function body.
  if (!BaseType->isDependentType() &&
      !isThisOutsideMemberFunctionBody(BaseType) &&
      RequireCompleteType(Loc: OpLoc, T: BaseType,
                          DiagID: diag::err_incomplete_member_access)) {
    return CreateRecoveryExpr(Begin: Base->getBeginLoc(), End: Base->getEndLoc(), SubExprs: {Base});
  }

  // C++ [basic.lookup.classref]p2:
  //   If the id-expression in a class member access (5.2.5) is an
  //   unqualified-id, and the type of the object expression is of a class
  //   type C (or of pointer to a class type C), the unqualified-id is looked
  //   up in the scope of class C. [...]
  ObjectType = ParsedType::make(P: BaseType);
  return Base;
}
6984
/// Normalize the base expression of a pseudo-destructor call and compute its
/// object type.
///
/// Resolves placeholder types in \p Base, and for the '->' form performs the
/// usual lvalue conversions and steps through the pointer to the pointee
/// type. If '->' was used on a non-pointer, emits a diagnostic with a fix-it
/// suggesting '.' and recovers by rewriting \p OpKind (except in SFINAE
/// contexts, where this is treated as a hard error).
///
/// \param ObjectType [out] receives the (possibly pointee) object type.
/// \param Base [in,out] may be replaced by the converted expression.
/// \param OpKind [in,out] may be rewritten from tok::arrow to tok::period.
/// \returns true if an unrecoverable error was diagnosed.
static bool CheckArrow(Sema &S, QualType &ObjectType, Expr *&Base,
                       tok::TokenKind &OpKind, SourceLocation OpLoc) {
  // Placeholder types (e.g. unresolved overload sets) must be resolved
  // before the base's type can be inspected.
  if (Base->hasPlaceholderType()) {
    ExprResult result = S.CheckPlaceholderExpr(E: Base);
    if (result.isInvalid()) return true;
    Base = result.get();
  }
  ObjectType = Base->getType();

  // C++ [expr.pseudo]p2:
  //   The left-hand side of the dot operator shall be of scalar type. The
  //   left-hand side of the arrow operator shall be of pointer to scalar type.
  //   This scalar type is the object type.
  // Note that this is rather different from the normal handling for the
  // arrow operator.
  if (OpKind == tok::arrow) {
    // The operator requires a prvalue, so perform lvalue conversions.
    // Only do this if we might plausibly end with a pointer, as otherwise
    // this was likely to be intended to be a '.'.
    if (ObjectType->isPointerType() || ObjectType->isArrayType() ||
        ObjectType->isFunctionType()) {
      ExprResult BaseResult = S.DefaultFunctionArrayLvalueConversion(E: Base);
      if (BaseResult.isInvalid())
        return true;
      Base = BaseResult.get();
      ObjectType = Base->getType();
    }

    if (const PointerType *Ptr = ObjectType->getAs<PointerType>()) {
      ObjectType = Ptr->getPointeeType();
    } else if (!Base->isTypeDependent()) {
      // The user wrote "p->" when they probably meant "p."; fix it.
      S.Diag(Loc: OpLoc, DiagID: diag::err_typecheck_member_reference_suggestion)
        << ObjectType << true
        << FixItHint::CreateReplacement(RemoveRange: OpLoc, Code: ".");
      // In a SFINAE context we must not recover; report the failure instead.
      if (S.isSFINAEContext())
        return true;

      OpKind = tok::period;
    }
  }

  return false;
}
7029
7030/// Check if it's ok to try and recover dot pseudo destructor calls on
7031/// pointer objects.
7032static bool
7033canRecoverDotPseudoDestructorCallsOnPointerObjects(Sema &SemaRef,
7034 QualType DestructedType) {
7035 // If this is a record type, check if its destructor is callable.
7036 if (auto *RD = DestructedType->getAsCXXRecordDecl()) {
7037 if (RD->hasDefinition())
7038 if (CXXDestructorDecl *D = SemaRef.LookupDestructor(Class: RD))
7039 return SemaRef.CanUseDecl(D, /*TreatUnavailableAsInvalid=*/false);
7040 return false;
7041 }
7042
7043 // Otherwise, check if it's a type for which it's valid to use a pseudo-dtor.
7044 return DestructedType->isDependentType() || DestructedType->isScalarType() ||
7045 DestructedType->isVectorType();
7046}
7047
/// Build a C++ pseudo-destructor expression such as "p->~T()" or "x.~T()",
/// enforcing the constraints of C++ [expr.pseudo] and recovering from
/// near-miss code (wrong operator, mismatched type names).
///
/// \param ScopeTypeInfo the type named before '::' in the
///        pseudo-destructor-name, or null if none was written.
/// \param Destructed the type (or dependent identifier) named after '~'.
ExprResult Sema::BuildPseudoDestructorExpr(Expr *Base,
                                           SourceLocation OpLoc,
                                           tok::TokenKind OpKind,
                                           const CXXScopeSpec &SS,
                                           TypeSourceInfo *ScopeTypeInfo,
                                           SourceLocation CCLoc,
                                           SourceLocation TildeLoc,
                                           PseudoDestructorTypeStorage Destructed) {
  TypeSourceInfo *DestructedTypeInfo = Destructed.getTypeSourceInfo();

  // Normalize the base: resolve placeholders and step through '->'.
  QualType ObjectType;
  if (CheckArrow(S&: *this, ObjectType, Base, OpKind, OpLoc))
    return ExprError();

  // The object type must be scalar-like; dependent types are re-checked at
  // instantiation time. As an MSVC extension, 'void' is merely warned about.
  if (!ObjectType->isDependentType() && !ObjectType->isScalarType() &&
      !ObjectType->isVectorType() && !ObjectType->isMatrixType()) {
    if (getLangOpts().MSVCCompat && ObjectType->isVoidType())
      Diag(Loc: OpLoc, DiagID: diag::ext_pseudo_dtor_on_void) << Base->getSourceRange();
    else {
      Diag(Loc: OpLoc, DiagID: diag::err_pseudo_dtor_base_not_scalar)
        << ObjectType << Base->getSourceRange();
      return ExprError();
    }
  }

  // C++ [expr.pseudo]p2:
  //   [...] The cv-unqualified versions of the object type and of the type
  //   designated by the pseudo-destructor-name shall be the same type.
  if (DestructedTypeInfo) {
    QualType DestructedType = DestructedTypeInfo->getType();
    SourceLocation DestructedTypeStart =
        DestructedTypeInfo->getTypeLoc().getBeginLoc();
    if (!DestructedType->isDependentType() && !ObjectType->isDependentType()) {
      if (!Context.hasSameUnqualifiedType(T1: DestructedType, T2: ObjectType)) {
        // Detect dot pseudo destructor calls on pointer objects, e.g.:
        //   Foo *foo;
        //   foo.~Foo();
        if (OpKind == tok::period && ObjectType->isPointerType() &&
            Context.hasSameUnqualifiedType(T1: DestructedType,
                                           T2: ObjectType->getPointeeType())) {
          auto Diagnostic =
              Diag(Loc: OpLoc, DiagID: diag::err_typecheck_member_reference_suggestion)
              << ObjectType << /*IsArrow=*/0 << Base->getSourceRange();

          // Issue a fixit only when the destructor is valid.
          if (canRecoverDotPseudoDestructorCallsOnPointerObjects(
                  SemaRef&: *this, DestructedType))
            Diagnostic << FixItHint::CreateReplacement(RemoveRange: OpLoc, Code: "->");

          // Recover by setting the object type to the destructed type and the
          // operator to '->'.
          ObjectType = DestructedType;
          OpKind = tok::arrow;
        } else {
          Diag(Loc: DestructedTypeStart, DiagID: diag::err_pseudo_dtor_type_mismatch)
              << ObjectType << DestructedType << Base->getSourceRange()
              << DestructedTypeInfo->getTypeLoc().getSourceRange();

          // Recover by setting the destructed type to the object type.
          DestructedType = ObjectType;
          DestructedTypeInfo =
              Context.getTrivialTypeSourceInfo(T: ObjectType, Loc: DestructedTypeStart);
          Destructed = PseudoDestructorTypeStorage(DestructedTypeInfo);
        }
      } else if (DestructedType.getObjCLifetime() !=
                 ObjectType.getObjCLifetime()) {
        // ObjC ARC: the lifetime qualifiers must also agree.
        if (DestructedType.getObjCLifetime() == Qualifiers::OCL_None) {
          // Okay: just pretend that the user provided the correctly-qualified
          // type.
        } else {
          Diag(Loc: DestructedTypeStart, DiagID: diag::err_arc_pseudo_dtor_inconstant_quals)
              << ObjectType << DestructedType << Base->getSourceRange()
              << DestructedTypeInfo->getTypeLoc().getSourceRange();
        }

        // Recover by setting the destructed type to the object type.
        DestructedType = ObjectType;
        DestructedTypeInfo = Context.getTrivialTypeSourceInfo(T: ObjectType,
                                                              Loc: DestructedTypeStart);
        Destructed = PseudoDestructorTypeStorage(DestructedTypeInfo);
      }
    }
  }

  // C++ [expr.pseudo]p2:
  //   [...] Furthermore, the two type-names in a pseudo-destructor-name of the
  //   form
  //
  //     ::[opt] nested-name-specifier[opt] type-name :: ~ type-name
  //
  //   shall designate the same scalar type.
  if (ScopeTypeInfo) {
    QualType ScopeType = ScopeTypeInfo->getType();
    if (!ScopeType->isDependentType() && !ObjectType->isDependentType() &&
        !Context.hasSameUnqualifiedType(T1: ScopeType, T2: ObjectType)) {

      Diag(Loc: ScopeTypeInfo->getTypeLoc().getSourceRange().getBegin(),
           DiagID: diag::err_pseudo_dtor_type_mismatch)
          << ObjectType << ScopeType << Base->getSourceRange()
          << ScopeTypeInfo->getTypeLoc().getSourceRange();

      // Recover by dropping the scope type; it is not needed to build the
      // resulting expression.
      ScopeType = QualType();
      ScopeTypeInfo = nullptr;
    }
  }

  Expr *Result
    = new (Context) CXXPseudoDestructorExpr(Context, Base,
                                            OpKind == tok::arrow, OpLoc,
                                            SS.getWithLocInContext(Context),
                                            ScopeTypeInfo,
                                            CCLoc,
                                            TildeLoc,
                                            Destructed);

  return Result;
}
7166
/// Parser callback for a pseudo-destructor expression written with explicit
/// type names, e.g. "p->T::~T()". Resolves both type names (the optional
/// scope type before '::' and the destructed type after '~') to types, with
/// recovery for lookup failures, then delegates to BuildPseudoDestructorExpr.
ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
                                           SourceLocation OpLoc,
                                           tok::TokenKind OpKind,
                                           CXXScopeSpec &SS,
                                           UnqualifiedId &FirstTypeName,
                                           SourceLocation CCLoc,
                                           SourceLocation TildeLoc,
                                           UnqualifiedId &SecondTypeName) {
  assert((FirstTypeName.getKind() == UnqualifiedIdKind::IK_TemplateId ||
          FirstTypeName.getKind() == UnqualifiedIdKind::IK_Identifier) &&
         "Invalid first type name in pseudo-destructor");
  assert((SecondTypeName.getKind() == UnqualifiedIdKind::IK_TemplateId ||
          SecondTypeName.getKind() == UnqualifiedIdKind::IK_Identifier) &&
         "Invalid second type name in pseudo-destructor");

  // Normalize the base: resolve placeholders and step through '->'.
  QualType ObjectType;
  if (CheckArrow(S&: *this, ObjectType, Base, OpKind, OpLoc))
    return ExprError();

  // Compute the object type that we should use for name lookup purposes. Only
  // record types and dependent types matter.
  ParsedType ObjectTypePtrForLookup;
  if (!SS.isSet()) {
    if (ObjectType->isRecordType())
      ObjectTypePtrForLookup = ParsedType::make(P: ObjectType);
    else if (ObjectType->isDependentType())
      ObjectTypePtrForLookup = ParsedType::make(P: Context.DependentTy);
  }

  // Convert the name of the type being destructed (following the ~) into a
  // type (with source-location information).
  QualType DestructedType;
  TypeSourceInfo *DestructedTypeInfo = nullptr;
  PseudoDestructorTypeStorage Destructed;
  if (SecondTypeName.getKind() == UnqualifiedIdKind::IK_Identifier) {
    ParsedType T = getTypeName(II: *SecondTypeName.Identifier,
                               NameLoc: SecondTypeName.StartLocation,
                               S, SS: &SS, isClassName: true, HasTrailingDot: false, ObjectType: ObjectTypePtrForLookup,
                               /*IsCtorOrDtorName*/true);
    if (!T &&
        ((SS.isSet() && !computeDeclContext(SS, EnteringContext: false)) ||
         (!SS.isSet() && ObjectType->isDependentType()))) {
      // The name of the type being destroyed is a dependent name, and we
      // couldn't find anything useful in scope. Just store the identifier and
      // its location, and we'll perform (qualified) name lookup again at
      // template instantiation time.
      Destructed = PseudoDestructorTypeStorage(SecondTypeName.Identifier,
                                               SecondTypeName.StartLocation);
    } else if (!T) {
      // Lookup found something that is not a type; diagnose.
      Diag(Loc: SecondTypeName.StartLocation,
           DiagID: diag::err_pseudo_dtor_destructor_non_type)
        << SecondTypeName.Identifier << ObjectType;
      if (isSFINAEContext())
        return ExprError();

      // Recover by assuming we had the right type all along.
      DestructedType = ObjectType;
    } else
      DestructedType = GetTypeFromParser(Ty: T, TInfo: &DestructedTypeInfo);
  } else {
    // Resolve the template-id to a type.
    TemplateIdAnnotation *TemplateId = SecondTypeName.TemplateId;
    ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
                                       TemplateId->NumArgs);
    TypeResult T = ActOnTemplateIdType(S,
                                       SS,
                                       TemplateKWLoc: TemplateId->TemplateKWLoc,
                                       Template: TemplateId->Template,
                                       TemplateII: TemplateId->Name,
                                       TemplateIILoc: TemplateId->TemplateNameLoc,
                                       LAngleLoc: TemplateId->LAngleLoc,
                                       TemplateArgs: TemplateArgsPtr,
                                       RAngleLoc: TemplateId->RAngleLoc,
                                       /*IsCtorOrDtorName*/true);
    if (T.isInvalid() || !T.get()) {
      // Recover by assuming we had the right type all along.
      DestructedType = ObjectType;
    } else
      DestructedType = GetTypeFromParser(Ty: T.get(), TInfo: &DestructedTypeInfo);
  }

  // If we've performed some kind of recovery, (re-)build the type source
  // information.
  if (!DestructedType.isNull()) {
    if (!DestructedTypeInfo)
      DestructedTypeInfo = Context.getTrivialTypeSourceInfo(T: DestructedType,
                                                            Loc: SecondTypeName.StartLocation);
    Destructed = PseudoDestructorTypeStorage(DestructedTypeInfo);
  }

  // Convert the name of the scope type (the type prior to '::') into a type.
  TypeSourceInfo *ScopeTypeInfo = nullptr;
  QualType ScopeType;
  if (FirstTypeName.getKind() == UnqualifiedIdKind::IK_TemplateId ||
      FirstTypeName.Identifier) {
    if (FirstTypeName.getKind() == UnqualifiedIdKind::IK_Identifier) {
      ParsedType T = getTypeName(II: *FirstTypeName.Identifier,
                                 NameLoc: FirstTypeName.StartLocation,
                                 S, SS: &SS, isClassName: true, HasTrailingDot: false, ObjectType: ObjectTypePtrForLookup,
                                 /*IsCtorOrDtorName*/true);
      if (!T) {
        Diag(Loc: FirstTypeName.StartLocation,
             DiagID: diag::err_pseudo_dtor_destructor_non_type)
          << FirstTypeName.Identifier << ObjectType;

        if (isSFINAEContext())
          return ExprError();

        // Just drop this type. It's unnecessary anyway.
        ScopeType = QualType();
      } else
        ScopeType = GetTypeFromParser(Ty: T, TInfo: &ScopeTypeInfo);
    } else {
      // Resolve the template-id to a type.
      TemplateIdAnnotation *TemplateId = FirstTypeName.TemplateId;
      ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
                                         TemplateId->NumArgs);
      TypeResult T = ActOnTemplateIdType(S,
                                         SS,
                                         TemplateKWLoc: TemplateId->TemplateKWLoc,
                                         Template: TemplateId->Template,
                                         TemplateII: TemplateId->Name,
                                         TemplateIILoc: TemplateId->TemplateNameLoc,
                                         LAngleLoc: TemplateId->LAngleLoc,
                                         TemplateArgs: TemplateArgsPtr,
                                         RAngleLoc: TemplateId->RAngleLoc,
                                         /*IsCtorOrDtorName*/true);
      if (T.isInvalid() || !T.get()) {
        // Recover by dropping this type.
        ScopeType = QualType();
      } else
        ScopeType = GetTypeFromParser(Ty: T.get(), TInfo: &ScopeTypeInfo);
    }
  }

  // Ensure we have source-location info whenever a scope type survived.
  if (!ScopeType.isNull() && !ScopeTypeInfo)
    ScopeTypeInfo = Context.getTrivialTypeSourceInfo(T: ScopeType,
                                                     Loc: FirstTypeName.StartLocation);


  return BuildPseudoDestructorExpr(Base, OpLoc, OpKind, SS,
                                   ScopeTypeInfo, CCLoc, TildeLoc,
                                   Destructed);
}
7311
/// Parser callback for a pseudo-destructor expression whose type after '~'
/// is written as a decl-specifier, e.g. "p->~decltype(x)()" or a C++26
/// pack-indexing specifier. Builds type-source information for the specifier
/// and delegates to BuildPseudoDestructorExpr.
ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
                                           SourceLocation OpLoc,
                                           tok::TokenKind OpKind,
                                           SourceLocation TildeLoc,
                                           const DeclSpec& DS) {
  QualType ObjectType;
  QualType T;
  TypeLocBuilder TLB;
  // Bail out if the base is bad or the parser already diagnosed the type.
  if (CheckArrow(S&: *this, ObjectType, Base, OpKind, OpLoc) ||
      DS.getTypeSpecType() == DeclSpec::TST_error)
    return ExprError();

  switch (DS.getTypeSpecType()) {
  case DeclSpec::TST_decltype_auto: {
    Diag(Loc: DS.getTypeSpecTypeLoc(), DiagID: diag::err_decltype_auto_invalid);
    // 'true' converts to an invalid (error) ExprResult.
    return true;
  }
  case DeclSpec::TST_decltype: {
    T = BuildDecltypeType(E: DS.getRepAsExpr(), /*AsUnevaluated=*/false);
    DecltypeTypeLoc DecltypeTL = TLB.push<DecltypeTypeLoc>(T);
    DecltypeTL.setDecltypeLoc(DS.getTypeSpecTypeLoc());
    DecltypeTL.setRParenLoc(DS.getTypeofParensRange().getEnd());
    break;
  }
  case DeclSpec::TST_typename_pack_indexing: {
    T = ActOnPackIndexingType(Pattern: DS.getRepAsType().get(), IndexExpr: DS.getPackIndexingExpr(),
                              Loc: DS.getBeginLoc(), EllipsisLoc: DS.getEllipsisLoc());
    // The pattern needs its own (trivial) location info before the
    // pack-indexing loc is pushed on top of it.
    TLB.pushTrivial(Context&: getASTContext(),
                    T: cast<PackIndexingType>(Val: T.getTypePtr())->getPattern(),
                    Loc: DS.getBeginLoc());
    PackIndexingTypeLoc PITL = TLB.push<PackIndexingTypeLoc>(T);
    PITL.setEllipsisLoc(DS.getEllipsisLoc());
    break;
  }
  default:
    llvm_unreachable("Unsupported type in pseudo destructor");
  }
  TypeSourceInfo *DestructedTypeInfo = TLB.getTypeSourceInfo(Context, T);
  PseudoDestructorTypeStorage Destructed(DestructedTypeInfo);

  // No scope specifier or scope type is possible in this syntactic form.
  return BuildPseudoDestructorExpr(Base, OpLoc, OpKind, SS: CXXScopeSpec(),
                                   ScopeTypeInfo: nullptr, CCLoc: SourceLocation(), TildeLoc,
                                   Destructed);
}
7356
/// Build a noexcept(expression) operator. The operand is an unevaluated
/// operand; the result is a bool-typed CXXNoexceptExpr recording whether the
/// operand can throw.
ExprResult Sema::BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
                                      SourceLocation RParen) {
  // If the operand is an unresolved lookup expression, the expression is ill-
  // formed per [over.over]p1, because overloaded function names cannot be used
  // without arguments except in explicit contexts.
  ExprResult R = CheckPlaceholderExpr(E: Operand);
  if (R.isInvalid())
    return R;

  // Apply the checks common to all unevaluated operands.
  R = CheckUnevaluatedOperand(E: R.get());
  if (R.isInvalid())
    return ExprError();

  Operand = R.get();

  if (!inTemplateInstantiation() && !Operand->isInstantiationDependent() &&
      Operand->HasSideEffects(Ctx: Context, IncludePossibleEffects: false)) {
    // The expression operand for noexcept is in an unevaluated expression
    // context, so side effects could result in unintended consequences.
    Diag(Loc: Operand->getExprLoc(), DiagID: diag::warn_side_effects_unevaluated_context);
  }

  // Compute whether the operand can throw and record it in the AST node.
  CanThrowResult CanThrow = canThrow(E: Operand);
  return new (Context)
      CXXNoexceptExpr(Context.BoolTy, Operand, CanThrow, KeyLoc, RParen);
}
7383
7384ExprResult Sema::ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation,
7385 Expr *Operand, SourceLocation RParen) {
7386 return BuildCXXNoexceptExpr(KeyLoc, Operand, RParen);
7387}
7388
/// If \p E assigns to (or increments/decrements) a variable that is tracked
/// in \p RefsMinusAssignments, decrement that variable's counter. The map
/// appears to feed the unused-but-set-variable analysis: a write should not
/// count as a "use" of the variable.
static void MaybeDecrementCount(
    Expr *E, llvm::DenseMap<const VarDecl *, int> &RefsMinusAssignments) {
  DeclRefExpr *LHS = nullptr;
  bool IsCompoundAssign = false;
  bool isIncrementDecrementUnaryOp = false;
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Val: E)) {
    // For dependent operands only a plain '=' is recognized here; compound
    // assignment classification is unreliable before instantiation.
    if (BO->getLHS()->getType()->isDependentType() ||
        BO->getRHS()->getType()->isDependentType()) {
      if (BO->getOpcode() != BO_Assign)
        return;
    } else if (!BO->isAssignmentOp())
      return;
    else
      IsCompoundAssign = BO->isCompoundAssignmentOp();
    LHS = dyn_cast<DeclRefExpr>(Val: BO->getLHS());
  } else if (CXXOperatorCallExpr *COCE = dyn_cast<CXXOperatorCallExpr>(Val: E)) {
    // Overloaded operator=: the assigned-to object is the first argument.
    if (COCE->getOperator() != OO_Equal)
      return;
    LHS = dyn_cast<DeclRefExpr>(Val: COCE->getArg(Arg: 0));
  } else if (UnaryOperator *UO = dyn_cast<UnaryOperator>(Val: E)) {
    // ++/-- also write to their operand.
    if (!UO->isIncrementDecrementOp())
      return;
    isIncrementDecrementUnaryOp = true;
    LHS = dyn_cast<DeclRefExpr>(Val: UO->getSubExpr());
  }
  // Only direct references to variables are tracked.
  if (!LHS)
    return;
  VarDecl *VD = dyn_cast<VarDecl>(Val: LHS->getDecl());
  if (!VD)
    return;
  // Don't decrement RefsMinusAssignments if volatile variable with compound
  // assignment (+=, ...) or increment/decrement unary operator to avoid
  // potential unused-but-set-variable warning.
  if ((IsCompoundAssign || isIncrementDecrementUnaryOp) &&
      VD->getType().isVolatileQualified())
    return;
  auto iter = RefsMinusAssignments.find(Val: VD);
  if (iter == RefsMinusAssignments.end())
    return;
  iter->getSecond()--;
}
7430
/// Perform the conversions required for an expression used in a
/// context that ignores the result (a discarded-value expression).
///
/// In C++ this applies lvalue-to-rvalue conversion only for volatile
/// special-form lvalues (C++11) and, in lifetime-extending C++17 contexts,
/// the temporary materialization conversion. In C it applies the usual
/// function/array/lvalue conversions. On conversion failure the original
/// expression is returned so callers can continue with recovery.
ExprResult Sema::IgnoredValueConversions(Expr *E) {
  // A write to a tracked variable should not count as a use of it.
  MaybeDecrementCount(E, RefsMinusAssignments);

  if (E->hasPlaceholderType()) {
    ExprResult result = CheckPlaceholderExpr(E);
    if (result.isInvalid()) return E;
    E = result.get();
  }

  if (getLangOpts().CPlusPlus) {
    // The C++11 standard defines the notion of a discarded-value expression;
    // normally, we don't need to do anything to handle it, but if it is a
    // volatile lvalue with a special form, we perform an lvalue-to-rvalue
    // conversion.
    if (getLangOpts().CPlusPlus11 && E->isReadIfDiscardedInCPlusPlus11()) {
      ExprResult Res = DefaultLvalueConversion(E);
      if (Res.isInvalid())
        return E;
      E = Res.get();
    } else {
      // Per C++2a [expr.ass]p5, a volatile assignment is not deprecated if
      // it occurs as a discarded-value expression.
      CheckUnusedVolatileAssignment(E);
    }

    // C++1z:
    //   If the expression is a prvalue after this optional conversion, the
    //   temporary materialization conversion is applied.
    //
    // We do not materialize temporaries by default in order to avoid creating
    // unnecessary temporary objects. If we skip this step, IR generation is
    // able to synthesize the storage for itself in the aggregate case, and
    // adding the extra node to the AST is just clutter.
    if (isInLifetimeExtendingContext() && getLangOpts().CPlusPlus17 &&
        E->isPRValue() && !E->getType()->isVoidType()) {
      ExprResult Res = TemporaryMaterializationConversion(E);
      if (Res.isInvalid())
        return E;
      E = Res.get();
    }
    return E;
  }

  // C99 6.3.2.1:
  //   [Except in specific positions,] an lvalue that does not have
  //   array type is converted to the value stored in the
  //   designated object (and is no longer an lvalue).
  if (E->isPRValue()) {
    // In C, function designators (i.e. expressions of function type)
    // are r-values, but we still want to do function-to-pointer decay
    // on them. This is both technically correct and convenient for
    // some clients.
    if (!getLangOpts().CPlusPlus && E->getType()->isFunctionType())
      return DefaultFunctionArrayConversion(E);

    return E;
  }

  // GCC seems to also exclude expressions of incomplete enum type.
  if (const EnumType *T = E->getType()->getAs<EnumType>()) {
    if (!T->getDecl()->isComplete()) {
      // FIXME: stupid workaround for a codegen bug!
      E = ImpCastExprToType(E, Type: Context.VoidTy, CK: CK_ToVoid).get();
      return E;
    }
  }

  ExprResult Res = DefaultFunctionArrayLvalueConversion(E);
  if (Res.isInvalid())
    return E;
  E = Res.get();

  // Loading a value requires the type to be complete (void is exempt).
  if (!E->getType()->isVoidType())
    RequireCompleteType(Loc: E->getExprLoc(), T: E->getType(),
                        DiagID: diag::err_incomplete_type);
  return E;
}
7510
7511ExprResult Sema::CheckUnevaluatedOperand(Expr *E) {
7512 // Per C++2a [expr.ass]p5, a volatile assignment is not deprecated if
7513 // it occurs as an unevaluated operand.
7514 CheckUnusedVolatileAssignment(E);
7515
7516 return E;
7517}
7518
7519// If we can unambiguously determine whether Var can never be used
7520// in a constant expression, return true.
7521// - if the variable and its initializer are non-dependent, then
7522// we can unambiguously check if the variable is a constant expression.
7523// - if the initializer is not value dependent - we can determine whether
7524// it can be used to initialize a constant expression. If Init can not
7525// be used to initialize a constant expression we conclude that Var can
7526// never be a constant expression.
7527// - FXIME: if the initializer is dependent, we can still do some analysis and
7528// identify certain cases unambiguously as non-const by using a Visitor:
7529// - such as those that involve odr-use of a ParmVarDecl, involve a new
7530// delete, lambda-expr, dynamic-cast, reinterpret-cast etc...
7531static inline bool VariableCanNeverBeAConstantExpression(VarDecl *Var,
7532 ASTContext &Context) {
7533 if (isa<ParmVarDecl>(Val: Var)) return true;
7534 const VarDecl *DefVD = nullptr;
7535
7536 // If there is no initializer - this can not be a constant expression.
7537 const Expr *Init = Var->getAnyInitializer(D&: DefVD);
7538 if (!Init)
7539 return true;
7540 assert(DefVD);
7541 if (DefVD->isWeak())
7542 return false;
7543
7544 if (Var->getType()->isDependentType() || Init->isValueDependent()) {
7545 // FIXME: Teach the constant evaluator to deal with the non-dependent parts
7546 // of value-dependent expressions, and use it here to determine whether the
7547 // initializer is a potential constant expression.
7548 return false;
7549 }
7550
7551 return !Var->isUsableInConstantExpressions(C: Context);
7552}
7553
/// Check if the current lambda has any potential captures
/// that must be captured by any of its enclosing lambdas that are ready to
/// capture. If there is a lambda that can capture a nested
/// potential-capture, go ahead and do so. Also, check to see if any
/// variables are uncaptureable or do not involve an odr-use so do not
/// need to be captured.
///
/// \param FE the full-expression just completed.
/// \param CurrentLSI the scope info of the innermost (generic) lambda.
static void CheckIfAnyEnclosingLambdasMustCaptureAnyPotentialCaptures(
    Expr *const FE, LambdaScopeInfo *const CurrentLSI, Sema &S) {

  assert(!S.isUnevaluatedContext());
  assert(S.CurContext->isDependentContext());
#ifndef NDEBUG
  DeclContext *DC = S.CurContext;
  while (isa_and_nonnull<CapturedDecl>(DC))
    DC = DC->getParent();
  assert(
      (CurrentLSI->CallOperator == DC || !CurrentLSI->AfterParameterList) &&
      "The current call operator must be synchronized with Sema's CurContext");
#endif // NDEBUG

  const bool IsFullExprInstantiationDependent = FE->isInstantiationDependent();

  // All the potentially captureable variables in the current nested
  // lambda (within a generic outer lambda), must be captured by an
  // outer lambda that is enclosed within a non-dependent context.
  CurrentLSI->visitPotentialCaptures(Callback: [&](ValueDecl *Var, Expr *VarExpr) {
    // If the variable is clearly identified as non-odr-used and the full
    // expression is not instantiation dependent, only then do we not
    // need to check enclosing lambda's for speculative captures.
    // For e.g.:
    // Even though 'x' is not odr-used, it should be captured.
    // int test() {
    //   const int x = 10;
    //   auto L = [=](auto a) {
    //     (void) +x + a;
    //   };
    // }
    if (CurrentLSI->isVariableExprMarkedAsNonODRUsed(CapturingVarExpr: VarExpr) &&
        !IsFullExprInstantiationDependent)
      return;

    // Structured-binding entities resolve to their underlying variable.
    VarDecl *UnderlyingVar = Var->getPotentiallyDecomposedVarDecl();
    if (!UnderlyingVar)
      return;

    // If we have a capture-capable lambda for the variable, go ahead and
    // capture the variable in that lambda (and all its enclosing lambdas).
    if (const UnsignedOrNone Index =
            getStackIndexOfNearestEnclosingCaptureCapableLambda(
                FunctionScopes: S.FunctionScopes, VarToCapture: Var, S))
      S.MarkCaptureUsedInEnclosingContext(Capture: Var, Loc: VarExpr->getExprLoc(), CapturingScopeIndex: *Index);
    const bool IsVarNeverAConstantExpression =
        VariableCanNeverBeAConstantExpression(Var: UnderlyingVar, Context&: S.Context);
    if (!IsFullExprInstantiationDependent || IsVarNeverAConstantExpression) {
      // This full expression is not instantiation dependent or the variable
      // can not be used in a constant expression - which means
      // this variable must be odr-used here, so diagnose a
      // capture violation early, if the variable is un-captureable.
      // This is purely for diagnosing errors early.  Otherwise, this
      // error would get diagnosed when the lambda becomes capture ready.
      QualType CaptureType, DeclRefType;
      SourceLocation ExprLoc = VarExpr->getExprLoc();
      // First do a silent dry run; only if that fails, re-run with
      // diagnostics enabled.
      if (S.tryCaptureVariable(Var, Loc: ExprLoc, Kind: TryCaptureKind::Implicit,
                               /*EllipsisLoc*/ SourceLocation(),
                               /*BuildAndDiagnose*/ false, CaptureType,
                               DeclRefType, FunctionScopeIndexToStopAt: nullptr)) {
        // We will never be able to capture this variable, and we need
        // to be able to in any and all instantiations, so diagnose it.
        S.tryCaptureVariable(Var, Loc: ExprLoc, Kind: TryCaptureKind::Implicit,
                             /*EllipsisLoc*/ SourceLocation(),
                             /*BuildAndDiagnose*/ true, CaptureType,
                             DeclRefType, FunctionScopeIndexToStopAt: nullptr);
      }
    }
  });

  // Check if 'this' needs to be captured.
  if (CurrentLSI->hasPotentialThisCapture()) {
    // If we have a capture-capable lambda for 'this', go ahead and capture
    // 'this' in that lambda (and all its enclosing lambdas).
    if (const UnsignedOrNone Index =
            getStackIndexOfNearestEnclosingCaptureCapableLambda(
                FunctionScopes: S.FunctionScopes, /*0 is 'this'*/ VarToCapture: nullptr, S)) {
      const unsigned FunctionScopeIndexOfCapturableLambda = *Index;
      S.CheckCXXThisCapture(Loc: CurrentLSI->PotentialThisCaptureLocation,
                            /*Explicit*/ false, /*BuildAndDiagnose*/ true,
                            FunctionScopeIndexToStopAt: &FunctionScopeIndexOfCapturableLambda);
    }
  }

  // Reset all the potential captures at the end of each full-expression.
  CurrentLSI->clearPotentialCaptures();
}
7648
/// Finish a full-expression: diagnose unexpanded parameter packs, apply
/// discarded-value handling when \p DiscardedValue is set, run
/// completed-expression checks, resolve pending lambda captures, and wrap
/// the result in an ExprWithCleanups if cleanups are required.
ExprResult Sema::ActOnFinishFullExpr(Expr *FE, SourceLocation CC,
                                     bool DiscardedValue, bool IsConstexpr,
                                     bool IsTemplateArgument) {
  ExprResult FullExpr = FE;

  if (!FullExpr.get())
    return ExprError();

  // Template arguments may legitimately contain unexpanded packs.
  if (!IsTemplateArgument && DiagnoseUnexpandedParameterPack(E: FullExpr.get()))
    return ExprError();

  if (DiscardedValue) {
    // Top-level expressions default to 'id' when we're in a debugger.
    if (getLangOpts().DebuggerCastResultToId &&
        FullExpr.get()->getType() == Context.UnknownAnyTy) {
      FullExpr = forceUnknownAnyToType(E: FullExpr.get(), ToType: Context.getObjCIdType());
      if (FullExpr.isInvalid())
        return ExprError();
    }

    FullExpr = CheckPlaceholderExpr(E: FullExpr.get());
    if (FullExpr.isInvalid())
      return ExprError();

    // Apply discarded-value conversions, then warn on unused results.
    FullExpr = IgnoredValueConversions(E: FullExpr.get());
    if (FullExpr.isInvalid())
      return ExprError();

    DiagnoseUnusedExprResult(S: FullExpr.get(), DiagID: diag::warn_unused_expr);
  }

  if (FullExpr.isInvalid())
    return ExprError();

  CheckCompletedExpr(E: FullExpr.get(), CheckLoc: CC, IsConstexpr);

  // At the end of this full expression (which could be a deeply nested
  // lambda), if there is a potential capture within the nested lambda,
  // have the outer capture-able lambda try and capture it.
  // Consider the following code:
  // void f(int, int);
  // void f(const int&, double);
  // void foo() {
  //   const int x = 10, y = 20;
  //   auto L = [=](auto a) {
  //       auto M = [=](auto b) {
  //          f(x, b); <-- requires x to be captured by L and M
  //          f(y, a);  <-- requires y to be captured by L, but not all Ms
  //       };
  //   };
  // }

  // FIXME: Also consider what happens for something like this that involves
  // the gnu-extension statement-expressions or even lambda-init-captures:
  //   void f() {
  //     const int n = 0;
  //     auto L =  [&](auto a) {
  //       +n + ({ 0; a; });
  //     };
  //   }
  //
  // Here, we see +n, and then the full-expression 0; ends, so we don't
  // capture n (and instead remove it from our list of potential captures),
  // and then the full-expression +n + ({ 0; }); ends, but it's too late
  // for us to see that we need to capture n after all.

  LambdaScopeInfo *const CurrentLSI =
      getCurLambda(/*IgnoreCapturedRegions=*/IgnoreNonLambdaCapturingScope: true);
  // FIXME: PR 17877 showed that getCurLambda() can return a valid pointer
  // even if CurContext is not a lambda call operator.  Refer to that Bug Report
  // for an example of the code that might cause this asynchrony.
  // By ensuring we are in the context of a lambda's call operator
  // we can fix the bug (we only need to check whether we need to capture
  // if we are within a lambda's body); but per the comments in that
  // PR, a proper fix would entail :
  //   "Alternative suggestion:
  //   - Add to Sema an integer holding the smallest (outermost) scope
  //     index that we are *lexically* within, and save/restore/set to
  //     FunctionScopes.size() in InstantiatingTemplate's
  //     constructor/destructor.
  //   - Teach the handful of places that iterate over FunctionScopes to
  //     stop at the outermost enclosing lexical scope."
  DeclContext *DC = CurContext;
  while (isa_and_nonnull<CapturedDecl>(Val: DC))
    DC = DC->getParent();
  const bool IsInLambdaDeclContext = isLambdaCallOperator(DC);
  if (IsInLambdaDeclContext && CurrentLSI &&
      CurrentLSI->hasPotentialCaptures() && !FullExpr.isInvalid())
    CheckIfAnyEnclosingLambdasMustCaptureAnyPotentialCaptures(FE, CurrentLSI,
                                                              S&: *this);
  return MaybeCreateExprWithCleanups(SubExpr: FullExpr);
}
7741
7742StmtResult Sema::ActOnFinishFullStmt(Stmt *FullStmt) {
7743 if (!FullStmt) return StmtError();
7744
7745 return MaybeCreateStmtWithCleanups(SubStmt: FullStmt);
7746}
7747
7748IfExistsResult
7749Sema::CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
7750 const DeclarationNameInfo &TargetNameInfo) {
7751 DeclarationName TargetName = TargetNameInfo.getName();
7752 if (!TargetName)
7753 return IfExistsResult::DoesNotExist;
7754
7755 // If the name itself is dependent, then the result is dependent.
7756 if (TargetName.isDependentName())
7757 return IfExistsResult::Dependent;
7758
7759 // Do the redeclaration lookup in the current scope.
7760 LookupResult R(*this, TargetNameInfo, Sema::LookupAnyName,
7761 RedeclarationKind::NotForRedeclaration);
7762 LookupParsedName(R, S, SS: &SS, /*ObjectType=*/QualType());
7763 R.suppressDiagnostics();
7764
7765 switch (R.getResultKind()) {
7766 case LookupResultKind::Found:
7767 case LookupResultKind::FoundOverloaded:
7768 case LookupResultKind::FoundUnresolvedValue:
7769 case LookupResultKind::Ambiguous:
7770 return IfExistsResult::Exists;
7771
7772 case LookupResultKind::NotFound:
7773 return IfExistsResult::DoesNotExist;
7774
7775 case LookupResultKind::NotFoundInCurrentInstantiation:
7776 return IfExistsResult::Dependent;
7777 }
7778
7779 llvm_unreachable("Invalid LookupResult Kind!");
7780}
7781
7782IfExistsResult Sema::CheckMicrosoftIfExistsSymbol(Scope *S,
7783 SourceLocation KeywordLoc,
7784 bool IsIfExists,
7785 CXXScopeSpec &SS,
7786 UnqualifiedId &Name) {
7787 DeclarationNameInfo TargetNameInfo = GetNameFromUnqualifiedId(Name);
7788
7789 // Check for an unexpanded parameter pack.
7790 auto UPPC = IsIfExists ? UPPC_IfExists : UPPC_IfNotExists;
7791 if (DiagnoseUnexpandedParameterPack(SS, UPPC) ||
7792 DiagnoseUnexpandedParameterPack(NameInfo: TargetNameInfo, UPPC))
7793 return IfExistsResult::Error;
7794
7795 return CheckMicrosoftIfExistsSymbol(S, SS, TargetNameInfo);
7796}
7797
7798concepts::Requirement *Sema::ActOnSimpleRequirement(Expr *E) {
7799 return BuildExprRequirement(E, /*IsSimple=*/IsSatisfied: true,
7800 /*NoexceptLoc=*/SourceLocation(),
7801 /*ReturnTypeRequirement=*/{});
7802}
7803
7804concepts::Requirement *Sema::ActOnTypeRequirement(
7805 SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc,
7806 const IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId) {
7807 assert(((!TypeName && TemplateId) || (TypeName && !TemplateId)) &&
7808 "Exactly one of TypeName and TemplateId must be specified.");
7809 TypeSourceInfo *TSI = nullptr;
7810 if (TypeName) {
7811 QualType T =
7812 CheckTypenameType(Keyword: ElaboratedTypeKeyword::Typename, KeywordLoc: TypenameKWLoc,
7813 QualifierLoc: SS.getWithLocInContext(Context), II: *TypeName, IILoc: NameLoc,
7814 TSI: &TSI, /*DeducedTSTContext=*/false);
7815 if (T.isNull())
7816 return nullptr;
7817 } else {
7818 ASTTemplateArgsPtr ArgsPtr(TemplateId->getTemplateArgs(),
7819 TemplateId->NumArgs);
7820 TypeResult T = ActOnTypenameType(S: CurScope, TypenameLoc: TypenameKWLoc, SS,
7821 TemplateLoc: TemplateId->TemplateKWLoc,
7822 TemplateName: TemplateId->Template, TemplateII: TemplateId->Name,
7823 TemplateIILoc: TemplateId->TemplateNameLoc,
7824 LAngleLoc: TemplateId->LAngleLoc, TemplateArgs: ArgsPtr,
7825 RAngleLoc: TemplateId->RAngleLoc);
7826 if (T.isInvalid())
7827 return nullptr;
7828 if (GetTypeFromParser(Ty: T.get(), TInfo: &TSI).isNull())
7829 return nullptr;
7830 }
7831 return BuildTypeRequirement(Type: TSI);
7832}
7833
7834concepts::Requirement *
7835Sema::ActOnCompoundRequirement(Expr *E, SourceLocation NoexceptLoc) {
7836 return BuildExprRequirement(E, /*IsSimple=*/IsSatisfied: false, NoexceptLoc,
7837 /*ReturnTypeRequirement=*/{});
7838}
7839
7840concepts::Requirement *
7841Sema::ActOnCompoundRequirement(
7842 Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS,
7843 TemplateIdAnnotation *TypeConstraint, unsigned Depth) {
7844 // C++2a [expr.prim.req.compound] p1.3.3
7845 // [..] the expression is deduced against an invented function template
7846 // F [...] F is a void function template with a single type template
7847 // parameter T declared with the constrained-parameter. Form a new
7848 // cv-qualifier-seq cv by taking the union of const and volatile specifiers
7849 // around the constrained-parameter. F has a single parameter whose
7850 // type-specifier is cv T followed by the abstract-declarator. [...]
7851 //
7852 // The cv part is done in the calling function - we get the concept with
7853 // arguments and the abstract declarator with the correct CV qualification and
7854 // have to synthesize T and the single parameter of F.
7855 auto &II = Context.Idents.get(Name: "expr-type");
7856 auto *TParam = TemplateTypeParmDecl::Create(C: Context, DC: CurContext,
7857 KeyLoc: SourceLocation(),
7858 NameLoc: SourceLocation(), D: Depth,
7859 /*Index=*/P: 0, Id: &II,
7860 /*Typename=*/true,
7861 /*ParameterPack=*/false,
7862 /*HasTypeConstraint=*/true);
7863
7864 if (BuildTypeConstraint(SS, TypeConstraint, ConstrainedParameter: TParam,
7865 /*EllipsisLoc=*/SourceLocation(),
7866 /*AllowUnexpandedPack=*/true))
7867 // Just produce a requirement with no type requirements.
7868 return BuildExprRequirement(E, /*IsSimple=*/IsSatisfied: false, NoexceptLoc, ReturnTypeRequirement: {});
7869
7870 auto *TPL = TemplateParameterList::Create(C: Context, TemplateLoc: SourceLocation(),
7871 LAngleLoc: SourceLocation(),
7872 Params: ArrayRef<NamedDecl *>(TParam),
7873 RAngleLoc: SourceLocation(),
7874 /*RequiresClause=*/nullptr);
7875 return BuildExprRequirement(
7876 E, /*IsSimple=*/IsSatisfied: false, NoexceptLoc,
7877 ReturnTypeRequirement: concepts::ExprRequirement::ReturnTypeRequirement(TPL));
7878}
7879
/// Build an expression-requirement (simple- or compound-requirement),
/// classifying its satisfaction status up front when the expression is
/// non-dependent. When the return-type-requirement is a type-constraint,
/// the constraint is checked here against decltype((E)).
concepts::ExprRequirement *
Sema::BuildExprRequirement(
    Expr *E, bool IsSimple, SourceLocation NoexceptLoc,
    concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement) {
  // Assume satisfied until one of the checks below says otherwise.
  auto Status = concepts::ExprRequirement::SS_Satisfied;
  ConceptSpecializationExpr *SubstitutedConstraintExpr = nullptr;
  // A dependent expression (or a dependent return-type-requirement) cannot
  // be checked until instantiation.
  if (E->isInstantiationDependent() || E->getType()->isPlaceholderType() ||
      ReturnTypeRequirement.isDependent())
    Status = concepts::ExprRequirement::SS_Dependent;
  else if (NoexceptLoc.isValid() && canThrow(E) == CanThrowResult::CT_Can)
    Status = concepts::ExprRequirement::SS_NoexceptNotMet;
  else if (ReturnTypeRequirement.isSubstitutionFailure())
    Status = concepts::ExprRequirement::SS_TypeRequirementSubstitutionFailure;
  else if (ReturnTypeRequirement.isTypeConstraint()) {
    // C++2a [expr.prim.req]p1.3.3
    //     The immediately-declared constraint ([temp]) of decltype((E)) shall
    //     be satisfied.
    TemplateParameterList *TPL =
        ReturnTypeRequirement.getTypeConstraintTemplateParameterList();
    // decltype((E)) — the reference-qualified type of the expression — is
    // the argument the invented parameter is bound to.
    QualType MatchedType =
        Context.getReferenceQualifiedType(e: E).getCanonicalType();
    llvm::SmallVector<TemplateArgument, 1> Args;
    Args.push_back(Elt: TemplateArgument(MatchedType));

    auto *Param = cast<TemplateTypeParmDecl>(Val: TPL->getParam(Idx: 0));

    // Bind the invented parameter to decltype((E)), retaining any enclosing
    // template levels so outer parameters still resolve.
    MultiLevelTemplateArgumentList MLTAL(Param, Args, /*Final=*/false);
    MLTAL.addOuterRetainedLevels(Num: TPL->getDepth());
    const TypeConstraint *TC = Param->getTypeConstraint();
    assert(TC && "Type Constraint cannot be null here");
    auto *IDC = TC->getImmediatelyDeclaredConstraint();
    assert(IDC && "ImmediatelyDeclaredConstraint can't be null here.");
    ExprResult Constraint = SubstExpr(E: IDC, TemplateArgs: MLTAL);
    if (Constraint.isInvalid()) {
      // Substitution into the constraint failed: record a pretty-printed
      // rendering of the constraint as a substitution diagnostic so the
      // failure can be reported when the requirement is diagnosed.
      return new (Context) concepts::ExprRequirement(
          createSubstDiagAt(Location: IDC->getExprLoc(),
                            Printer: [&](llvm::raw_ostream &OS) {
                              IDC->printPretty(OS, /*Helper=*/nullptr,
                                               Policy: getPrintingPolicy());
                            }),
          IsSimple, NoexceptLoc, ReturnTypeRequirement);
    }
    SubstitutedConstraintExpr =
        cast<ConceptSpecializationExpr>(Val: Constraint.get());
    if (!SubstitutedConstraintExpr->isSatisfied())
      Status = concepts::ExprRequirement::SS_ConstraintsNotSatisfied;
  }
  return new (Context) concepts::ExprRequirement(E, IsSimple, NoexceptLoc,
                                                 ReturnTypeRequirement, Status,
                                                 SubstitutedConstraintExpr);
}
7931
7932concepts::ExprRequirement *
7933Sema::BuildExprRequirement(
7934 concepts::Requirement::SubstitutionDiagnostic *ExprSubstitutionDiagnostic,
7935 bool IsSimple, SourceLocation NoexceptLoc,
7936 concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement) {
7937 return new (Context) concepts::ExprRequirement(ExprSubstitutionDiagnostic,
7938 IsSimple, NoexceptLoc,
7939 ReturnTypeRequirement);
7940}
7941
7942concepts::TypeRequirement *
7943Sema::BuildTypeRequirement(TypeSourceInfo *Type) {
7944 return new (Context) concepts::TypeRequirement(Type);
7945}
7946
7947concepts::TypeRequirement *
7948Sema::BuildTypeRequirement(
7949 concepts::Requirement::SubstitutionDiagnostic *SubstDiag) {
7950 return new (Context) concepts::TypeRequirement(SubstDiag);
7951}
7952
7953concepts::Requirement *Sema::ActOnNestedRequirement(Expr *Constraint) {
7954 return BuildNestedRequirement(E: Constraint);
7955}
7956
7957concepts::NestedRequirement *
7958Sema::BuildNestedRequirement(Expr *Constraint) {
7959 ConstraintSatisfaction Satisfaction;
7960 if (!Constraint->isInstantiationDependent() &&
7961 CheckConstraintSatisfaction(Template: nullptr, AssociatedConstraints: AssociatedConstraint(Constraint),
7962 /*TemplateArgs=*/TemplateArgLists: {},
7963 TemplateIDRange: Constraint->getSourceRange(), Satisfaction))
7964 return nullptr;
7965 return new (Context) concepts::NestedRequirement(Context, Constraint,
7966 Satisfaction);
7967}
7968
7969concepts::NestedRequirement *
7970Sema::BuildNestedRequirement(StringRef InvalidConstraintEntity,
7971 const ASTConstraintSatisfaction &Satisfaction) {
7972 return new (Context) concepts::NestedRequirement(
7973 InvalidConstraintEntity,
7974 ASTConstraintSatisfaction::Rebuild(C: Context, Satisfaction));
7975}
7976
7977RequiresExprBodyDecl *
7978Sema::ActOnStartRequiresExpr(SourceLocation RequiresKWLoc,
7979 ArrayRef<ParmVarDecl *> LocalParameters,
7980 Scope *BodyScope) {
7981 assert(BodyScope);
7982
7983 RequiresExprBodyDecl *Body = RequiresExprBodyDecl::Create(C&: Context, DC: CurContext,
7984 StartLoc: RequiresKWLoc);
7985
7986 PushDeclContext(S: BodyScope, DC: Body);
7987
7988 for (ParmVarDecl *Param : LocalParameters) {
7989 if (Param->getType()->isVoidType()) {
7990 if (LocalParameters.size() > 1) {
7991 Diag(Loc: Param->getBeginLoc(), DiagID: diag::err_void_only_param);
7992 Param->setType(Context.IntTy);
7993 } else if (Param->getIdentifier()) {
7994 Diag(Loc: Param->getBeginLoc(), DiagID: diag::err_param_with_void_type);
7995 Param->setType(Context.IntTy);
7996 } else if (Param->getType().hasQualifiers()) {
7997 Diag(Loc: Param->getBeginLoc(), DiagID: diag::err_void_param_qualified);
7998 }
7999 } else if (Param->hasDefaultArg()) {
8000 // C++2a [expr.prim.req] p4
8001 // [...] A local parameter of a requires-expression shall not have a
8002 // default argument. [...]
8003 Diag(Loc: Param->getDefaultArgRange().getBegin(),
8004 DiagID: diag::err_requires_expr_local_parameter_default_argument);
8005 // Ignore default argument and move on
8006 } else if (Param->isExplicitObjectParameter()) {
8007 // C++23 [dcl.fct]p6:
8008 // An explicit-object-parameter-declaration is a parameter-declaration
8009 // with a this specifier. An explicit-object-parameter-declaration
8010 // shall appear only as the first parameter-declaration of a
8011 // parameter-declaration-list of either:
8012 // - a member-declarator that declares a member function, or
8013 // - a lambda-declarator.
8014 //
8015 // The parameter-declaration-list of a requires-expression is not such
8016 // a context.
8017 Diag(Loc: Param->getExplicitObjectParamThisLoc(),
8018 DiagID: diag::err_requires_expr_explicit_object_parameter);
8019 Param->setExplicitObjectParameterLoc(SourceLocation());
8020 }
8021
8022 Param->setDeclContext(Body);
8023 // If this has an identifier, add it to the scope stack.
8024 if (Param->getIdentifier()) {
8025 CheckShadow(S: BodyScope, D: Param);
8026 PushOnScopeChains(D: Param, S: BodyScope);
8027 }
8028 }
8029 return Body;
8030}
8031
8032void Sema::ActOnFinishRequiresExpr() {
8033 assert(CurContext && "DeclContext imbalance!");
8034 CurContext = CurContext->getLexicalParent();
8035 assert(CurContext && "Popped translation unit!");
8036}
8037
8038ExprResult Sema::ActOnRequiresExpr(
8039 SourceLocation RequiresKWLoc, RequiresExprBodyDecl *Body,
8040 SourceLocation LParenLoc, ArrayRef<ParmVarDecl *> LocalParameters,
8041 SourceLocation RParenLoc, ArrayRef<concepts::Requirement *> Requirements,
8042 SourceLocation ClosingBraceLoc) {
8043 auto *RE = RequiresExpr::Create(C&: Context, RequiresKWLoc, Body, LParenLoc,
8044 LocalParameters, RParenLoc, Requirements,
8045 RBraceLoc: ClosingBraceLoc);
8046 if (DiagnoseUnexpandedParameterPackInRequiresExpr(RE))
8047 return ExprError();
8048 return RE;
8049}
8050